repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
antiface/mne-python | examples/decoding/plot_linear_model_patterns.py | 13 | 3098 | """
===============================================================
Linear classifier on sensor data with plot patterns and filters
===============================================================
Decoding, a.k.a. MVPA or supervised machine learning, applied to MEG and EEG
data in sensor space. Fit a linear classifier with the LinearModel object,
which provides topographical patterns that are more neurophysiologically
interpretable [1] than the classifier filters (weight vectors).
The patterns explain how the MEG and EEG data were generated from the
discriminant neural sources, which are extracted by the filters.
Note that patterns and filters are more similar for MEG than for EEG data
because the noise is less spatially correlated in MEG than in EEG.
[1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,
Blankertz, B., & Bießmann, F. (2014). On the interpretation of
weight vectors of linear models in multivariate neuroimaging.
NeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Romain Trachel <trachelr@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
decim=4, baseline=None, preload=True)
labels = epochs.events[:, -1]
# get MEG and EEG data
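# (get_data() returns an n_epochs x n_channels x n_times array; the reshape
# below flattens each epoch into a single feature vector)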
meg_epochs = epochs.pick_types(meg=True, eeg=False, copy=True)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
eeg_epochs = epochs.pick_types(meg=False, eeg=True, copy=True)
eeg_data = eeg_epochs.get_data().reshape(len(labels), -1)
###############################################################################
# Decoding in sensor space using a LogisticRegression classifier
clf = LogisticRegression()
sc = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = sc.fit_transform(meg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(meg_epochs.info, title='MEG Patterns')
model.plot_filters(meg_epochs.info, title='MEG Filters')
# fit the classifier on EEG data
X = sc.fit_transform(eeg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(eeg_epochs.info, title='EEG Patterns')
model.plot_filters(eeg_epochs.info, title='EEG Filters')
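# The filter/pattern relationship described in the docstring can be sketched
# directly. This is an illustration added to the example, not part of the
# original script; it assumes the fitted LinearModel exposes a ``filters_``
# attribute (as MNE's LinearModel does, but treat it as an assumption here).
# Following Haufe et al. (2014), the pattern associated with a linear filter
# ``w`` is, up to scaling, the data covariance applied to the filter.
import numpy as np  # extra import needed only for this sketch

w = model.filters_.ravel()  # classifier weight vector (filter) from the EEG fit
a = np.dot(np.cov(X, rowvar=False), w)  # corresponding pattern, up to scaling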
| bsd-3-clause |
aselle/tensorflow | tensorflow/python/keras/layers/merge_test.py | 11 | 8732 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for merge layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class MergeLayersTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_add(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
i3 = keras.layers.Input(shape=(4, 5))
o = keras.layers.add([i1, i2, i3])
self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2, i3], o)
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
x3 = np.random.random((2, 4, 5))
out = model.predict([x1, x2, x3])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1 + x2 + x3, atol=1e-4)
def test_merge_add_masking(self):
with self.test_session():
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
m1 = keras.layers.Masking()(i1)
layer = keras.layers.Add()
o = layer([m1, i2])
self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
mask = layer.output_mask
self.assertListEqual(mask.get_shape().as_list(), [None, 4])
def test_merge_add_dynamic_shape(self):
with self.test_session():
i1 = array_ops.placeholder(shape=(4, None), dtype='float32')
i2 = array_ops.placeholder(shape=(4, 5), dtype='float32')
layer = keras.layers.Add()
o = layer([i1, i2])
self.assertListEqual(o.get_shape().as_list(), [4, 5])
def test_merge_elementwise_errors(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 6))
with self.assertRaises(ValueError):
keras.layers.add([i1, i2])
with self.assertRaises(ValueError):
keras.layers.add([i1])
with self.assertRaises(ValueError):
keras.layers.add(i1)
with self.assertRaises(ValueError):
keras.layers.add([i1])
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_multiply(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
i3 = keras.layers.Input(shape=(4, 5))
o = keras.layers.multiply([i1, i2, i3])
self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2, i3], o)
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
x3 = np.random.random((2, 4, 5))
out = model.predict([x1, x2, x3])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, x1 * x2 * x3, atol=1e-4)
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_average(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.average([i1, i2])
self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, 0.5 * (x1 + x2), atol=1e-4)
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_maximum(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.maximum([i1, i2])
self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, np.maximum(x1, x2), atol=1e-4)
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_minimum(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.minimum([i1, i2])
self.assertListEqual(o.get_shape().as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 4, 5))
self.assertAllClose(out, np.minimum(x1, x2), atol=1e-4)
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_concatenate(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
o = keras.layers.concatenate([i1, i2], axis=1)
self.assertListEqual(o.get_shape().as_list(), [None, 8, 5])
model = keras.models.Model([i1, i2], o)
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 8, 5))
self.assertAllClose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)
def test_merge_concatenate_masking(self):
with self.test_session():
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
m1 = keras.layers.Masking()(i1)
layer = keras.layers.Concatenate()
o = layer([m1, i2])
self.assertListEqual(o.get_shape().as_list(), [None, 4, 10])
mask = layer.output_mask
self.assertListEqual(mask.get_shape().as_list(), [None, 4])
def test_concatenate_errors(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(3, 5))
with self.assertRaisesRegexp(ValueError, 'inputs with matching shapes'):
keras.layers.concatenate([i1, i2], axis=-1)
with self.assertRaisesRegexp(ValueError, 'called on a list'):
keras.layers.concatenate(i1, axis=-1)
with self.assertRaisesRegexp(ValueError, 'called on a list'):
keras.layers.concatenate([i1], axis=-1)
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_dot(self):
i1 = keras.layers.Input(shape=(4,))
i2 = keras.layers.Input(shape=(4,))
o = keras.layers.dot([i1, i2], axes=1)
self.assertListEqual(o.get_shape().as_list(), [None, 1])
model = keras.models.Model([i1, i2], o)
_ = keras.layers.Dot(axes=1).get_config()
x1 = np.random.random((2, 4))
x2 = np.random.random((2, 4))
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 1))
expected = np.zeros((2, 1))
expected[0, 0] = np.dot(x1[0], x2[0])
expected[1, 0] = np.dot(x1[1], x2[1])
self.assertAllClose(out, expected, atol=1e-4)
# Test with negative tuple of axes.
o = keras.layers.dot([i1, i2], axes=(-1, -1))
self.assertListEqual(o.get_shape().as_list(), [None, 1])
model = keras.models.Model([i1, i2], o)
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 1))
self.assertAllClose(out, expected, atol=1e-4)
# test compute_output_shape
layer = keras.layers.Dot(axes=-1)
self.assertEqual(layer.compute_output_shape([(4, 5), (4, 5)]), (4, 1))
def test_dot_errors(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 6))
i3 = keras.layers.Input(shape=(4, 6))
with self.assertRaises(ValueError):
keras.layers.dot([i1, i2], axes=-1)
with self.assertRaises(ValueError):
keras.layers.dot(i1, axes=-1)
with self.assertRaises(ValueError):
keras.layers.dot([i1], axes=-1)
with self.assertRaises(ValueError):
keras.layers.dot([i1, i2, i3], axes=-1)
with self.assertRaises(ValueError):
dot = keras.layers.Dot(1)
dot.compute_output_shape(1)
@tf_test_util.run_in_graph_and_eager_modes
def test_merge_subtract(self):
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(4, 5))
y = keras.layers.subtract([i1, i2])
self.assertEqual(y.get_shape().as_list(), [None, 4, 5])
# Test invalid use cases
i1 = keras.layers.Input(shape=(4, 5))
i2 = keras.layers.Input(shape=(3, 5))
with self.assertRaises(ValueError):
keras.layers.subtract([i1, i2])
with self.assertRaises(ValueError):
keras.layers.subtract([i1, i1, i1])
if __name__ == '__main__':
test.main()
| apache-2.0 |
anryko/ansible | lib/ansible/modules/cloud/vmware/vmware_dvs_portgroup_info.py | 21 | 10269 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_dvs_portgroup_info
short_description: Gathers info about DVS portgroup configurations
description:
- This module can be used to gather information about DVS portgroup configurations.
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
datacenter:
description:
- Name of the datacenter.
required: true
type: str
dvswitch:
description:
- Name of a dvswitch to look for.
required: false
type: str
version_added: "2.9"
show_network_policy:
description:
- Show or hide network policies of DVS portgroup.
type: bool
default: True
show_port_policy:
description:
- Show or hide port policies of DVS portgroup.
type: bool
default: True
show_teaming_policy:
description:
- Show or hide teaming policies of DVS portgroup.
type: bool
default: True
show_vlan_info:
description:
- Show or hide vlan information of the DVS portgroup.
type: bool
default: False
version_added: "2.9"
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Get info about DVPG
vmware_dvs_portgroup_info:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
validate_certs: no
datacenter: "{{ datacenter_name }}"
register: dvpg_info
- name: Get number of ports for portgroup 'dvpg_001' in 'dvs_001'
debug:
msg: "{{ item.num_ports }}"
with_items:
- "{{ dvpg_info.dvs_portgroup_info['dvs_001'] | json_query(query) }}"
vars:
query: "[?portgroup_name=='dvpg_001']"
'''
RETURN = r'''
dvs_portgroup_info:
description: metadata about DVS portgroup configuration
returned: on success
type: dict
sample: {
"dvs_0":[
{
"description": null,
"dvswitch_name": "dvs_001",
"network_policy": {
"forged_transmits": false,
"mac_changes": false,
"promiscuous": false
},
"num_ports": 8,
"port_policy": {
"block_override": true,
"ipfix_override": false,
"live_port_move": false,
"network_rp_override": false,
"port_config_reset_at_disconnect": true,
"security_override": false,
"shaping_override": false,
"traffic_filter_override": false,
"uplink_teaming_override": false,
"vendor_config_override": false,
"vlan_override": false
},
"portgroup_name": "dvpg_001",
"teaming_policy": {
"inbound_policy": true,
"notify_switches": true,
"policy": "loadbalance_srcid",
"rolling_order": false
},
"vlan_info": {
"trunk": false,
"pvlan": false,
"vlan_id": 0
},
"type": "earlyBinding"
},
]
}
'''
try:
from pyVmomi import vim
except ImportError as e:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs, find_dvs_by_name
class DVSPortgroupInfoManager(PyVmomi):
def __init__(self, module):
super(DVSPortgroupInfoManager, self).__init__(module)
self.dc_name = self.params['datacenter']
self.dvs_name = self.params['dvswitch']
datacenter = self.find_datacenter_by_name(self.dc_name)
if datacenter is None:
self.module.fail_json(msg="Failed to find the datacenter %s" % self.dc_name)
if self.dvs_name:
# User specified specific dvswitch name to gather information
dvsn = find_dvs_by_name(self.content, self.dvs_name)
if dvsn is None:
self.module.fail_json(msg="Failed to find the dvswitch %s" % self.dvs_name)
self.dvsls = [dvsn]
else:
# default behaviour, gather information about all dvswitches
self.dvsls = get_all_objs(self.content, [vim.DistributedVirtualSwitch], folder=datacenter.networkFolder)
def get_vlan_info(self, vlan_obj=None):
"""
Return vlan information from given object
Args:
vlan_obj: vlan managed object
Returns: Dict of vlan details of the specific object
"""
vdret = dict()
if not vlan_obj:
return vdret
if isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec):
vlan_id_list = []
for vli in vlan_obj.vlanId:
if vli.start == vli.end:
vlan_id_list.append(str(vli.start))
else:
vlan_id_list.append(str(vli.start) + "-" + str(vli.end))
vdret = dict(trunk=True, pvlan=False, vlan_id=vlan_id_list)
elif isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec):
vdret = dict(trunk=False, pvlan=True, vlan_id=str(vlan_obj.pvlanId))
else:
vdret = dict(trunk=False, pvlan=False, vlan_id=str(vlan_obj.vlanId))
return vdret
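    # For example (illustrative values): a TrunkVlanSpec whose vlanId ranges
    # cover (1, 1) and (100, 110) yields
    #   {'trunk': True, 'pvlan': False, 'vlan_id': ['1', '100-110']}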
def gather_dvs_portgroup_info(self):
dvs_lists = self.dvsls
result = dict()
for dvs in dvs_lists:
result[dvs.name] = list()
for dvs_pg in dvs.portgroup:
network_policy = dict()
teaming_policy = dict()
port_policy = dict()
vlan_info = dict()
if self.module.params['show_network_policy'] and dvs_pg.config.defaultPortConfig.securityPolicy:
network_policy = dict(
forged_transmits=dvs_pg.config.defaultPortConfig.securityPolicy.forgedTransmits.value,
promiscuous=dvs_pg.config.defaultPortConfig.securityPolicy.allowPromiscuous.value,
mac_changes=dvs_pg.config.defaultPortConfig.securityPolicy.macChanges.value
)
if self.module.params['show_teaming_policy']:
# govcsim does not have uplinkTeamingPolicy, remove this check once
# PR https://github.com/vmware/govmomi/pull/1524 merged.
if dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy:
teaming_policy = dict(
policy=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.policy.value,
inbound_policy=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.reversePolicy.value,
notify_switches=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.notifySwitches.value,
rolling_order=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.rollingOrder.value,
)
if self.params['show_port_policy']:
# govcsim does not have port policy
if dvs_pg.config.policy:
port_policy = dict(
block_override=dvs_pg.config.policy.blockOverrideAllowed,
ipfix_override=dvs_pg.config.policy.ipfixOverrideAllowed,
live_port_move=dvs_pg.config.policy.livePortMovingAllowed,
network_rp_override=dvs_pg.config.policy.networkResourcePoolOverrideAllowed,
port_config_reset_at_disconnect=dvs_pg.config.policy.portConfigResetAtDisconnect,
security_override=dvs_pg.config.policy.securityPolicyOverrideAllowed,
shaping_override=dvs_pg.config.policy.shapingOverrideAllowed,
traffic_filter_override=dvs_pg.config.policy.trafficFilterOverrideAllowed,
uplink_teaming_override=dvs_pg.config.policy.uplinkTeamingOverrideAllowed,
vendor_config_override=dvs_pg.config.policy.vendorConfigOverrideAllowed,
vlan_override=dvs_pg.config.policy.vlanOverrideAllowed
)
if self.params['show_vlan_info']:
vlan_info = self.get_vlan_info(dvs_pg.config.defaultPortConfig.vlan)
dvpg_details = dict(
portgroup_name=dvs_pg.name,
num_ports=dvs_pg.config.numPorts,
dvswitch_name=dvs_pg.config.distributedVirtualSwitch.name,
description=dvs_pg.config.description,
type=dvs_pg.config.type,
teaming_policy=teaming_policy,
port_policy=port_policy,
network_policy=network_policy,
vlan_info=vlan_info,
)
result[dvs.name].append(dvpg_details)
return result
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
datacenter=dict(type='str', required=True),
show_network_policy=dict(type='bool', default=True),
show_teaming_policy=dict(type='bool', default=True),
show_port_policy=dict(type='bool', default=True),
dvswitch=dict(),
show_vlan_info=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
dvs_pg_mgr = DVSPortgroupInfoManager(module)
module.exit_json(changed=False,
dvs_portgroup_info=dvs_pg_mgr.gather_dvs_portgroup_info())
if __name__ == "__main__":
main()
| gpl-3.0 |
davibe/gstreamersdk_pygobject | tests/test_everything.py | 3 | 16643 | # -*- Mode: Python; py-indent-offset: 4 -*-
# coding=utf-8
# vim: tabstop=4 shiftwidth=4 expandtab
import unittest
import sys
sys.path.insert(0, "../")
from sys import getrefcount
import cairo
from gi.repository import GObject
from gi.repository import Regress as Everything
if sys.version_info < (3, 0):
UNICHAR = "\xe2\x99\xa5"
PY2_UNICODE_UNICHAR = unicode(UNICHAR, 'UTF-8')
else:
UNICHAR = "♥"
class TestEverything(unittest.TestCase):
def test_cairo_context(self):
context = Everything.test_cairo_context_full_return()
self.assertTrue(isinstance(context, cairo.Context))
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 10, 10)
context = cairo.Context(surface)
Everything.test_cairo_context_none_in(context)
def test_cairo_surface(self):
surface = Everything.test_cairo_surface_none_return()
self.assertTrue(isinstance(surface, cairo.ImageSurface))
self.assertTrue(isinstance(surface, cairo.Surface))
self.assertEquals(surface.get_format(), cairo.FORMAT_ARGB32)
self.assertEquals(surface.get_width(), 10)
self.assertEquals(surface.get_height(), 10)
surface = Everything.test_cairo_surface_full_return()
self.assertTrue(isinstance(surface, cairo.ImageSurface))
self.assertTrue(isinstance(surface, cairo.Surface))
self.assertEquals(surface.get_format(), cairo.FORMAT_ARGB32)
self.assertEquals(surface.get_width(), 10)
self.assertEquals(surface.get_height(), 10)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 10, 10)
Everything.test_cairo_surface_none_in(surface)
surface = Everything.test_cairo_surface_full_out()
self.assertTrue(isinstance(surface, cairo.ImageSurface))
self.assertTrue(isinstance(surface, cairo.Surface))
self.assertEquals(surface.get_format(), cairo.FORMAT_ARGB32)
self.assertEquals(surface.get_width(), 10)
self.assertEquals(surface.get_height(), 10)
def test_unichar(self):
self.assertEquals("c", Everything.test_unichar("c"))
if sys.version_info < (3, 0):
self.assertEquals(UNICHAR, Everything.test_unichar(PY2_UNICODE_UNICHAR))
self.assertEquals(UNICHAR, Everything.test_unichar(UNICHAR))
self.assertRaises(TypeError, Everything.test_unichar, "")
self.assertRaises(TypeError, Everything.test_unichar, "morethanonechar")
def test_floating(self):
e = Everything.TestFloating()
self.assertEquals(e.__grefcount__, 1)
e = GObject.new(Everything.TestFloating)
self.assertEquals(e.__grefcount__, 1)
e = Everything.TestFloating.new()
self.assertEquals(e.__grefcount__, 1)
def test_caller_allocates(self):
struct_a = Everything.TestStructA()
struct_a.some_int = 10
struct_a.some_int8 = 21
struct_a.some_double = 3.14
struct_a.some_enum = Everything.TestEnum.VALUE3
struct_a_clone = struct_a.clone()
self.assertTrue(struct_a != struct_a_clone)
self.assertEquals(struct_a.some_int, struct_a_clone.some_int)
self.assertEquals(struct_a.some_int8, struct_a_clone.some_int8)
self.assertEquals(struct_a.some_double, struct_a_clone.some_double)
self.assertEquals(struct_a.some_enum, struct_a_clone.some_enum)
struct_b = Everything.TestStructB()
struct_b.some_int8 = 8
struct_b.nested_a.some_int = 20
struct_b.nested_a.some_int8 = 12
struct_b.nested_a.some_double = 333.3333
struct_b.nested_a.some_enum = Everything.TestEnum.VALUE2
struct_b_clone = struct_b.clone()
self.assertTrue(struct_b != struct_b_clone)
self.assertEquals(struct_b.some_int8, struct_b_clone.some_int8)
self.assertEquals(struct_b.nested_a.some_int, struct_b_clone.nested_a.some_int)
self.assertEquals(struct_b.nested_a.some_int8, struct_b_clone.nested_a.some_int8)
self.assertEquals(struct_b.nested_a.some_double, struct_b_clone.nested_a.some_double)
self.assertEquals(struct_b.nested_a.some_enum, struct_b_clone.nested_a.some_enum)
def test_wrong_type_of_arguments(self):
try:
Everything.test_int8()
except TypeError:
(e_type, e) = sys.exc_info()[:2]
self.assertEquals(e.args, ("test_int8() takes exactly 1 argument(s) (0 given)",))
def test_gtypes(self):
gchararray_gtype = GObject.type_from_name('gchararray')
gtype = Everything.test_gtype(str)
self.assertEquals(gchararray_gtype, gtype)
gtype = Everything.test_gtype('gchararray')
self.assertEquals(gchararray_gtype, gtype)
gobject_gtype = GObject.GObject.__gtype__
gtype = Everything.test_gtype(GObject.GObject)
self.assertEquals(gobject_gtype, gtype)
gtype = Everything.test_gtype('GObject')
self.assertEquals(gobject_gtype, gtype)
self.assertRaises(TypeError, Everything.test_gtype, 'invalidgtype')
class NotARegisteredClass(object):
pass
self.assertRaises(TypeError, Everything.test_gtype, NotARegisteredClass)
class ARegisteredClass(GObject.GObject):
__gtype_name__ = 'EverythingTestsARegisteredClass'
gtype = Everything.test_gtype('EverythingTestsARegisteredClass')
self.assertEquals(ARegisteredClass.__gtype__, gtype)
gtype = Everything.test_gtype(ARegisteredClass)
self.assertEquals(ARegisteredClass.__gtype__, gtype)
self.assertRaises(TypeError, Everything.test_gtype, 'ARegisteredClass')
def test_dir(self):
attr_list = dir(Everything)
# test that typelib attributes are listed
self.assertTrue('TestStructA' in attr_list)
# test that class attributes and methods are listed
self.assertTrue('__class__' in attr_list)
self.assertTrue('__dir__' in attr_list)
self.assertTrue('__repr__' in attr_list)
# test that instance members are listed
self.assertTrue('_namespace' in attr_list)
self.assertTrue('_version' in attr_list)
# test that there are no duplicates returned
self.assertEqual(len(attr_list), len(set(attr_list)))
class TestNullableArgs(unittest.TestCase):
def test_in_nullable_hash(self):
Everything.test_ghash_null_in(None)
def test_in_nullable_list(self):
Everything.test_gslist_null_in(None)
Everything.test_glist_null_in(None)
Everything.test_gslist_null_in([])
Everything.test_glist_null_in([])
def test_in_nullable_array(self):
Everything.test_array_int_null_in(None)
Everything.test_array_int_null_in([])
def test_in_nullable_string(self):
Everything.test_utf8_null_in(None)
def test_in_nullable_object(self):
Everything.func_obj_null_in(None)
def test_out_nullable_hash(self):
self.assertEqual(None, Everything.test_ghash_null_out())
def test_out_nullable_list(self):
self.assertEqual([], Everything.test_gslist_null_out())
self.assertEqual([], Everything.test_glist_null_out())
def test_out_nullable_array(self):
self.assertEqual([], Everything.test_array_int_null_out())
def test_out_nullable_string(self):
self.assertEqual(None, Everything.test_utf8_null_out())
def test_out_nullable_object(self):
self.assertEqual(None, Everything.TestObj.null_out())
class TestCallbacks(unittest.TestCase):
called = False
main_loop = GObject.MainLoop()
def testCallback(self):
TestCallbacks.called = False
def callback():
TestCallbacks.called = True
Everything.test_simple_callback(callback)
self.assertTrue(TestCallbacks.called)
def testCallbackException(self):
"""
This test ensures that we get errors from callbacks correctly
and in particular that we do not segv when callbacks fail
"""
def callback():
x = 1 / 0
try:
Everything.test_simple_callback(callback)
except ZeroDivisionError:
pass
def testDoubleCallbackException(self):
"""
This test ensures that we get errors from callbacks correctly
and in particular that we do not segv when callbacks fail
"""
def badcallback():
x = 1 / 0
def callback():
Everything.test_boolean(True)
Everything.test_boolean(False)
Everything.test_simple_callback(badcallback())
try:
Everything.test_simple_callback(callback)
except ZeroDivisionError:
pass
def testReturnValueCallback(self):
TestCallbacks.called = False
def callback():
TestCallbacks.called = True
return 44
self.assertEquals(Everything.test_callback(callback), 44)
self.assertTrue(TestCallbacks.called)
def testCallbackAsync(self):
TestCallbacks.called = False
def callback(foo):
TestCallbacks.called = True
return foo
        Everything.test_callback_async(callback, 44)
        i = Everything.test_callback_thaw_async()
        self.assertEquals(44, i)
self.assertTrue(TestCallbacks.called)
def testCallbackScopeCall(self):
TestCallbacks.called = 0
def callback():
TestCallbacks.called += 1
return 0
Everything.test_multi_callback(callback)
self.assertEquals(TestCallbacks.called, 2)
def testCallbackUserdata(self):
TestCallbacks.called = 0
def callback(userdata):
self.assertEquals(userdata, "Test%d" % TestCallbacks.called)
TestCallbacks.called += 1
return TestCallbacks.called
for i in range(100):
val = Everything.test_callback_user_data(callback, "Test%d" % i)
self.assertEquals(val, i+1)
self.assertEquals(TestCallbacks.called, 100)
def testCallbackUserdataRefCount(self):
TestCallbacks.called = False
def callback(userdata):
TestCallbacks.called = True
return 1
ud = "Test User Data"
start_ref_count = getrefcount(ud)
for i in range(100):
Everything.test_callback_destroy_notify(callback, ud)
Everything.test_callback_thaw_notifications()
end_ref_count = getrefcount(ud)
self.assertEquals(start_ref_count, end_ref_count)
def testAsyncReadyCallback(self):
TestCallbacks.called = False
TestCallbacks.main_loop = GObject.MainLoop()
def callback(obj, result, user_data):
TestCallbacks.main_loop.quit()
TestCallbacks.called = True
Everything.test_async_ready_callback(callback)
TestCallbacks.main_loop.run()
self.assertTrue(TestCallbacks.called)
def testCallbackDestroyNotify(self):
def callback(user_data):
TestCallbacks.called = True
return 42
TestCallbacks.called = False
self.assertEquals(Everything.test_callback_destroy_notify(callback, 42), 42)
self.assertTrue(TestCallbacks.called)
self.assertEquals(Everything.test_callback_thaw_notifications(), 42)
def testCallbackInMethods(self):
object_ = Everything.TestObj()
def callback():
TestCallbacks.called = True
return 42
TestCallbacks.called = False
object_.instance_method_callback(callback)
self.assertTrue(TestCallbacks.called)
TestCallbacks.called = False
Everything.TestObj.static_method_callback(callback)
self.assertTrue(TestCallbacks.called)
def callbackWithUserData(user_data):
TestCallbacks.called = True
return 42
TestCallbacks.called = False
obj_ = Everything.TestObj.new_callback(callbackWithUserData, None)
self.assertTrue(TestCallbacks.called)
def testCallbackNone(self):
# make sure this doesn't assert or crash
Everything.test_simple_callback(None)
class TestProperties(unittest.TestCase):
def test_basic(self):
object_ = Everything.TestObj()
self.assertEquals(object_.props.int, 0)
object_.props.int = 42
self.assertTrue(isinstance(object_.props.int, int))
self.assertEquals(object_.props.int, 42)
self.assertEquals(object_.props.float, 0.0)
object_.props.float = 42.42
self.assertTrue(isinstance(object_.props.float, float))
self.assertAlmostEquals(object_.props.float, 42.42, places=5)
self.assertEquals(object_.props.double, 0.0)
object_.props.double = 42.42
self.assertTrue(isinstance(object_.props.double, float))
self.assertAlmostEquals(object_.props.double, 42.42, places=5)
self.assertEquals(object_.props.string, None)
object_.props.string = 'mec'
self.assertTrue(isinstance(object_.props.string, str))
self.assertEquals(object_.props.string, 'mec')
def test_hash_table(self):
object_ = Everything.TestObj()
self.assertEquals(object_.props.hash_table, None)
object_.props.hash_table = {'mec': 56}
self.assertTrue(isinstance(object_.props.hash_table, dict))
self.assertEquals(list(object_.props.hash_table.items())[0], ('mec', 56))
def test_list(self):
object_ = Everything.TestObj()
self.assertEquals(object_.props.list, [])
object_.props.list = ['1', '2', '3']
self.assertTrue(isinstance(object_.props.list, list))
self.assertEquals(object_.props.list, ['1', '2', '3'])
def test_boxed(self):
object_ = Everything.TestObj()
self.assertEquals(object_.props.boxed, None)
boxed = Everything.TestBoxed()
boxed.some_int8 = 42
object_.props.boxed = boxed
self.assertTrue(isinstance(object_.props.boxed, Everything.TestBoxed))
self.assertEquals(object_.props.boxed.some_int8, 42)
class TestTortureProfile(unittest.TestCase):
def test_torture_profile(self):
import time
total_time = 0
print("")
object_ = Everything.TestObj()
sys.stdout.write("\ttorture test 1 (10000 iterations): ")
start_time = time.clock()
for i in range(10000):
(y,z,q) = object_.torture_signature_0(5000,
"Torture Test 1",
12345)
end_time = time.clock()
delta_time = end_time - start_time
total_time += delta_time
print("%f secs" % delta_time)
sys.stdout.write("\ttorture test 2 (10000 iterations): ")
start_time = time.clock()
for i in range(10000):
(y,z,q) = Everything.TestObj().torture_signature_0(5000,
"Torture Test 2",
12345)
end_time = time.clock()
delta_time = end_time - start_time
total_time += delta_time
print("%f secs" % delta_time)
sys.stdout.write("\ttorture test 3 (10000 iterations): ")
start_time = time.clock()
for i in range(10000):
try:
(y,z,q) = object_.torture_signature_1(5000,
"Torture Test 3",
12345)
except:
pass
end_time = time.clock()
delta_time = end_time - start_time
total_time += delta_time
print("%f secs" % delta_time)
sys.stdout.write("\ttorture test 4 (10000 iterations): ")
def callback(userdata):
pass
userdata = [1,2,3,4]
start_time = time.clock()
for i in range(10000):
(y,z,q) = Everything.test_torture_signature_2(5000,
callback,
userdata,
"Torture Test 4",
12345)
end_time = time.clock()
delta_time = end_time - start_time
total_time += delta_time
print("%f secs" % delta_time)
print("\t====")
print("\tTotal: %f sec" % total_time)
| lgpl-2.1 |
italomaia/turtle-linux | games/BubbleKing/lib/menu.py | 1 | 13774 | import os
import pygame
from pygame.locals import *
from pgu import engine
import data
from cnst import *
import levels
class Menu(engine.State):
def __init__(self,game):
self.game = game
def init(self):
self.font = self.game.font
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr','2.png')))
self.cur = 0
self.game.lcur = 0
self.levels = []
#for fname in os.listdir(data.filepath('levels')):
#if fname[0]=='.': continue
#self.levels.append((fname,fname.replace('.tga','')))
#self.levels.sort()
for fname,title in levels.LEVELS:
self.levels.append((fname,title))
self.items = [
('play the game!','start'),
('select <L>','play'),
('help','help'),
('credits','credits'),
('quit','quit'),
]
self.rects = []
self.frame = 0
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
x,y = 0,4
fnt = self.game.fonts['title']
c =(0,0,0)
text = TITLE
img = fnt.render(text,1,c)
screen.blit(img,((SW-img.get_width())/2,y))
y += 48
fnt = self.font
text = 'high: %05d'%self.game.high
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
x = 90
for n in xrange(0,len(self.items)):
text,value = self.items[n]
text = text.replace('L',self.levels[self.game.lcur][1])
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
if n == self.cur: c = (0xaa,0xaa,0xaa)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 24
text = 'www.imitationpickles.org'
c = (0x00,0x00,0x00)
img = fnt.render(text,1,c)
x = (SW-img.get_width())/2
y = SH-(img.get_height()+4)
screen.blit(img,(x+1,y+1))
c = (0xff,0xff,0xff)
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.game.music_play('title')
self.frame += 1
def event(self,e):
if e.type is USEREVENT and e.action == 'down':
self.cur = (self.cur+1)%len(self.items)
self.repaint()
elif e.type is USEREVENT and e.action == 'up':
self.cur = (self.cur-1+len(self.items))%len(self.items)
self.repaint()
elif e.type is USEREVENT and e.action == 'left':
self.game.lcur = (self.game.lcur-1+len(self.levels))%len(self.levels)
self.repaint()
elif e.type is USEREVENT and e.action == 'right':
self.game.lcur = (self.game.lcur+1)%len(self.levels)
self.repaint()
elif e.type is USEREVENT and e.action == 'exit':
return engine.Quit(self.game)
elif e.type is USEREVENT and (e.action == 'menu' or e.action == 'jump'):
text,value = self.items[self.cur]
if value == 'start':
self.game.init_play()
self.game.lcur = 0
import level
l = level.Level(self.game,None,self)
return Transition(self.game,l)
elif value == 'play':
self.game.init_play()
import level
l = level.Level(self.game,None,self)
return Transition(self.game,l)
elif value == 'quit':
return engine.Quit(self.game)
elif value == 'credits':
return Transition(self.game,Credits(self.game,self))
elif value == 'help':
return Transition(self.game,Help(self.game,self))
class Transition(engine.State):
def __init__(self,game,next):
self.game,self.next = game,next
def init(self):
self.s1 = self.game.screen.convert()
self.init2()
self.frame = 0
self.total = FPS
self.inc = 0
def init2(self):
if hasattr(self.next,'init') and not hasattr(self.next,'_init'):
self.next._init = 0
self.next.init()
self.s2 = self.game.screen.convert()
self.next.paint(self.s2)
def loop(self):
#self.frame += 1
self.inc += 1
#if (self.inc%2) == 0: self.frame += 1
self.frame += 1
if self.frame == self.total:
self.game.screen.blit(self.s2,(0,0))
self.game.flip()
return self.next
def update(self,screen):
return self.paint(screen)
def paint(self,screen):
f = self.frame
t = self.total
t2 = t/2
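        # Added commentary: the first half of the transition squeezes the old
        # frame down to a sliver, the second half grows the new frame back to
        # full width; the max(2, ...) floor keeps the scaled width positive.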
if f < t2:
i = self.s1
w = max(2,SW * (t2-f) / t2)
i = pygame.transform.scale(i,(w,SH*w/SW))
else:
f = t2-(f-t2)
i = self.s2
w = max(2,SW * (t2-f) / t2)
i = pygame.transform.scale(i,(w,SH*w/SW))
i = pygame.transform.scale(i,(SW,SH))
screen.blit(i,(0,0))
self.game.flip()
class Intro(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = FPS
self.moon = pygame.image.load(data.filepath(os.path.join('intro','moon2.png'))).convert()
self.black = self.moon.convert()
self.black.fill((0,0,0))
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
if self.frame == FPS*7:
return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
screen.fill((0,0,0))
f = self.frame
inc = FPS
if 0 < f < inc:
pass
f -= inc
inc = FPS*7
if 0 < f < inc:
a = 255
if f > FPS*2:
screen.blit(self.moon,(0,0))
a = 255- ((f-FPS*2)*255/(FPS*2))
self.black.set_alpha(a)
screen.blit(self.black,(0,0))
fnt = self.game.fonts['intro']
x,y = 8,0
for text in ['... July 20, 1969','man first','walked on','the moon.']:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
if f < FPS:
a = 255-(f*255/FPS)
self.black.set_alpha(a)
screen.blit(self.black,(0,0))
self.game.flip()
class Intro2(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.moon = pygame.image.load(data.filepath(os.path.join('intro','moon2.png'))).convert()
img = pygame.image.load(data.filepath(os.path.join('images','player','right.png')))
w = 160
self.player = pygame.transform.scale(img,(w,img.get_height()*w/img.get_width()))
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr','2.png')))
self.frame = 0
def loop(self):
self.frame += 1
if self.frame == FPS*2:
return Transition(self.game,self.next)
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
#screen.fill((0,0,0))
screen.blit(self.bkgr,(0,0))
fnt = self.game.fonts['intro']
x,y = 8,0
for text in ['This is','the year','of the','seahorse!']:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 36
screen.blit(self.player,(130,0))
self.game.flip()
class Prompt(engine.State):
def __init__(self,game,text,yes,no):
self.game = game
self.text = text
self.yes = yes
self.no = no
def init(self):
self.font = self.game.fonts['pause']
self.bkgr = self.game.screen.convert()
def event(self,e):
if e.type is KEYDOWN and e.key == K_y:
return self.yes
if e.type is KEYDOWN and e.key == K_n:
return self.no
def paint(self,screen):
screen.blit(self.bkgr,(0,0))
text = self.text
fnt = self.font
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x,y = (SW-img.get_width())/2,(SH-img.get_height())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
class Pause(engine.State):
def __init__(self,game,text,next):
self.game = game
self.text = text
self.next = next
def init(self):
self.font = self.game.fonts['pause']
self.bkgr = self.game.screen.convert()
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return self.next
def paint(self,screen):
screen.blit(self.bkgr,(0,0))
text = self.text
fnt = self.font
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x,y = (SW-img.get_width())/2,(SH-img.get_height())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
self.game.flip()
class Credits(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = 0
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr',"5.png"))).convert()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
#if self.frame == FPS*7:
#return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
fnt = self.game.fonts['help']
x,y = 8,10
for text in [
'Core Team',
'',
'philhassey - director, code, levels',
'trick - tiles, sprites',
'pekuja - code, levels',
'tim - music, levels',
'DrPetter - backgrounds, sfx',
'',
'Also thanks to:',
'fydo (level), Lerc (gfx), Tee (level)',
]:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x = (SW-img.get_width())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 20
self.game.flip()
class Help(engine.State):
def __init__(self,game,next):
self.game = game
self.next = next
def init(self):
self.frame = 0
self.bkgr = pygame.image.load(data.filepath(os.path.join('bkgr',"5.png"))).convert()
def update(self,screen):
return self.paint(screen)
def loop(self):
self.frame += 1
#if self.frame == FPS*7:
#return Transition(self.game,Intro2(self.game,self.next))
def event(self,e):
if e.type is KEYDOWN or (e.type is USEREVENT and e.action in ('jump','bubble','menu','exit')):
return Transition(self.game,self.next)
def paint(self,screen):
x = self.frame%(self.bkgr.get_width())
screen.blit(self.bkgr,(-x,0))
screen.blit(self.bkgr,(-x+self.bkgr.get_width(),0))
fnt = self.game.fonts['help']
x,y = 8,10
for text in [
'Help',
'',
'Use your arrow keys to',
'move the seahorse.',
'Button 1 - Jump',
'Button 2 - Shoot',
'',
'Enemies take 3 shots unless',
'you are powered up! You can',
'ride enemy bubbles.',
]:
c = (255,255,255)
img = fnt.render(text,1,(0,0,0))
x = (SW-img.get_width())/2
screen.blit(img,(x+2,y+2))
img = fnt.render(text,1,c)
screen.blit(img,(x,y))
y += 20
self.game.flip()
| gpl-3.0 |
cybersiddhu/biogo.boom | samtools-0.1.18/misc/varfilter.py | 80 | 5783 | #!/software/bin/python
# Author: lh3, converted to python and modified to add -C option by Aylwyn Scally
#
# About:
# varfilter.py is a port of Heng's samtools.pl varFilter script into
# Python, with an additional -C INT option. This option sets a minimum
# consensus score, above which the script will output a pileup line
# wherever it _could have_ called a variant, even if none is actually
# called (i.e. hom-ref positions). This is important if you want to
# subsequently merge the calls with those for another individual to get a
# synoptic view of calls at each site. Without this option, and in all
# other respects, it behaves like samtools.pl varFilter.
#
# Aylwyn Scally as6@sanger.ac.uk
# Filtration code:
#
# C low CNS quality (hom-ref only)
# d low depth
# D high depth
# W too many SNPs in a window (SNP only)
# G close to a high-quality indel (SNP only)
# Q low RMS mapping quality (SNP only)
# g close to another indel with higher quality (indel only)
# s low SNP quality (SNP only)
# i low indel quality (indel only)
import sys
import getopt
def usage():
print '''usage: varfilter.py [options] [cns-pileup]
Options: -Q INT minimum RMS mapping quality for SNPs
-q INT minimum RMS mapping quality for gaps
-d INT minimum read depth
-D INT maximum read depth
-S INT minimum SNP quality
-i INT minimum indel quality
-C INT minimum consensus quality for hom-ref sites
-G INT min indel score for nearby SNP filtering
-w INT SNP within INT bp around a gap to be filtered
-W INT window size for filtering dense SNPs
-N INT max number of SNPs in a window
-l INT window size for filtering adjacent gaps
         -s INT score factor for nearby indel filtering
         -p print filtered variants'''
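# Example invocation (illustrative; the file names below are hypothetical):
# pipe a consensus pileup into the script, keep sites with read depth between
# 4 and 100, and also report hom-ref positions whose consensus quality is at
# least 20 via the -C option described above:
#
#   samtools pileup -cf ref.fa aln.bam | varfilter.py -d 4 -D 100 -C 20 > calls.flt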
def varFilter_aux(first, is_print):
try:
if first[1] == 0:
sys.stdout.write("\t".join(first[4:]) + "\n")
elif is_print:
sys.stderr.write("\t".join(["UQdDWGgsiCX"[first[1]]] + first[4:]) + "\n")
except IOError:
sys.exit()
mindepth = 3
maxdepth = 100
gapgapwin = 30
minsnpmapq = 25
mingapmapq = 10
minindelscore = 25
scorefactor = 100
snpgapwin = 10
densesnpwin = 10
densesnps = 2
printfilt = False
minsnpq = 0
minindelq = 0
mincnsq = 0
try:
# note: 's:' is included so that the '-s' (score factor) branch below is
# actually reachable through getopt
options, args = getopt.gnu_getopt(sys.argv[1:], 'ps:q:d:D:l:Q:w:W:N:G:S:i:C:', [])
except getopt.GetoptError:
usage()
sys.exit(2)
for (oflag, oarg) in options:
if oflag == '-d': mindepth = int(oarg)
if oflag == '-D': maxdepth = int(oarg)
if oflag == '-l': gapgapwin = int(oarg)
if oflag == '-Q': minsnpmapq = int(oarg)
if oflag == '-q': mingapmapq = int(oarg)
if oflag == '-G': minindelscore = int(oarg)
if oflag == '-s': scorefactor = int(oarg)
if oflag == '-w': snpgapwin = int(oarg)
if oflag == '-W': densesnpwin = int(oarg)
if oflag == '-C': mincnsq = int(oarg)
if oflag == '-N': densesnps = int(oarg)
if oflag == '-p': printfilt = True
if oflag == '-S': minsnpq = int(oarg)
if oflag == '-i': minindelq = int(oarg)
if len(args) < 1:
inp = sys.stdin
else:
inp = open(args[0])
# calculate the window size
max_dist = max(gapgapwin, snpgapwin, densesnpwin)
staging = []
for t in (line.strip().split() for line in inp):
(flt, score) = (0, -1)
# non-var sites
if t[3] == '*/*':
continue
is_snp = t[2].upper() != t[3].upper()
if not (is_snp or mincnsq):
continue
# clear the out-of-range elements
while staging:
# Still on the same chromosome and the first element's window still affects this position?
if staging[0][4] == t[0] and int(staging[0][5]) + staging[0][2] + max_dist >= int(t[1]):
break
varFilter_aux(staging.pop(0), printfilt)
# first a simple filter
if int(t[7]) < mindepth:
flt = 2
elif int(t[7]) > maxdepth:
flt = 3
if t[2] == '*': # an indel
if minindelq and minindelq > int(t[5]):
flt = 8
elif is_snp:
if minsnpq and minsnpq> int(t[5]):
flt = 7
else:
if mincnsq and mincnsq > int(t[4]):
flt = 9
# site dependent filters
dlen = 0
if flt == 0:
if t[2] == '*': # an indel
# If deletion, remember the length of the deletion
(a,b) = t[3].split('/')
alen = len(a) - 1
blen = len(b) - 1
if alen>blen:
if a[0] == '-': dlen=alen
elif b[0] == '-': dlen=blen
if int(t[6]) < mingapmapq:
flt = 1
# filtering SNPs
if int(t[5]) >= minindelscore:
for x in (y for y in staging if y[3]):
# Is it a SNP and is it outside the SNP filter window?
if x[0] >= 0 or int(x[5]) + x[2] + snpgapwin < int(t[1]):
continue
if x[1] == 0:
x[1] = 5
# calculate the filtering score (different from indel quality)
score = int(t[5])
if t[8] != '*':
score += scorefactor * int(t[10])
if t[9] != '*':
score += scorefactor * int(t[11])
# check the staging list for indel filtering
for x in (y for y in staging if y[3]):
# Is it a SNP and is it outside the gap filter window
if x[0] < 0 or int(x[5]) + x[2] + gapgapwin < int(t[1]):
continue
if x[0] < score:
x[1] = 6
else:
flt = 6
break
else: # a SNP or hom-ref
if int(t[6]) < minsnpmapq:
flt = 1
# check adjacent SNPs
k = 1
for x in (y for y in staging if y[3]):
if x[0] < 0 and int(x[5]) + x[2] + densesnpwin >= int(t[1]) and (x[1] == 0 or x[1] == 4 or x[1] == 5):
k += 1
# filtering is necessary
if k > densesnps:
flt = 4
for x in (y for y in staging if y[3]):
if x[0] < 0 and int(x[5]) + x[2] + densesnpwin >= int(t[1]) and x[1] == 0:
x[1] = 4
else: # then check gap filter
for x in (y for y in staging if y[3]):
if x[0] < 0 or int(x[5]) + x[2] + snpgapwin < int(t[1]):
continue
if x[0] >= minindelscore:
flt = 5
break
staging.append([score, flt, dlen, is_snp] + t)
# output the last few elements in the staging list
while staging:
varFilter_aux(staging.pop(0), printfilt)
| bsd-3-clause |
davidzchen/tensorflow | tensorflow/python/profiler/pprof_profiler.py | 23 | 15276 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Profiler for TensorFlow models that outputs data in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto for pprof
profile format.
The following needs to be set for profiler to work:
* trace_level needs to be set to FULL_TRACE
* run_metadata object should be passed in to session.run call
Sample usage:
options = tf.compat.v1.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.compat.v1.RunMetadata()
with tf.compat.v1.Session as sess:
...
sess.run(computation, run_metadata=run_metadata, options=options)
pprof_profiler.profile(sess.graph, run_metadata, output_dir)
The code above would output a pprof profile to separate output_dir/.*.pb.gz
file for each device. These files can be passed to pprof for formatting.
For e.g.:
pprof -png --nodecount=100 --sample_index=1 output_dir/profile_output.pb.gz
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from collections import namedtuple
import gzip
import os
import string
import sys
import time
from proto import profile_pb2
if sys.version_info < (3,):
maketrans = string.maketrans
else:
maketrans = str.maketrans
ProfileDatum = namedtuple('ProfileDatum', [
'node_exec_stats', 'op_type', 'traceback'])
class StringTable(object):
"""Keeps track of strings to add to string_table in pprof proto."""
def __init__(self):
# Pprof requires first entry in string_table to be ''.
self._string_table = ['']
self._string_to_index = {'': 0}
def index_of(self, value_str):
"""Get index of value_str in the string table.
If value_str is not in the string table, we will add it at the end
and then return the new index.
Args:
value_str: (string) Value to lookup/add in/to the string table.
Returns:
Index of value_str in the string table.
"""
if value_str is None:
value_str = ''
if value_str in self._string_to_index:
return self._string_to_index[value_str]
index = len(self._string_table)
self._string_table.append(value_str)
self._string_to_index[value_str] = index
return index
def next_index(self):
"""Gets index that would be assigned to the next added string.
Returns:
Index of the next string if it was added.
"""
return len(self._string_table)
def string_table(self):
"""Returns a list of strings to store in pprof's string_table."""
return self._string_table
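# Illustration of the contract above (added commentary, not part of the
# original module): the empty string always occupies index 0, and repeated
# lookups of the same value are deduplicated to a single index.
#
#   table = StringTable()
#   table.index_of('count')   # -> 1 (appended after the mandatory '')
#   table.index_of('count')   # -> 1 (same index on repeat lookup)
#   table.string_table()      # -> ['', 'count']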
class Functions(object):
"""Keeps track of `Function` protos for pprof profile."""
def __init__(self, string_table):
"""Constructor.
Args:
string_table: A `StringTable` object.
"""
self._string_table = string_table
# Maps tuples in the form (file_path, function_name, start_line_number)
# to `Function` protos.
self._function_key_to_function = {}
def index_of(self, file_path, function_name, function_start_line):
"""Returns index of the function, adding the function if needed.
Args:
file_path: (string) Path to file where the function is defined.
function_name: (string) Function name.
function_start_line: (integer) Start line number of function definition.
Returns:
Function index.
"""
function_key = (file_path, function_name, function_start_line)
if function_key in self._function_key_to_function:
return self._function_key_to_function[function_key].id
else:
# Function indexes should start from 1
function_index = len(self._function_key_to_function) + 1
function = profile_pb2.Function()
function.id = function_index
function.name = self._string_table.index_of(function_name)
function.filename = self._string_table.index_of(file_path)
function.start_line = function_start_line
self._function_key_to_function[function_key] = function
return function_index
def function_protos(self):
"""Returns list of `profile_pb2.Function` protos."""
return self._function_key_to_function.values()
class Locations(object):
"""Keeps track of `Location` protos for pprof profile.
`Locations` store information about function call locations.
"""
def __init__(self, functions):
"""Constructor.
Args:
functions: A `Functions` object.
"""
self._functions = functions
# Maps tuples in the form (file_path, called_function_name, line_number)
# to `Location` protos.
self._location_key_to_location = {}
def index_of(
self, file_path, line_number, called_function_name, called_file_path,
called_function_start_line):
"""Returns index of the location, adding the location if needed.
Args:
file_path: (string) Path to file that makes the call.
line_number: (integer) Call line number.
called_function_name: (string) Function name of the function called at
`file_path` and `line_number`.
called_file_path: (string) Path to file where the called function is
defined.
called_function_start_line: (integer) Start line number of called
function definition in `called_file_path` file.
Returns:
Index of location.
"""
location_key = (file_path, called_function_name, line_number)
if location_key in self._location_key_to_location:
location = self._location_key_to_location[location_key]
return location.id
else:
# Location indexes should start from 1
location_index = len(self._location_key_to_location) + 1
location = profile_pb2.Location()
location.id = location_index
self._location_key_to_location[location_key] = location
line = location.line.add()
line.function_id = self._functions.index_of(
called_file_path, called_function_name, called_function_start_line)
line.line = line_number
return location_index
def location_protos(self):
"""Returns list of `profile_pb2.Location` protos."""
return self._location_key_to_location.values()
class Samples(object):
"""Keeps track of `Sample` protos for pprof profile.
Samples store the following statistics in order:
count, all_time, op_time
"""
def __init__(self, string_table):
"""Constructor.
Args:
string_table: A `StringTable` object.
"""
self._string_table = string_table
# TODO(annarev): figure out if location is unique for each node name.
# If not, also key this dictionary based on location ids.
self._node_name_to_sample = {}
def add(self, datum, location_ids):
"""Adds a sample data point.
Args:
datum: `ProfileDatum` to add a sample for.
      location_ids: List of numeric location ids for this
sample.
"""
node_name = datum.node_exec_stats.node_name
if node_name in self._node_name_to_sample:
sample = self._node_name_to_sample[node_name]
sample.location_id.extend(location_ids)
else:
sample = profile_pb2.Sample()
# Sample stores 3 values: count, all_time, op_time
sample.value.extend([0, 0, 0])
label = sample.label.add()
label.key = self._string_table.index_of('node_name')
label.str = self._string_table.index_of(node_name)
label = sample.label.add()
label.key = self._string_table.index_of('op_type')
label.str = self._string_table.index_of(datum.op_type)
self._node_name_to_sample[node_name] = sample
sample.value[0] += 1
sample.value[1] += datum.node_exec_stats.all_end_rel_micros
sample.value[2] += (
datum.node_exec_stats.op_end_rel_micros -
datum.node_exec_stats.op_start_rel_micros)
def get_sample_protos(self):
"""Returns list of `Sample` protos for pprof profile."""
return self._node_name_to_sample.values()
class PprofProfiler(object):
"""Creates profiles in pprof format."""
def __init__(self, graph, run_metadata):
"""Constructor.
Args:
graph: A `Graph` instance.
run_metadata: A list of `RunMetadata` objects.
"""
self._graph = graph
self._run_metadata = run_metadata
self._string_table = StringTable()
self._functions = Functions(self._string_table)
self._locations = Locations(self._functions)
def profile(self):
"""Generates pprof profiles.
Returns:
Dictionary mapping from device name to proto in `profile_pb2.Profile`
format.
"""
profiles = {}
data_generator_func = self._get_profile_data_generator()
for device_index, device_stats in enumerate(
self._run_metadata.step_stats.dev_stats):
# Create profile
pprof_proto = self._get_pprof_proto(data_generator_func(device_stats))
if not pprof_proto.sample:
print(
'Not enough data to create profile for device %s. Did you pass '
'RunMetadata to session.run call?' % device_stats.device)
continue
# Add device name comment
device_count = len(self._run_metadata.step_stats.dev_stats)
device_description = (
'Device %d of %d: %s' %
(device_index + 1, device_count, device_stats.device))
device_description_str_index = self._string_table.next_index()
pprof_proto.string_table.append(device_description)
pprof_proto.comment.append(device_description_str_index)
profiles[device_stats.device] = pprof_proto
return profiles
def _get_pprof_proto(self, profile_datum_generator):
"""Returns profile data in pprof proto format.
Args:
profile_datum_generator: Generator outputting `ProfileDatum` objects.
Returns:
A proto in pprof format.
"""
pprof_profile = profile_pb2.Profile()
samples = Samples(self._string_table)
for datum in profile_datum_generator:
if not datum.traceback:
continue
stack_frame = datum.traceback[-1]
after_apply_op = False
location_ids = []
# We add locations from stack trace in bottom-up order.
for stack_frame_index in reversed(range(len(datum.traceback) - 1)):
prev_stack_frame = stack_frame
stack_frame = datum.traceback[stack_frame_index]
# Call at current frame calls function at previous frame.
prev_file_path = prev_stack_frame[0]
prev_function = prev_stack_frame[2]
prev_function_start_line = -1
curr_file_path = stack_frame[0]
curr_line_number = stack_frame[1]
# Skip all calls up to apply_op since they are the same for all ops.
if not after_apply_op:
if prev_function == 'apply_op':
after_apply_op = True
continue
location_index = self._locations.index_of(
curr_file_path, curr_line_number,
prev_function, prev_file_path, prev_function_start_line)
location_ids.append(location_index)
samples.add(datum, location_ids)
sample_type_description = 'count'
sample_type = pprof_profile.sample_type.add()
sample_type.type = self._string_table.index_of(sample_type_description)
sample_type.unit = self._string_table.index_of('count')
    sample_type_description = 'all_time'
    sample_type = pprof_profile.sample_type.add()
    sample_type.type = self._string_table.index_of(sample_type_description)
    # `NodeExecStats` timings (all_time, op_time) are reported in microseconds.
    sample_type.unit = self._string_table.index_of('microseconds')
    sample_type_description = 'op_time'
    sample_type = pprof_profile.sample_type.add()
    sample_type.type = self._string_table.index_of(sample_type_description)
    sample_type.unit = self._string_table.index_of('microseconds')
pprof_profile.string_table.extend(self._string_table.string_table())
pprof_profile.sample.extend(samples.get_sample_protos())
pprof_profile.function.extend(self._functions.function_protos())
pprof_profile.location.extend(self._locations.location_protos())
return pprof_profile
def _get_profile_data_generator(self):
"""Get function that generates `ProfileDatum` objects.
Returns:
A function that generates `ProfileDatum` objects.
"""
node_to_traceback = defaultdict(list)
node_to_op_type = defaultdict(str)
for op in self._graph.get_operations():
node_to_traceback[op.name] = op.traceback
node_to_op_type[op.name] = op.type
def profile_data_generator(device_step_stats):
for node_stats in device_step_stats.node_stats:
if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':
continue
yield ProfileDatum(
node_stats,
node_to_op_type[node_stats.node_name],
node_to_traceback[node_stats.node_name])
return profile_data_generator
def get_profiles(graph, run_metadata):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
Returns:
A dictionary mapping from device name to pprof proto for that device.
"""
return PprofProfiler(graph, run_metadata).profile()
def profile(graph, run_metadata, output_dir=None):
"""Generate profiles in pprof format.
See https://github.com/google/pprof/blob/master/proto/profile.proto
for pprof proto format.
Args:
graph: A `Graph` object.
run_metadata: A `RunMetadata` proto.
output_dir: (string) Directory to output pprof profile to.
Profile files for each device will be stored in compressed
serialized proto format. If output_dir is None, profile protos
will be printed to stdout instead.
Returns:
List of output files created by this profile call.
(Note: this list will be empty if output_dir is None)
"""
profiles = get_profiles(graph, run_metadata)
output_file_template = None
if output_dir:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
time_suffix = time.strftime('%Y%m%d%H%M%S')
output_file_template = os.path.join(
output_dir, '%s_' + time_suffix + '.pb.gz')
profile_files = []
for device, pprof_proto in profiles.items():
if output_file_template is None:
print('No output directory specified, printing to stdout instead.')
print(pprof_proto)
else:
device_name = str(device).strip('/').translate(
maketrans('/:', '__'))
profile_file = output_file_template % device_name
profile_files.append(profile_file)
with gzip.open(profile_file, 'w') as output_file:
print('Writing profile to %s...' % profile_file)
output_file.write(pprof_proto.SerializeToString())
return profile_files
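# A minimal usage sketch (TF1-style session API; ``fetches`` and the output
# directory below are illustrative, not part of this module):
#
#   import tensorflow as tf
#
#   run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
#   run_metadata = tf.RunMetadata()
#   with tf.Session() as sess:
#     sess.run(fetches, options=run_options, run_metadata=run_metadata)
#     profile(sess.graph, run_metadata, output_dir='/tmp/pprof')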
| apache-2.0 |
sedden/pkg-python-django-rcsfield | rcsfield/backends/__init__.py | 3 | 1361 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
__all__ = ('backend',)
RCS_BACKEND = getattr(settings, 'RCS_BACKEND', 'dummy')
def get_backend(import_path):
    if '.' not in import_path:
import_path = "rcsfield.backends.%s" % import_path
try:
mod = __import__(import_path, {}, {}, [''])
    except ImportError:
# No backend found, display an error message and a list of all
# bundled backends.
backend_dir = __path__[0]
available_backends = [f.split('.py')[0] for f in os.listdir(backend_dir) if not f.startswith('_') and not f.startswith('.') and not f.endswith('.pyc')]
available_backends.sort()
if RCS_BACKEND not in available_backends:
raise ImproperlyConfigured("%s isn't an available revision control (rcsfield) backend. Available options are: %s" % \
(RCS_BACKEND, ', '.join(map(repr, available_backends))))
# if the RCS_BACKEND is available in the backend directory
# and an ImportError is raised, don't suppress it
else:
raise
try:
return getattr(mod, 'rcs')
except AttributeError:
raise ImproperlyConfigured('Backend "%s" does not define a "rcs" instance.' % import_path)
backend = get_backend(RCS_BACKEND)
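# Usage sketch: with the default ``RCS_BACKEND = 'dummy'`` (or any other bare
# backend name), the import path expands to ``rcsfield.backends.dummy`` and
# that module's ``rcs`` instance is exposed here as ``backend``:
#
#   from rcsfield.backends import backend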
| bsd-3-clause |
mathiasertl/fabric | fabric/context_managers.py | 1 | 20926 | """
Context managers for use with the ``with`` statement.
.. note:: If you are using multiple directly nested ``with`` statements, it can
be convenient to use multiple context expressions in one single with
statement. Instead of writing::
with cd('/path/to/app'):
with prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
you can write::
with cd('/path/to/app'), prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
"""
from contextlib import contextmanager
import six
import socket
import select
from fabric.thread_handling import ThreadHandler
from fabric.state import output, win32, connections, env
from fabric import state
from fabric.utils import isatty
if six.PY2:
from contextlib import nested
else:
from contextlib import ExitStack
class nested(ExitStack):
def __init__(self, *managers):
super(nested, self).__init__()
for manager in managers:
self.enter_context(manager)
if not win32:
import termios
import tty
def _set_output(groups, which):
"""
Refactored subroutine used by ``hide`` and ``show``.
"""
previous = {}
try:
# Preserve original values, pull in new given value to use
for group in output.expand_aliases(groups):
previous[group] = output[group]
output[group] = which
# Yield control
yield
finally:
# Restore original values
output.update(previous)
def documented_contextmanager(func):
wrapper = contextmanager(func)
wrapper.undecorated = func
return wrapper
@documented_contextmanager
def show(*groups):
"""
Context manager for setting the given output ``groups`` to True.
``groups`` must be one or more strings naming the output groups defined in
`~fabric.state.output`. The given groups will be set to True for the
duration of the enclosed block, and restored to their previous value
afterwards.
For example, to turn on debug output (which is typically off by default)::
def my_task():
with show('debug'):
run('ls /var/www')
As almost all output groups are displayed by default, `show` is most useful
for turning on the normally-hidden ``debug`` group, or when you know or
suspect that code calling your own code is trying to hide output with
`hide`.
"""
return _set_output(groups, True)
@documented_contextmanager
def hide(*groups):
"""
Context manager for setting the given output ``groups`` to False.
``groups`` must be one or more strings naming the output groups defined in
`~fabric.state.output`. The given groups will be set to False for the
duration of the enclosed block, and restored to their previous value
afterwards.
For example, to hide the "[hostname] run:" status lines, as well as
preventing printout of stdout and stderr, one might use `hide` as follows::
def my_task():
with hide('running', 'stdout', 'stderr'):
run('ls /var/www')
"""
return _set_output(groups, False)
@documented_contextmanager
def _setenv(variables):
"""
Context manager temporarily overriding ``env`` with given key/value pairs.
A callable that returns a dict can also be passed. This is necessary when
new values are being calculated from current values, in order to ensure that
the "current" value is current at the time that the context is entered, not
when the context manager is initialized. (See Issue #736.)
This context manager is used internally by `settings` and is not intended
to be used directly.
"""
if callable(variables):
variables = variables()
clean_revert = variables.pop('clean_revert', False)
previous = {}
new = []
for key, value in six.iteritems(variables):
if key in state.env:
previous[key] = state.env[key]
else:
new.append(key)
state.env[key] = value
try:
yield
finally:
if clean_revert:
for key, value in six.iteritems(variables):
# If the current env value for this key still matches the
# value we set it to beforehand, we are OK to revert it to the
# pre-block value.
if key in state.env and value == state.env[key]:
if key in previous:
state.env[key] = previous[key]
else:
del state.env[key]
else:
state.env.update(previous)
for key in new:
del state.env[key]
def settings(*args, **kwargs):
"""
Nest context managers and/or override ``env`` variables.
`settings` serves two purposes:
* Most usefully, it allows temporary overriding/updating of ``env`` with
any provided keyword arguments, e.g. ``with settings(user='foo'):``.
Original values, if any, will be restored once the ``with`` block closes.
* The keyword argument ``clean_revert`` has special meaning for
``settings`` itself (see below) and will be stripped out before
execution.
* In addition, it will use `contextlib.nested`_ to nest any given
non-keyword arguments, which should be other context managers, e.g.
``with settings(hide('stderr'), show('stdout')):``.
.. _contextlib.nested: http://docs.python.org/library/contextlib.html#contextlib.nested
These behaviors may be specified at the same time if desired. An example
will hopefully illustrate why this is considered useful::
def my_task():
with settings(
hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True
):
if run('ls /etc/lsb-release'):
return 'Ubuntu'
elif run('ls /etc/redhat-release'):
return 'RedHat'
The above task executes a `run` statement, but will warn instead of
aborting if the ``ls`` fails, and all output -- including the warning
itself -- is prevented from printing to the user. The end result, in this
scenario, is a completely silent task that allows the caller to figure out
what type of system the remote host is, without incurring the handful of
output that would normally occur.
Thus, `settings` may be used to set any combination of environment
variables in tandem with hiding (or showing) specific levels of output, or
in tandem with any other piece of Fabric functionality implemented as a
context manager.
If ``clean_revert`` is set to ``True``, ``settings`` will **not** revert
keys which are altered within the nested block, instead only reverting keys
whose values remain the same as those given. More examples will make this
clear; below is how ``settings`` operates normally::
# Before the block, env.parallel defaults to False, host_string to None
with settings(parallel=True, host_string='myhost'):
# env.parallel is True
# env.host_string is 'myhost'
env.host_string = 'otherhost'
# env.host_string is now 'otherhost'
# Outside the block:
# * env.parallel is False again
# * env.host_string is None again
The internal modification of ``env.host_string`` is nullified -- not always
desirable. That's where ``clean_revert`` comes in::
# Before the block, env.parallel defaults to False, host_string to None
with settings(parallel=True, host_string='myhost', clean_revert=True):
# env.parallel is True
# env.host_string is 'myhost'
env.host_string = 'otherhost'
# env.host_string is now 'otherhost'
# Outside the block:
# * env.parallel is False again
# * env.host_string remains 'otherhost'
Brand new keys which did not exist in ``env`` prior to using ``settings``
are also preserved if ``clean_revert`` is active. When ``False``, such keys
are removed when the block exits.
.. versionadded:: 1.4.1
The ``clean_revert`` kwarg.
"""
managers = list(args)
if kwargs:
managers.append(_setenv(kwargs))
return nested(*managers)
def cd(path):
"""
Context manager that keeps directory state when calling remote operations.
Any calls to `run`, `sudo`, `get`, or `put` within the wrapped block will
implicitly have a string similar to ``"cd <path> && "`` prefixed in order
to give the sense that there is actually statefulness involved.
.. note::
`cd` only affects *remote* paths -- to modify *local* paths, use
`~fabric.context_managers.lcd`.
Because use of `cd` affects all such invocations, any code making use of
those operations, such as much of the ``contrib`` section, will also be
affected by use of `cd`.
Like the actual 'cd' shell builtin, `cd` may be called with relative paths
(keep in mind that your default starting directory is your remote user's
``$HOME``) and may be nested as well.
Below is a "normal" attempt at using the shell 'cd', which doesn't work due
to how shell-less SSH connections are implemented -- state is **not** kept
between invocations of `run` or `sudo`::
run('cd /var/www')
run('ls')
The above snippet will list the contents of the remote user's ``$HOME``
instead of ``/var/www``. With `cd`, however, it will work as expected::
with cd('/var/www'):
run('ls') # Turns into "cd /var/www && ls"
Finally, a demonstration (see inline comments) of nesting::
with cd('/var/www'):
run('ls') # cd /var/www && ls
with cd('website1'):
run('ls') # cd /var/www/website1 && ls
.. note::
This context manager is currently implemented by appending to (and, as
always, restoring afterwards) the current value of an environment
variable, ``env.cwd``. However, this implementation may change in the
future, so we do not recommend manually altering ``env.cwd`` -- only
the *behavior* of `cd` will have any guarantee of backwards
compatibility.
.. note::
Space characters will be escaped automatically to make dealing with
such directory names easier.
.. versionchanged:: 1.0
Applies to `get` and `put` in addition to the command-running
operations.
.. seealso:: `~fabric.context_managers.lcd`
"""
return _change_cwd('cwd', path)
def lcd(path):
"""
Context manager for updating local current working directory.
This context manager is identical to `~fabric.context_managers.cd`, except
that it changes a different env var (`lcwd`, instead of `cwd`) and thus
only affects the invocation of `~fabric.operations.local` and the local
arguments to `~fabric.operations.get`/`~fabric.operations.put`.
Relative path arguments are relative to the local user's current working
directory, which will vary depending on where Fabric (or Fabric-using code)
was invoked. You can check what this is with `os.getcwd
<http://docs.python.org/release/2.6/library/os.html#os.getcwd>`_. It may be
useful to pin things relative to the location of the fabfile in use, which
may be found in :ref:`env.real_fabfile <real-fabfile>`
.. versionadded:: 1.0
"""
return _change_cwd('lcwd', path)
def _change_cwd(which, path):
path = path.replace(' ', r'\ ')
if state.env.get(which) and not path.startswith('/') and not path.startswith('~'):
new_cwd = state.env.get(which) + '/' + path
else:
new_cwd = path
return _setenv({which: new_cwd})
def path(path, behavior='append'):
"""
Append the given ``path`` to the PATH used to execute any wrapped commands.
Any calls to `run` or `sudo` within the wrapped block will implicitly have
a string similar to ``"PATH=$PATH:<path> "`` prepended before the given
command.
You may customize the behavior of `path` by specifying the optional
``behavior`` keyword argument, as follows:
* ``'append'``: append given path to the current ``$PATH``, e.g.
``PATH=$PATH:<path>``. This is the default behavior.
* ``'prepend'``: prepend given path to the current ``$PATH``, e.g.
``PATH=<path>:$PATH``.
* ``'replace'``: ignore previous value of ``$PATH`` altogether, e.g.
``PATH=<path>``.
.. note::
This context manager is currently implemented by modifying (and, as
always, restoring afterwards) the current value of environment
variables, ``env.path`` and ``env.path_behavior``. However, this
implementation may change in the future, so we do not recommend
manually altering them directly.
.. versionadded:: 1.0
"""
return _setenv({'path': path, 'path_behavior': behavior})
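# A short sketch of `path` in use (the command shown is illustrative)::
#
#     with path('/opt/tools/bin', behavior='prepend'):
#         run('mytool --version')  # PATH=/opt/tools/bin:$PATH mytool --version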
def prefix(command):
"""
Prefix all wrapped `run`/`sudo` commands with given command plus ``&&``.
This is nearly identical to `~fabric.operations.cd`, except that nested
invocations append to a list of command strings instead of modifying a
single string.
Most of the time, you'll want to be using this alongside a shell script
which alters shell state, such as ones which export or alter shell
environment variables.
For example, one of the most common uses of this tool is with the
``workon`` command from `virtualenvwrapper
<http://www.doughellmann.com/projects/virtualenvwrapper/>`_::
with prefix('workon myvenv'):
run('./manage.py syncdb')
In the above snippet, the actual shell command run would be this::
$ workon myvenv && ./manage.py syncdb
This context manager is compatible with `~fabric.context_managers.cd`, so
if your virtualenv doesn't ``cd`` in its ``postactivate`` script, you could
do the following::
with cd('/path/to/app'):
with prefix('workon myvenv'):
run('./manage.py syncdb')
run('./manage.py loaddata myfixture')
Which would result in executions like so::
$ cd /path/to/app && workon myvenv && ./manage.py syncdb
$ cd /path/to/app && workon myvenv && ./manage.py loaddata myfixture
Finally, as alluded to near the beginning,
`~fabric.context_managers.prefix` may be nested if desired, e.g.::
with prefix('workon myenv'):
run('ls')
with prefix('source /some/script'):
run('touch a_file')
The result::
$ workon myenv && ls
$ workon myenv && source /some/script && touch a_file
Contrived, but hopefully illustrative.
"""
return _setenv(lambda: {'command_prefixes': state.env.command_prefixes + [command]})
@documented_contextmanager
def char_buffered(pipe):
"""
Force local terminal ``pipe`` be character, not line, buffered.
Only applies on Unix-based systems; on Windows this is a no-op.
"""
if win32 or not isatty(pipe):
yield
else:
old_settings = termios.tcgetattr(pipe)
tty.setcbreak(pipe)
try:
yield
finally:
termios.tcsetattr(pipe, termios.TCSADRAIN, old_settings)
def shell_env(**kw):
"""
Set shell environment variables for wrapped commands.
For example, the below shows how you might set a ZeroMQ related environment
variable when installing a Python ZMQ library::
with shell_env(ZMQ_DIR='/home/user/local'):
run('pip install pyzmq')
As with `~fabric.context_managers.prefix`, this effectively turns the
``run`` command into::
$ export ZMQ_DIR='/home/user/local' && pip install pyzmq
Multiple key-value pairs may be given simultaneously.
.. note::
If used to affect the behavior of `~fabric.operations.local` when
running from a Windows localhost, ``SET`` commands will be used to
implement this feature.
"""
return _setenv({'shell_env': kw})
def _forwarder(chan, sock):
# Bidirectionally forward data between a socket and a Paramiko channel.
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
sock.send(data)
chan.close()
sock.close()
@documented_contextmanager
def remote_tunnel(remote_port, local_port=None, local_host="localhost",
remote_bind_address="127.0.0.1"):
"""
Create a tunnel forwarding a locally-visible port to the remote target.
For example, you can let the remote host access a database that is
installed on the client host::
# Map localhost:6379 on the server to localhost:6379 on the client,
# so that the remote 'redis-cli' program ends up speaking to the local
# redis-server.
with remote_tunnel(6379):
run("redis-cli -i")
The database might be installed on a client only reachable from the client
host (as opposed to *on* the client itself)::
# Map localhost:6379 on the server to redis.internal:6379 on the client
with remote_tunnel(6379, local_host="redis.internal")
run("redis-cli -i")
``remote_tunnel`` accepts up to four arguments:
* ``remote_port`` (mandatory) is the remote port to listen to.
* ``local_port`` (optional) is the local port to connect to; the default is
the same port as the remote one.
* ``local_host`` (optional) is the locally-reachable computer (DNS name or
IP address) to connect to; the default is ``localhost`` (that is, the
same computer Fabric is running on).
* ``remote_bind_address`` (optional) is the remote IP address to bind to
for listening, on the current target. It should be an IP address assigned
to an interface on the target (or a DNS name that resolves to such IP).
You can use "0.0.0.0" to bind to all interfaces.
.. note::
By default, most SSH servers only allow remote tunnels to listen to the
localhost interface (127.0.0.1). In these cases, `remote_bind_address`
is ignored by the server, and the tunnel will listen only to 127.0.0.1.
.. versionadded: 1.6
"""
if local_port is None:
local_port = remote_port
sockets = []
channels = []
threads = []
def accept(channel, src, dest):
src_addr, src_port = src
dest_addr, dest_port = dest
channels.append(channel)
sock = socket.socket()
sockets.append(sock)
try:
sock.connect((local_host, local_port))
except Exception:
print("[%s] rtunnel: cannot connect to %s:%d (from local)" %
(env.host_string, local_host, local_port))
channel.close()
return
print("[%s] rtunnel: opened reverse tunnel: %r -> %r -> %r"
% (env.host_string, channel.origin_addr,
channel.getpeername(), (local_host, local_port)))
th = ThreadHandler('fwd', _forwarder, channel, sock)
threads.append(th)
transport = connections[env.host_string].get_transport()
transport.request_port_forward(remote_bind_address, remote_port, handler=accept)
try:
yield
finally:
for sock, chan, th in zip(sockets, channels, threads):
sock.close()
chan.close()
th.thread.join()
th.raise_if_needed()
transport.cancel_port_forward(remote_bind_address, remote_port)
quiet = lambda: settings(hide('everything'), warn_only=True)
quiet.__doc__ = """
Alias to ``settings(hide('everything'), warn_only=True)``.
Useful for wrapping remote interrogative commands which you expect to fail
occasionally, and/or which you want to silence.
Example::
with quiet():
have_build_dir = run("test -e /tmp/build").succeeded
When used in a task, the above snippet will not produce any ``run: test -e
/tmp/build`` line, nor will any stdout/stderr display, and command failure
is ignored.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.hide`
.. versionadded:: 1.5
"""
warn_only = lambda: settings(warn_only=True)
warn_only.__doc__ = """
Alias to ``settings(warn_only=True)``.
.. seealso::
:ref:`env.warn_only <warn_only>`,
`~fabric.context_managers.settings`,
`~fabric.context_managers.quiet`
"""
| bsd-2-clause |
LokiW/extendable-cards | extendable_cards/view/game_view.py | 1 | 6424 | from extendable_cards.view.graphics import Rectangle, Point, Text
from tkinter import Button
class GameOutline(object):
def __init__(self, window, dx, dy, w, h):
self.top_y = dy
self.bottom_y = dy+h
self.right_x = dx+w
self.left_x = dx
self.discard_end_x = dx + (w/6.0)
self.discard_top_y = self.bottom_y - (h/3.0)
discard_p_b = Point(dx+1, self.bottom_y-1)
discard_p_t = Point(self.discard_end_x, self.discard_top_y)
discard_text_p = Point((2*dx + (w/6.0))/2, (self.bottom_y - (h / 6.0)))
self.discard = Rectangle(discard_p_b, discard_p_t)
self.discard.setFill("grey")
self.discard_text = Text(discard_text_p, "DISCARD PILE")
self.deck_begin_x = self.right_x - (w/6.0)
deck_p_b = Point(self.right_x-1, self.bottom_y-1)
deck_p_t = Point(self.deck_begin_x, self.bottom_y - (h / 3.0))
deck_text_p = Point(self.right_x - (w / 12.0), self.bottom_y - (h / 6.0))
self.deck = Rectangle(deck_p_b, deck_p_t)
self.deck.setFill("grey")
self.deck_text = Text(deck_text_p, "DECK")
self.hand = []
self.in_play = []
self.selected = []
self.win = window
def display_outline(self):
self.discard.draw(self.win)
self.deck.draw(self.win)
def display_outline_with_labels(self):
self.display_outline()
self.deck_text.draw(self.win)
self.discard_text.draw(self.win)
def undisplay_labels(self):
self.deck_text.undraw()
self.discard_text.undraw()
def display_hand_area(self):
self._display_card_list(self.hand, PlayArea.HAND)
def undisplay_hand_area(self):
for card in self.hand:
card.undisplay()
def display_play_area(self):
self._display_card_list(self.in_play, PlayArea.IN_PLAY)
def _display_card_list(self, cards, play_area):
card_num = len(cards)
if card_num == 0:
return False
cur_card = 0
lx, by, rx, ty = self.get_area_points(play_area)
y_unit = (by - ty) / 50.0
card_height = by - ty - 2*y_unit
card_width = card_height * (5.0/7.0)
x_unit = ((rx - card_width) - lx)/card_num
for card in cards:
card.display_card(lx + (cur_card*x_unit), ty + y_unit, w=card_width, h=card_height)
cur_card += 1
def undisplay_play_area(self):
for card in self.in_play:
card.undisplay()
    def select_cards(self, cards, play_area):
        for card in cards:
            self.select_card(card, play_area)
    def select_card(self, card, play_area):
        if play_area == PlayArea.HAND:
            for hc in self.hand[:]:
                if hc.card.name == card.card.name:
                    self.selected.append({'card': hc, 'origin': play_area})
                    self.hand.remove(hc)
                    return
        elif play_area == PlayArea.IN_PLAY:
            for ipc in self.in_play[:]:
                if ipc.card.name == card.card.name:
                    self.selected.append({'card': ipc, 'origin': play_area})
                    self.in_play.remove(ipc)
                    return
        elif play_area == PlayArea.DECK or play_area == PlayArea.DISCARD:
            # ``card_type`` is assumed (elsewhere in the package) to wrap a
            # raw card in its view class.
            self.selected.append({'card': card_type(card), 'origin': play_area})
        elif play_area == PlayArea.SELECTION:
            for entry in self.selected[:]:
                if entry['card'].card.name == card.card.name:
                    self.return_selections()
                    self.selected.append(entry)
                    return
    def return_selections(self):
        self.undisplay_selection()
        for entry in self.selected[:]:
            if entry['origin'] == PlayArea.HAND:
                self.hand.append(entry['card'])
            elif entry['origin'] == PlayArea.IN_PLAY:
                self.in_play.append(entry['card'])
            self.selected.remove(entry)
def display_selection(self):
self._display_card_list([item['card'] for item in self.selected], PlayArea.SELECTION)
def undisplay_selection(self):
        for entry in self.selected:
            entry['card'].undisplay()
def add_to_hand_area(self, card_view):
self.hand.append(card_view)
def add_to_play_area(self, card_view):
self.in_play.append(card_view)
def get_card_at_point(self, point, area):
x = point.getX()
y = point.getY()
if area == PlayArea.HAND:
last_seen = None
for card in self.hand:
lx = min(card.card.getP1().getX(), card.card.getP2().getX())
if lx < x:
last_seen = card
else:
return last_seen
return last_seen
def get_area(self, point):
x = point.getX()
y = point.getY()
if y < self.discard_top_y:
return PlayArea.IN_PLAY
elif x < self.discard_end_x:
return PlayArea.DISCARD
elif x > self.deck_begin_x:
return PlayArea.DECK
        else:
            return PlayArea.HAND
def get_area_points(self, area):
if area == PlayArea.IN_PLAY:
return (self.left_x, self.discard_top_y, self.right_x, self.top_y)
elif area == PlayArea.DISCARD:
return (self.left_x, self.bottom_y, self.discard_end_x, self.discard_top_y)
elif area == PlayArea.HAND:
return (self.discard_end_x, self.bottom_y, self.deck_begin_x, self.discard_top_y)
elif area == PlayArea.DECK:
return (self.deck_begin_x, self.bottom_y, self.right_x, self.discard_top_y)
elif area == PlayArea.SELECTION:
return (self.discard_end_x, self.bottom_y - (self.bottom_y - self.discard_top_y)*(2.0/3.0),
self.deck_begin_x, self.bottom_y - (self.bottom_y - self.discard_top_y)*(5.0/3.0))
class PlayArea(object):
IN_PLAY = "play"
DISCARD = "discard"
DECK = "deck"
HAND = "hand"
SELECTION = "selection"
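# Usage sketch -- assumes a ``GraphWin``-style window class from the same
# graphics module as Rectangle/Point/Text; all names below are illustrative:
#
#     win = GraphWin('table', 800, 600)
#     outline = GameOutline(win, 20, 20, 760, 560)
#     outline.display_outline_with_labels()
#     outline.display_hand_area()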
| bsd-2-clause |
ArcherSys/ArcherSys | eclipse/plugins/org.python.pydev_4.5.5.201603221110/pysrc/_pydevd_bundle/pydevd_vm_type.py | 49 | 1578 | import sys
#=======================================================================================================================
# PydevdVmType
#=======================================================================================================================
class PydevdVmType:
PYTHON = 'python'
JYTHON = 'jython'
vm_type = None
#=======================================================================================================================
# set_vm_type
#=======================================================================================================================
def set_vm_type(vm_type):
PydevdVmType.vm_type = vm_type
#=======================================================================================================================
# get_vm_type
#=======================================================================================================================
def get_vm_type():
if PydevdVmType.vm_type is None:
setup_type()
return PydevdVmType.vm_type
#=======================================================================================================================
# setup_type
#=======================================================================================================================
def setup_type(str=None):
if str is not None:
PydevdVmType.vm_type = str
return
if sys.platform.startswith("java"):
PydevdVmType.vm_type = PydevdVmType.JYTHON
else:
PydevdVmType.vm_type = PydevdVmType.PYTHON
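#=======================================================================================================================
# usage sketch
#=======================================================================================================================
# Callers typically branch on the detected VM type, e.g.:
#
# >>> setup_type('python')
# >>> get_vm_type() == PydevdVmType.PYTHON
# True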
| mit |
andyzsf/django | tests/urlpatterns_reverse/namespace_urls.py | 35 | 2458 | from django.conf.urls import url, include
from . import views
class URLObject(object):
def __init__(self, app_name, namespace):
self.app_name = app_name
self.namespace = namespace
def urls(self):
return ([
url(r'^inner/$', views.empty_view, name='urlobject-view'),
url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
], self.app_name, self.namespace)
urls = property(urls)
testobj1 = URLObject('testapp', 'test-ns1')
testobj2 = URLObject('testapp', 'test-ns2')
default_testobj = URLObject('testapp', 'testapp')
otherobj1 = URLObject('nodefault', 'other-ns1')
otherobj2 = URLObject('nodefault', 'other-ns2')
urlpatterns = [
url(r'^normal/$', views.empty_view, name='normal-view'),
url(r'^normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='normal-view'),
url(r'^resolver_match/$', views.pass_resolver_match_view, name='test-resolver-match'),
url(r'^\+\\\$\*/$', views.empty_view, name='special-view'),
url(r'^mixed_args/([0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='mixed-args'),
url(r'^no_kwargs/([0-9]+)/([0-9]+)/$', views.empty_view, name='no-kwargs'),
url(r'^view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.view_class_instance, name='view-class'),
url(r'^unnamed/normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view),
url(r'^unnamed/view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.view_class_instance),
url(r'^test1/', include(testobj1.urls)),
url(r'^test2/', include(testobj2.urls)),
url(r'^default/', include(default_testobj.urls)),
url(r'^other1/', include(otherobj1.urls)),
url(r'^other[246]/', include(otherobj2.urls)),
url(r'^ns-included[135]/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-ns1')),
url(r'^ns-included2/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-ns2')),
url(r'^included/', include('urlpatterns_reverse.included_namespace_urls')),
url(r'^inc(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_urls', namespace='inc-ns5')),
url(r'^ns-outer/(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-outer')),
url(r'^\+\\\$\*/', include('urlpatterns_reverse.namespace_urls', namespace='special')),
]
| bsd-3-clause |
shenlong3030/asv-django-guestbook | django/contrib/gis/gdal/prototypes/ds.py | 12 | 4315 | """
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import c_char_p, c_double, c_int, c_long, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, geom_output, int_output, \
srs_output, void_output, voidptr_output
c_int_p = POINTER(c_int) # shortcut type
### Driver Routines ###
register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False)
cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False)
get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p])
get_driver_count = int_output(lgdal.OGRGetDriverCount, [])
get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p])
### DataSource ###
open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)])
destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False)
release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p])
get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p])
get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p])
get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p])
### Layer Routines ###
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
test_capability = int_output(lgdal.OGR_L_TestCapability, [c_void_p, c_char_p])
get_spatial_filter = geom_output(lgdal.OGR_L_GetSpatialFilter, [c_void_p])
set_spatial_filter = void_output(lgdal.OGR_L_SetSpatialFilter, [c_void_p, c_void_p], errcheck=False)
set_spatial_filter_rect = void_output(lgdal.OGR_L_SetSpatialFilterRect, [c_void_p, c_double, c_double, c_double, c_double], errcheck=False)
### Feature Definition Routines ###
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])
### Feature Routines ###
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
get_field_as_datetime = int_output(lgdal.OGR_F_GetFieldAsDateTime, [c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p])
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int])
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])
### Field Routines ###
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
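### Usage sketch ###
# These prototypes mirror the OGR C API. An illustrative call sequence for
# opening a data source (the path and variable names are placeholders):
#
#   from ctypes import byref, c_void_p
#   register_all()
#   driver = c_void_p()
#   ds = open_ds('/path/to/file.shp', 0, byref(driver))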
| bsd-3-clause |
hinsenchan/iOS_clockapp_emberjs_demo | node_modules/ember-cli/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input_test.py | 604 | 3207 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([(self.nodes['a'], self.nodes['a'])],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([(self.nodes['a'], self.nodes['b'], self.nodes['a'])],
self.nodes['a'].FindCycles())
self.assertEquals([(self.nodes['b'], self.nodes['a'], self.nodes['b'])],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
(self.nodes['a'], self.nodes['b'], self.nodes['a']) in cycles)
self.assertTrue(
(self.nodes['b'], self.nodes['c'], self.nodes['b']) in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([(self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a'])],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
| mit |
netzkolchose/django-cms | cms/migrations/0005_auto_20140924_1039.py | 57 | 4895 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import F
from treebeard.numconv import NumConv
STEPLEN = 4
ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
class MP_AddHandler(object):
def __init__(self):
self.stmts = []
NUM = NumConv(len(ALPHABET), ALPHABET)
def _int2str(num):
return NUM.int2str(num)
def _str2int(num):
return NUM.str2int(num)
def _get_basepath(path, depth):
""":returns: The base path of another path up to a given depth"""
if path:
return path[0:depth * STEPLEN]
return ''
def _get_path(path, depth, newstep):
"""
Builds a path given some values
:param path: the base path
:param depth: the depth of the node
:param newstep: the value (integer) of the new step
"""
parentpath = _get_basepath(path, depth - 1)
key = _int2str(newstep)
return '{0}{1}{2}'.format(
parentpath,
ALPHABET[0] * (STEPLEN - len(key)),
key
)
def _inc_path(obj):
""":returns: The path of the next sibling of a given node path."""
newpos = _str2int(obj.path[-STEPLEN:]) + 1
key = _int2str(newpos)
if len(key) > STEPLEN:
raise Exception("Path Overflow from: '%s'" % (obj.path, ))
return '{0}{1}{2}'.format(
obj.path[:-STEPLEN],
ALPHABET[0] * (STEPLEN - len(key)),
key
)
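# Doctest-style sketch of the base-36 path arithmetic (STEPLEN = 4,
# alphabet 0-9A-Z):
#
#   >>> _get_path(None, 1, 1)     # first root node
#   '0001'
#   >>> _get_path('0001', 2, 1)   # first child of '0001'
#   '00010001'
#
# ``_inc_path`` bumps the last step, so a node at '0001' gets its next
# sibling at '0002'.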
class MP_AddRootHandler(MP_AddHandler):
def __init__(self, **kwargs):
super(MP_AddRootHandler, self).__init__()
self.kwargs = kwargs
def process(self):
# do we have a root node already?
last_root = self.kwargs['last_root']
if last_root:
# adding the new root node as the last one
newpath = _inc_path(last_root)
else:
# adding the first root node
newpath = _get_path(None, 1, 1)
newobj = self.kwargs['instance']
newobj.depth = 1
newobj.path = newpath
# saving the instance before returning it
newobj.save()
return newobj
class MP_AddChildHandler(MP_AddHandler):
def __init__(self, node, model, **kwargs):
super(MP_AddChildHandler, self).__init__()
self.node = node
self.node_cls = node.__class__
self.kwargs = kwargs
self.model = model
def process(self):
newobj = self.kwargs['instance']
newobj.depth = self.node.depth + 1
if self.node.numchild == 0:
# the node had no children, adding the first child
newobj.path = _get_path(
self.node.path, newobj.depth, 1)
max_length = self.node_cls._meta.get_field('path').max_length
if len(newobj.path) > max_length:
raise Exception(
'The new node is too deep in the tree, try'
' increasing the path.max_length property'
' and UPDATE your database')
else:
# adding the new child as the last one
newobj.path = _inc_path(self.node.last_child)
# saving the instance before returning it
newobj.save()
newobj._cached_parent_obj = self.node
self.model.objects.filter(
path=self.node.path).update(numchild=F('numchild') + 1)
# we increase the numchild value of the object in memory
self.node.numchild += 1
return newobj
def move_to_mp(apps, schema_editor):
Page = apps.get_model("cms", "Page")
CMSPlugin = apps.get_model("cms", "CMSPlugin")
pages = Page.objects.all().order_by('tree_id', 'level', 'lft')
cache = {}
last_root = None
for page in pages:
if not page.parent_id:
handler = MP_AddRootHandler(instance=page, last_root=last_root)
handler.process()
last_root = page
page.last_child = None
else:
parent = cache[page.parent_id]
handler = MP_AddChildHandler(parent, Page, instance=page)
handler.process()
parent.last_child = page
cache[page.pk] = page
plugins = CMSPlugin.objects.all().order_by('tree_id', 'level', 'lft')
cache = {}
last_root = None
for plugin in plugins:
if not plugin.parent_id:
handler = MP_AddRootHandler(instance=plugin, last_root=last_root)
handler.process()
last_root = plugin
plugin.last_child = None
else:
parent = cache[plugin.parent_id]
handler = MP_AddChildHandler(parent, CMSPlugin, instance=plugin)
handler.process()
parent.last_child = plugin
cache[plugin.pk] = plugin
class Migration(migrations.Migration):
dependencies = [
('cms', '0004_auto_20140924_1038'),
]
operations = [
migrations.RunPython(move_to_mp),
]
| bsd-3-clause |
lorenzo-desantis/mne-python | mne/externals/tempita/__init__.py | 31 | 43854 | """
A small templating language
This implements a small templating language. This language implements
if/elif/else, for/continue/break, expressions, and blocks of Python
code. The syntax is::
{{any expression (function calls etc)}}
{{any expression | filter}}
{{for x in y}}...{{endfor}}
{{if x}}x{{elif y}}y{{else}}z{{endif}}
{{py:x=1}}
{{py:
def foo(bar):
return 'baz'
}}
{{default var = default_value}}
{{# comment}}
You use this with the ``Template`` class or the ``sub`` shortcut.
The ``Template`` class takes the template string and the name of
the template (for errors) and a default namespace. Then (like
``string.Template``) you can call the ``tmpl.substitute(**kw)``
method to make a substitution (or ``tmpl.substitute(a_dict)``).
``sub(content, **kw)`` substitutes the template immediately. You
can use ``__name='tmpl.html'`` to set the name of the template.
If there are syntax errors ``TemplateError`` will be raised.
"""
import warnings
import re
import sys
import cgi
from ..six.moves.urllib.parse import quote as url_quote
import os
import tokenize
from ..six.moves import cStringIO as StringIO
from ._looper import looper
from .compat3 import PY3, bytes, basestring_, next, is_unicode, coerce_text
__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
'sub_html', 'html', 'bunch']
in_re = re.compile(r'\s+in\s+')
var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
class TemplateError(Exception):
"""Exception raised while parsing a template
"""
def __init__(self, message, position, name=None):
Exception.__init__(self, message)
self.position = position
self.name = name
def __str__(self):
msg = ' '.join(self.args)
if self.position:
msg = '%s at line %s column %s' % (
msg, self.position[0], self.position[1])
if self.name:
msg += ' in %s' % self.name
return msg
class _TemplateContinue(Exception):
pass
class _TemplateBreak(Exception):
pass
def get_file_template(name, from_template):
path = os.path.join(os.path.dirname(from_template.name), name)
return from_template.__class__.from_filename(
path, namespace=from_template.namespace,
get_template=from_template.get_template)
class Template(object):
default_namespace = {
'start_braces': '{{',
'end_braces': '}}',
'looper': looper,
}
default_encoding = 'utf8'
default_inherit = None
def __init__(self, content, name=None, namespace=None, stacklevel=None,
get_template=None, default_inherit=None, line_offset=0,
delimeters=None):
self.content = content
# set delimeters
if delimeters is None:
delimeters = (self.default_namespace['start_braces'],
self.default_namespace['end_braces'])
else:
assert len(delimeters) == 2 and all(
                [isinstance(delimeter, basestring_)
for delimeter in delimeters])
self.default_namespace = self.__class__.default_namespace.copy()
self.default_namespace['start_braces'] = delimeters[0]
self.default_namespace['end_braces'] = delimeters[1]
self.delimeters = delimeters
self._unicode = is_unicode(content)
if name is None and stacklevel is not None:
try:
caller = sys._getframe(stacklevel)
except ValueError:
pass
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__file__' in globals:
name = globals['__file__']
if name.endswith('.pyc') or name.endswith('.pyo'):
name = name[:-1]
elif '__name__' in globals:
name = globals['__name__']
else:
name = '<string>'
if lineno:
name += ':%s' % lineno
self.name = name
self._parsed = parse(
content, name=name, line_offset=line_offset,
delimeters=self.delimeters)
if namespace is None:
namespace = {}
self.namespace = namespace
self.get_template = get_template
if default_inherit is not None:
self.default_inherit = default_inherit
def from_filename(cls, filename, namespace=None, encoding=None,
default_inherit=None, get_template=get_file_template):
f = open(filename, 'rb')
c = f.read()
f.close()
if encoding:
c = c.decode(encoding)
return cls(content=c, name=filename, namespace=namespace,
default_inherit=default_inherit, get_template=get_template)
from_filename = classmethod(from_filename)
def __repr__(self):
return '<%s %s name=%r>' % (
self.__class__.__name__,
hex(id(self))[2:], self.name)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError(
"You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError(
"You can only give one positional argument")
if not hasattr(args[0], 'items'):
raise TypeError(
("If you pass in a single argument, you must pass in a ",
"dict-like object (with a .items() method); you gave %r")
% (args[0],))
kw = args[0]
ns = kw
ns['__template_name__'] = self.name
if self.namespace:
ns.update(self.namespace)
result, defs, inherit = self._interpret(ns)
if not inherit:
inherit = self.default_inherit
if inherit:
result = self._interpret_inherit(result, defs, inherit, ns)
return result
def _interpret(self, ns):
# __traceback_hide__ = True
parts = []
defs = {}
self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
if '__inherit__' in defs:
inherit = defs.pop('__inherit__')
else:
inherit = None
return ''.join(parts), defs, inherit
def _interpret_inherit(self, body, defs, inherit_template, ns):
# __traceback_hide__ = True
if not self.get_template:
raise TemplateError(
'You cannot use inheritance without passing in get_template',
position=None, name=self.name)
templ = self.get_template(inherit_template, self)
self_ = TemplateObject(self.name)
        for name, value in defs.items():
setattr(self_, name, value)
self_.body = body
ns = ns.copy()
ns['self'] = self_
return templ.substitute(ns)
def _interpret_codes(self, codes, ns, out, defs):
# __traceback_hide__ = True
for item in codes:
if isinstance(item, basestring_):
out.append(item)
else:
self._interpret_code(item, ns, out, defs)
def _interpret_code(self, code, ns, out, defs):
# __traceback_hide__ = True
name, pos = code[0], code[1]
if name == 'py':
self._exec(code[2], ns, pos)
elif name == 'continue':
raise _TemplateContinue()
elif name == 'break':
raise _TemplateBreak()
elif name == 'for':
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out, defs)
elif name == 'cond':
parts = code[2:]
self._interpret_if(parts, ns, out, defs)
elif name == 'expr':
parts = code[2].split('|')
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == 'default':
var, expr = code[2], code[3]
if var not in ns:
result = self._eval(expr, ns, pos)
ns[var] = result
elif name == 'inherit':
expr = code[2]
value = self._eval(expr, ns, pos)
defs['__inherit__'] = value
elif name == 'def':
name = code[2]
signature = code[3]
parts = code[4]
ns[name] = defs[name] = TemplateDef(
self, name, signature, body=parts, ns=ns, pos=pos)
elif name == 'comment':
return
else:
assert 0, "Unknown code: %r" % name
def _interpret_for(self, vars, expr, content, ns, out, defs):
# __traceback_hide__ = True
for item in expr:
if len(vars) == 1:
ns[vars[0]] = item
else:
if len(vars) != len(item):
raise ValueError(
'Need %i items to unpack (got %i items)'
% (len(vars), len(item)))
for name, value in zip(vars, item):
ns[name] = value
try:
self._interpret_codes(content, ns, out, defs)
except _TemplateContinue:
continue
except _TemplateBreak:
break
def _interpret_if(self, parts, ns, out, defs):
# __traceback_hide__ = True
# @@: if/else/else gets through
for part in parts:
assert not isinstance(part, basestring_)
name, pos = part[0], part[1]
if name == 'else':
result = True
else:
result = self._eval(part[2], ns, pos)
if result:
self._interpret_codes(part[3], ns, out, defs)
break
def _eval(self, code, ns, pos):
# __traceback_hide__ = True
try:
try:
value = eval(code, self.default_namespace, ns)
except SyntaxError as e:
raise SyntaxError(
'invalid syntax in expression: %s' % code)
return value
except:
exc_info = sys.exc_info()
e = exc_info[1]
if getattr(e, 'args', None):
arg0 = e.args[0]
else:
arg0 = coerce_text(e)
e.args = (self._add_line_info(arg0, pos),)
            raise
def _exec(self, code, ns, pos):
# __traceback_hide__ = True
try:
exec(code, self.default_namespace, ns)
except:
exc_info = sys.exc_info()
e = exc_info[1]
if e.args:
e.args = (self._add_line_info(e.args[0], pos),)
else:
e.args = (self._add_line_info(None, pos),)
            raise
def _repr(self, value, pos):
# __traceback_hide__ = True
try:
if value is None:
return ''
if self._unicode:
try:
value = str(value)
if not is_unicode(value):
value = value.decode('utf-8')
except UnicodeDecodeError:
value = bytes(value)
else:
if not isinstance(value, basestring_):
value = coerce_text(value)
if (is_unicode(value) and self.default_encoding):
value = value.encode(self.default_encoding)
except:
exc_info = sys.exc_info()
e = exc_info[1]
e.args = (self._add_line_info(e.args[0], pos),)
            raise
else:
if self._unicode and isinstance(value, bytes):
if not self.default_encoding:
raise UnicodeDecodeError(
'Cannot decode bytes value %r into unicode '
'(no default_encoding provided)' % value)
try:
value = value.decode(self.default_encoding)
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
e.encoding,
e.object,
e.start,
e.end,
e.reason + ' in string %r' % value)
elif not self._unicode and is_unicode(value):
if not self.default_encoding:
raise UnicodeEncodeError(
'Cannot encode unicode value %r into bytes '
'(no default_encoding provided)' % value)
value = value.encode(self.default_encoding)
return value
def _add_line_info(self, msg, pos):
msg = "%s at line %s column %s" % (
msg, pos[0], pos[1])
if self.name:
msg += " in file %s" % self.name
return msg
def sub(content, delimeters=None, **kw):
name = kw.get('__name')
tmpl = Template(content, name=name, delimeters=delimeters)
return tmpl.substitute(kw)
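# Doctest-style sketch of ``sub``::
#
#     >>> sub('Hi {{name}}!', name='world')
#     'Hi world!'
#     >>> sub('{{if x}}yes{{else}}no{{endif}}', x=False)
#     'no'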
def paste_script_template_renderer(content, vars, filename=None):
tmpl = Template(content, name=filename)
return tmpl.substitute(vars)
class bunch(dict):
def __init__(self, **kw):
        for name, value in kw.items():
setattr(self, name, value)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, key):
if 'default' in self:
try:
return dict.__getitem__(self, key)
except KeyError:
return dict.__getitem__(self, 'default')
else:
return dict.__getitem__(self, key)
def __repr__(self):
items = [
            (k, v) for k, v in self.items()]
items.sort()
return '<%s %s>' % (
self.__class__.__name__,
' '.join(['%s=%r' % (k, v) for k, v in items]))
############################################################
## HTML Templating
############################################################
class html(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __html__(self):
return self.value
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__, self.value)
def html_quote(value, force=True):
if not force and hasattr(value, '__html__'):
return value.__html__()
if value is None:
return ''
if not isinstance(value, basestring_):
value = coerce_text(value)
if sys.version >= "3" and isinstance(value, bytes):
value = cgi.escape(value.decode('latin1'), 1)
value = value.encode('latin1')
else:
with warnings.catch_warnings(record=True): # annoying
value = cgi.escape(value, 1)
if sys.version < "3":
if is_unicode(value):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
def url(v):
v = coerce_text(v)
if is_unicode(v):
v = v.encode('utf8')
return url_quote(v)
def attr(**kw):
    kw = list(kw.items())
kw.sort()
parts = []
for name, value in kw:
if value is None:
continue
if name.endswith('_'):
name = name[:-1]
parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
return html(' '.join(parts))
class HTMLTemplate(Template):
default_namespace = Template.default_namespace.copy()
default_namespace.update(dict(
html=html,
attr=attr,
url=url,
html_quote=html_quote))
def _repr(self, value, pos):
if hasattr(value, '__html__'):
value = value.__html__()
quote = False
else:
quote = True
plain = Template._repr(self, value, pos)
if quote:
return html_quote(plain)
else:
return plain
def sub_html(content, **kw):
name = kw.get('__name')
tmpl = HTMLTemplate(content, name=name)
return tmpl.substitute(kw)
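# Doctest-style sketch of ``sub_html`` quoting, and the ``html`` wrapper that
# bypasses it::
#
#     >>> sub_html('<b>{{title}}</b>', title='a & b')
#     '<b>a &amp; b</b>'
#     >>> sub_html('{{content}}', content=html('<i>raw</i>'))
#     '<i>raw</i>'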
class TemplateDef(object):
def __init__(self, template, func_name, func_signature,
body, ns, pos, bound_self=None):
self._template = template
self._func_name = func_name
self._func_signature = func_signature
self._body = body
self._ns = ns
self._pos = pos
self._bound_self = bound_self
def __repr__(self):
return '<mne.externals.tempita function %s(%s) at %s:%s>' % (
self._func_name, self._func_signature,
self._template.name, self._pos)
def __str__(self):
return self()
def __call__(self, *args, **kw):
values = self._parse_signature(args, kw)
ns = self._ns.copy()
ns.update(values)
if self._bound_self is not None:
ns['self'] = self._bound_self
out = []
subdefs = {}
self._template._interpret_codes(self._body, ns, out, subdefs)
return ''.join(out)
def __get__(self, obj, type=None):
if obj is None:
return self
return self.__class__(
self._template, self._func_name, self._func_signature,
self._body, self._ns, self._pos, bound_self=obj)
def _parse_signature(self, args, kw):
values = {}
sig_args, var_args, var_kw, defaults = self._func_signature
extra_kw = {}
for name, value in kw.items():
if not var_kw and name not in sig_args:
raise TypeError(
'Unexpected argument %s' % name)
if name in sig_args:
values[name] = value
else:
extra_kw[name] = value
args = list(args)
sig_args = list(sig_args)
while args:
while sig_args and sig_args[0] in values:
sig_args.pop(0)
if sig_args:
name = sig_args.pop(0)
values[name] = args.pop(0)
elif var_args:
values[var_args] = tuple(args)
break
else:
raise TypeError(
'Extra position arguments: %s'
% ', '.join(repr(v) for v in args))
for name, value_expr in defaults.items():
if name not in values:
values[name] = self._template._eval(
value_expr, self._ns, self._pos)
for name in sig_args:
if name not in values:
raise TypeError(
'Missing argument: %s' % name)
if var_kw:
values[var_kw] = extra_kw
return values
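# Usage sketch: {{def ...}} blocks are compiled into TemplateDef objects
# that can then be called from template expressions (kept on one line here
# so that whitespace trimming does not apply):
# >>> sub("{{def greet(name)}}Hello {{name}}!{{enddef}}{{greet('Bob')}}")
# 'Hello Bob!'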
class TemplateObject(object):
def __init__(self, name):
self.__name = name
self.get = TemplateObjectGetter(self)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.__name)
class TemplateObjectGetter(object):
def __init__(self, template_obj):
self.__template_obj = template_obj
def __getattr__(self, attr):
return getattr(self.__template_obj, attr, Empty)
def __repr__(self):
return '<%s around %r>' % (
self.__class__.__name__, self.__template_obj)
class _Empty(object):
def __call__(self, *args, **kw):
return self
def __str__(self):
return ''
def __repr__(self):
return 'Empty'
def __unicode__(self):
if PY3:
return str('')
else:
return unicode('')
def __iter__(self):
return iter(())
def __bool__(self):
return False
if sys.version < "3":
__nonzero__ = __bool__
Empty = _Empty()
del _Empty
############################################################
## Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
if delimeters is None:
delimeters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
token_re = re.compile(r'%s|%s' % (re.escape(delimeters[0]),
re.escape(delimeters[1])))
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), last, last_pos)
if expr == delimeters[0] and in_expr:
raise TemplateError('%s inside expression' % delimeters[0],
position=pos,
name=name)
elif expr == delimeters[1] and not in_expr:
raise TemplateError('%s outside expression' % delimeters[1],
position=pos,
name=name)
if expr == delimeters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
in_expr = True
else:
chunks.append((s[last:match.start()], last_pos))
in_expr = False
last = match.end()
last_pos = pos
if in_expr:
raise TemplateError('No %s to finish last expression' % delimeters[1],
name=name, position=last_pos)
part = s[last:]
if part:
chunks.append(part)
if trim_whitespace:
chunks = trim_lex(chunks)
return chunks
lex.__doc__ = """
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: {{ inside expression at line 1 column 10
""" if PY3 else """
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
TemplateError: {{ inside expression at line 1 column 10
"""
statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
lead_whitespace_re = re.compile(r'^[\t ]*\n')
def trim_lex(tokens):
last_trim = None
for i in range(len(tokens)):
current = tokens[i]
if isinstance(tokens[i], basestring_):
# we don't trim this
continue
item = current[0]
if not statement_re.search(item) and item not in single_statements:
continue
if not i:
prev = ''
else:
prev = tokens[i - 1]
if i + 1 >= len(tokens):
next_chunk = ''
else:
next_chunk = tokens[i + 1]
if (not
isinstance(next_chunk, basestring_)
or not isinstance(prev, basestring_)):
continue
prev_ok = not prev or trail_whitespace_re.search(prev)
if i == 1 and not prev.strip():
prev_ok = True
if last_trim is not None and last_trim + 2 == i and not prev.strip():
prev_ok = 'last'
if (prev_ok
and (not next_chunk or lead_whitespace_re.search(next_chunk)
or (i == len(tokens) - 2 and not next_chunk.strip()))):
if prev:
if ((i == 1 and not prev.strip()) or prev_ok == 'last'):
tokens[i - 1] = ''
else:
m = trail_whitespace_re.search(prev)
# +1 to leave the leading \n on:
prev = prev[:m.start() + 1]
tokens[i - 1] = prev
if next_chunk:
last_trim = i
if i == len(tokens) - 2 and not next_chunk.strip():
tokens[i + 1] = ''
else:
m = lead_whitespace_re.search(next_chunk)
next_chunk = next_chunk[m.end():]
tokens[i + 1] = next_chunk
return tokens
trim_lex.__doc__ = r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
""" if PY3 else r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
"""
def find_position(string, index, last_index, last_pos):
"""
Given a string and index, return (line, column)
"""
lines = string.count('\n', last_index, index)
if lines > 0:
column = index - string.rfind('\n', last_index, index)
else:
column = last_pos[1] + (index - last_index)
return (last_pos[0] + lines, column)
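# Worked example: in 'ab\ncd' the character at index 4 ('d') sits on line 2,
# column 2, so find_position('ab\ncd', 4, 0, (1, 1)) == (2, 2).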
def parse(s, name=None, line_offset=0, delimeters=None):
if delimeters is None:
delimeters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
tokens = lex(s, name=name, line_offset=line_offset, delimeters=delimeters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
result.append(next_chunk)
return result
parse.__doc__ = r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse(
... 'series->{{for x in y}}x={{x}}{{endfor}}'
... ) #doctest: +NORMALIZE_WHITESPACE
['series->',
('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse(
... '{{if x}}a{{elif y}}b{{else}}c{{endif}}'
... ) #doctest: +NORMALIZE_WHITESPACE
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']),
('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
mne.externals.tempita.TemplateError: Multi-line py blocks must start
with a newline at line 1 column 3
""" if PY3 else r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse(
... 'series->{{for x in y}}x={{x}}{{endfor}}'
... ) #doctest: +NORMALIZE_WHITESPACE
['series->',
('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse(
... '{{if x}}a{{elif y}}b{{else}}c{{endif}}'
... ) #doctest: +NORMALIZE_WHITESPACE
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']),
('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TemplateError: Multi-line py blocks must start
with a newline at line 1 column 3
"""
def parse_expr(tokens, name, context=()):
if isinstance(tokens[0], basestring_):
return tokens[0], tokens[1:]
expr, pos = tokens[0]
expr = expr.strip()
if expr.startswith('py:'):
expr = expr[3:].lstrip(' \t')
if expr.startswith('\n') or expr.startswith('\r'):
expr = expr.lstrip('\r\n')
if '\r' in expr:
expr = expr.replace('\r\n', '\n')
expr = expr.replace('\r', '')
expr += '\n'
else:
if '\n' in expr:
raise TemplateError(
'Multi-line py blocks must start with a newline',
position=pos, name=name)
return ('py', pos, expr), tokens[1:]
elif expr in ('continue', 'break'):
if 'for' not in context:
raise TemplateError(
'%s outside of for loop' % expr,
position=pos, name=name)
return (expr, pos), tokens[1:]
elif expr.startswith('if '):
return parse_cond(tokens, name, context)
elif (expr.startswith('elif ')
or expr == 'else'):
raise TemplateError(
'%s outside of an if block' % expr.split()[0],
position=pos, name=name)
elif expr in ('if', 'elif', 'for'):
raise TemplateError(
'%s with no expression' % expr,
position=pos, name=name)
elif expr in ('endif', 'endfor', 'enddef'):
raise TemplateError(
'Unexpected %s' % expr,
position=pos, name=name)
elif expr.startswith('for '):
return parse_for(tokens, name, context)
elif expr.startswith('default '):
return parse_default(tokens, name, context)
elif expr.startswith('inherit '):
return parse_inherit(tokens, name, context)
elif expr.startswith('def '):
return parse_def(tokens, name, context)
elif expr.startswith('#'):
return ('comment', pos, tokens[0][0]), tokens[1:]
return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
start = tokens[0][1]
pieces = []
context = context + ('if',)
while 1:
if not tokens:
raise TemplateError(
'Missing {{endif}}',
position=start, name=name)
if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endif'):
return ('cond', start) + tuple(pieces), tokens[1:]
next_chunk, tokens = parse_one_cond(tokens, name, context)
pieces.append(next_chunk)
def parse_one_cond(tokens, name, context):
(first, pos), tokens = tokens[0], tokens[1:]
content = []
if first.endswith(':'):
first = first[:-1]
if first.startswith('if '):
part = ('if', pos, first[3:].lstrip(), content)
elif first.startswith('elif '):
part = ('elif', pos, first[5:].lstrip(), content)
elif first == 'else':
part = ('else', pos, None, content)
else:
assert 0, "Unexpected token %r at %s" % (first, pos)
while 1:
if not tokens:
raise TemplateError(
'No {{endif}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and (tokens[0][0] == 'endif'
or tokens[0][0].startswith('elif ')
or tokens[0][0] == 'else')):
return part, tokens
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_for(tokens, name, context):
first, pos = tokens[0]
tokens = tokens[1:]
context = ('for',) + context
content = []
assert first.startswith('for ')
if first.endswith(':'):
first = first[:-1]
first = first[3:].strip()
match = in_re.search(first)
if not match:
raise TemplateError(
'Bad for (no "in") in %r' % first,
position=pos, name=name)
vars = first[:match.start()]
if '(' in vars:
raise TemplateError(
'You cannot have () in the variable section of a for loop (%r)'
% vars, position=pos, name=name)
vars = tuple([
v.strip() for v in first[:match.start()].split(',')
if v.strip()])
expr = first[match.end():]
while 1:
if not tokens:
raise TemplateError(
'No {{endfor}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endfor'):
return ('for', pos, vars, expr, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_default(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('default ')
first = first.split(None, 1)[1]
parts = first.split('=', 1)
if len(parts) == 1:
raise TemplateError(
"Expression must be {{default var=value}}; no = found in %r" %
first, position=pos, name=name)
var = parts[0].strip()
if ',' in var:
raise TemplateError(
"{{default x, y = ...}} is not supported",
position=pos, name=name)
if not var_re.search(var):
raise TemplateError(
"Not a valid variable name for {{default}}: %r"
% var, position=pos, name=name)
expr = parts[1].strip()
return ('default', pos, var, expr), tokens[1:]
def parse_inherit(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('inherit ')
expr = first.split(None, 1)[1]
return ('inherit', pos, expr), tokens[1:]
def parse_def(tokens, name, context):
first, start = tokens[0]
tokens = tokens[1:]
assert first.startswith('def ')
first = first.split(None, 1)[1]
if first.endswith(':'):
first = first[:-1]
if '(' not in first:
func_name = first
sig = ((), None, None, {})
elif not first.endswith(')'):
raise TemplateError("Function definition doesn't end with ): %s" %
first, position=start, name=name)
else:
first = first[:-1]
func_name, sig_text = first.split('(', 1)
sig = parse_signature(sig_text, name, start)
context = context + ('def',)
content = []
while 1:
if not tokens:
raise TemplateError(
'Missing {{enddef}}',
position=start, name=name)
if (isinstance(tokens[0], tuple) and tokens[0][0] == 'enddef'):
return ('def', start, func_name, sig, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_signature(sig_text, name, pos):
tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
sig_args = []
var_arg = None
var_kw = None
defaults = {}
def get_token(pos=False):
try:
tok_type, tok_string, (srow, scol), (erow, ecol), line = next(
tokens)
except StopIteration:
return tokenize.ENDMARKER, ''
if pos:
return tok_type, tok_string, (srow, scol), (erow, ecol)
else:
return tok_type, tok_string
while 1:
var_arg_type = None
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER:
break
if tok_type == tokenize.OP and (
tok_string == '*' or tok_string == '**'):
var_arg_type = tok_string
tok_type, tok_string = get_token()
if tok_type != tokenize.NAME:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
var_name = tok_string
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER or (
tok_type == tokenize.OP and tok_string == ','):
if var_arg_type == '*':
var_arg = var_name
elif var_arg_type == '**':
var_kw = var_name
else:
sig_args.append(var_name)
if tok_type == tokenize.ENDMARKER:
break
continue
if var_arg_type is not None:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if tok_type == tokenize.OP and tok_string == '=':
nest_type = None
unnest_type = None
nest_count = 0
start_pos = end_pos = None
parts = []
while 1:
tok_type, tok_string, s, e = get_token(True)
if start_pos is None:
start_pos = s
end_pos = e
if tok_type == tokenize.ENDMARKER and nest_count:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if (not nest_count and
(tok_type == tokenize.ENDMARKER or
(tok_type == tokenize.OP and tok_string == ','))):
default_expr = isolate_expression(
sig_text, start_pos, end_pos)
defaults[var_name] = default_expr
sig_args.append(var_name)
break
parts.append((tok_type, tok_string))
if nest_count \
and tok_type == tokenize.OP \
and tok_string == nest_type:
nest_count += 1
elif nest_count \
and tok_type == tokenize.OP \
and tok_string == unnest_type:
nest_count -= 1
if not nest_count:
nest_type = unnest_type = None
elif not nest_count \
and tok_type == tokenize.OP \
and tok_string in ('(', '[', '{'):
nest_type = tok_string
nest_count = 1
unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
return sig_args, var_arg, var_kw, defaults
def isolate_expression(string, start_pos, end_pos):
srow, scol = start_pos
srow -= 1
erow, ecol = end_pos
erow -= 1
lines = string.splitlines(True)
if srow == erow:
return lines[srow][scol:ecol]
parts = [lines[srow][scol:]]
parts.extend(lines[srow + 1:erow])
if erow < len(lines):
# It'll sometimes give (end_row_past_finish, 0)
parts.append(lines[erow][:ecol])
return ''.join(parts)
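# Worked example: token positions use 1-based rows, so for the signature
# text 'x=a+b' the default expression spans (1, 2)..(1, 5):
# isolate_expression('x=a+b', (1, 2), (1, 5)) == 'a+b'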
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
import sys
import optparse
import pkg_resources
import os
if args is None:
args = sys.argv[1:]
dist = pkg_resources.get_distribution('Paste')
parser = optparse.OptionParser(
version=coerce_text(dist),
usage=_fill_command_usage)
parser.add_option(
'-o', '--output',
dest='output',
metavar="FILENAME",
help="File to write output to (default stdout)")
parser.add_option(
'--html',
dest='use_html',
action='store_true',
help="Use HTML style filling (including automatic HTML quoting)")
parser.add_option(
'--env',
dest='use_env',
action='store_true',
help="Put the environment in as top-level variables")
options, args = parser.parse_args(args)
if len(args) < 1:
print('You must give a template filename')
sys.exit(2)
template_name = args[0]
args = args[1:]
vars = {}
if options.use_env:
vars.update(os.environ)
for value in args:
if '=' not in value:
print('Bad argument: %r' % value)
sys.exit(2)
name, value = value.split('=', 1)
if name.startswith('py:'):
name = name[3:]
value = eval(value)
vars[name] = value
if template_name == '-':
template_content = sys.stdin.read()
template_name = '<stdin>'
else:
f = open(template_name, 'rb')
template_content = f.read()
f.close()
if options.use_html:
TemplateClass = HTMLTemplate
else:
TemplateClass = Template
template = TemplateClass(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
f = open(options.output, 'wb')
f.write(result)
f.close()
else:
sys.stdout.write(result)
if __name__ == '__main__':
fill_command()
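# Command-line usage sketch (file names are hypothetical):
#   python tempita.py page.tmpl title=Home py:count=3 -o page.html
# Arguments of the form py:NAME=VALUE are eval()'d as Python expressions;
# all other NAME=VALUE pairs are passed through as strings.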
| bsd-3-clause |
google/tf-quant-finance | tf_quant_finance/experimental/pricing_platform/framework/market_data/rate_curve.py | 1 | 13760 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of RateCurve object."""
from typing import Optional, Tuple
import tensorflow.compat.v2 as tf
from tf_quant_finance import datetime as dateslib
from tf_quant_finance import math
from tf_quant_finance import rates as rates_lib
from tf_quant_finance.experimental.pricing_platform.framework.core import curve_types
from tf_quant_finance.experimental.pricing_platform.framework.core import daycount_conventions
from tf_quant_finance.experimental.pricing_platform.framework.core import interpolation_method
from tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd
from tf_quant_finance.experimental.pricing_platform.framework.core import types
from tf_quant_finance.experimental.pricing_platform.framework.market_data import utils
_DayCountConventions = daycount_conventions.DayCountConventions
_InterpolationMethod = interpolation_method.InterpolationMethod
_DayCountConventionsProtoType = types.DayCountConventionsProtoType
class RateCurve(pmd.RateCurve):
"""Represents an interest rate curve."""
def __init__(
self,
maturity_dates: types.DateTensor,
discount_factors: tf.Tensor,
valuation_date: types.DateTensor,
interpolator: Optional[_InterpolationMethod] = None,
interpolate_rates: Optional[bool] = True,
daycount_convention: Optional[_DayCountConventionsProtoType] = None,
curve_type: Optional[curve_types.CurveType] = None,
dtype: Optional[tf.DType] = None,
name: Optional[str] = None):
"""Initializes the interest rate curve.
Args:
maturity_dates: A `DateTensor` containing the maturity dates on which the
curve is specified.
discount_factors: A `Tensor` of real dtype specifying the discount factors
corresponding to the input maturities. The shape of this input should
match the shape of `maturity_dates`.
valuation_date: A scalar `DateTensor` specifying the valuation (or
settlement) date for the curve.
interpolator: An instance of `InterpolationMethod`.
Default value: `None` in which case cubic interpolation is used.
interpolate_rates: A boolean specifying whether the interpolation should
be done in discount rates or discount factors space.
Default value: `True`, i.e., interpolation is done in the discount
factors space.
daycount_convention: `DayCountConventions` to use for the interpolation
purpose.
Default value: `None` which maps to actual/365 day count convention.
curve_type: An instance of `CurveTypes` to mark the rate curve.
Default value: `None` which means that the curve does not have the
marker.
dtype: `tf.Dtype`. Optional input specifying the dtype of the `rates`
input.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'rate_curve'.
"""
self._name = name or "rate_curve"
with tf.compat.v1.name_scope(self._name):
self._discount_factor_nodes = tf.convert_to_tensor(
discount_factors, dtype=dtype,
name="curve_discount_factors")
self._dtype = dtype or self._discount_factor_nodes.dtype
if interpolator is None or interpolator == _InterpolationMethod.CUBIC:
def cubic_interpolator(xi, x, y):
spline_coeffs = math.interpolation.cubic.build_spline(x, y)
return math.interpolation.cubic.interpolate(xi, spline_coeffs,
dtype=dtype)
interpolator = cubic_interpolator
self._interpolation_method = _InterpolationMethod.CUBIC
elif interpolator == _InterpolationMethod.LINEAR:
def linear_interpolator(xi, x, y):
return math.interpolation.linear.interpolate(xi, x, y,
dtype=dtype)
interpolator = linear_interpolator
self._interpolation_method = _InterpolationMethod.LINEAR
elif interpolator == _InterpolationMethod.CONSTANT_FORWARD:
def constant_fwd(xi, x, y):
return rates_lib.constant_fwd.interpolate(xi, x, y, dtype=dtype)
interpolator = constant_fwd
self._interpolation_method = _InterpolationMethod.CONSTANT_FORWARD
else:
raise ValueError(f"Unknown interpolation method {interpolator}.")
self._dates = dateslib.convert_to_date_tensor(maturity_dates)
self._valuation_date = dateslib.convert_to_date_tensor(
valuation_date)
self._daycount_convention = (
daycount_convention or _DayCountConventions.ACTUAL_365)
self._day_count_fn = utils.get_daycount_fn(self._daycount_convention)
self._times = self._get_time(self._dates)
self._interpolator = interpolator
self._interpolate_rates = interpolate_rates
# Precompute discount rates:
self._curve_type = curve_type
@property
def daycount_convention(self) -> types.DayCountConventionsProtoType:
"""Daycount convention."""
return self._daycount_convention
def daycount_fn(self):
"""Daycount function."""
return self._day_count_fn
@property
def discount_factor_nodes(self) -> types.FloatTensor:
"""Discount factors at the interpolation nodes."""
return self._discount_factor_nodes
@property
def node_dates(self) -> types.DateTensor:
"""Dates at which the discount factors and rates are specified."""
return self._dates
@property
def discount_rate_nodes(self) -> types.FloatTensor:
"""Discount rates at the interpolation nodes."""
discount_rates = tf.math.divide_no_nan(
-tf.math.log(self.discount_factor_nodes), self._times,
name="discount_rate_nodes")
return discount_rates
def set_discount_factor_nodes(self, values: types.FloatTensor):
"""Update discount factors at the interpolation nodes with new values."""
values = tf.convert_to_tensor(values, dtype=self._dtype)
values_shape = values.shape.as_list()
nodes_shape = self.discount_factor_nodes.shape.as_list()
if values_shape != nodes_shape:
raise ValueError("New values should have shape {0} but are of "
"shape {1}".format(nodes_shape, values_shape))
self._discount_factor_nodes = values
def discount_rate(self,
interpolation_dates: Optional[types.DateTensor] = None,
interpolation_times: Optional[types.FloatTensor] = None,
name: Optional[str] = None):
"""Returns interpolated rates at `interpolation_dates`."""
if interpolation_dates is None and interpolation_times is None:
raise ValueError("Either interpolation_dates or interpolation times "
"must be supplied.")
if interpolation_dates is not None:
interpolation_dates = dateslib.convert_to_date_tensor(
interpolation_dates)
times = self._get_time(interpolation_dates)
else:
times = tf.convert_to_tensor(interpolation_times, self._dtype)
if self._interpolate_rates:
rates = self._interpolator(times, self._times,
self.discount_rate_nodes)
else:
discount_factor = self._interpolator(
times, self._times, self.discount_factor_nodes)
rates = -tf.math.divide_no_nan(
tf.math.log(discount_factor), times)
return tf.identity(rates, name=name or "discount_rate")
def discount_factor(self,
interpolation_dates: Optional[types.DateTensor] = None,
interpolation_times: Optional[types.FloatTensor] = None,
name: Optional[str] = None):
"""Returns discount factors at `interpolation_dates`."""
if interpolation_dates is None and interpolation_times is None:
raise ValueError("Either interpolation_dates or interpolation times "
"must be supplied.")
if interpolation_dates is not None:
interpolation_dates = dateslib.convert_to_date_tensor(
interpolation_dates)
times = self._get_time(interpolation_dates)
else:
times = tf.convert_to_tensor(interpolation_times, self._dtype)
if self._interpolate_rates:
rates = self._interpolator(times, self._times,
self.discount_rate_nodes)
discount_factor = tf.math.exp(-rates * times)
else:
discount_factor = self._interpolator(
times, self._times, self.discount_factor_nodes)
return tf.identity(discount_factor, name=name or "discount_factor")
def forward_rate(
self,
start_date: Optional[types.DateTensor] = None,
maturity_date: Optional[types.DateTensor] = None,
start_time: Optional[types.FloatTensor] = None,
maturity_time: Optional[types.FloatTensor] = None,
day_count_fraction: Optional[tf.Tensor] = None):
"""Returns the simply accrued forward rate between [start_dt, maturity_dt].
Args:
start_date: A `DateTensor` specifying the start of the accrual period
for the forward rate. The function expects either `start_date` or
`start_time` to be specified.
maturity_date: A `DateTensor` specifying the end of the accrual period
for the forward rate. The shape of `maturity_date` must be
broadcastable with the shape of `start_date`. The function expects
either `maturity_date` or `maturity_time` to be specified.
start_time: A real `Tensor` specifying the start of the accrual period
for the forward rate. The function expects either `start_date` or
`start_time` to be specified.
maturity_time: A real `Tensor` specifying the end of the accrual period
for the forward rate. The shape of `maturity_time` must be
broadcastable with the shape of `start_time`. The function expects
either `maturity_date` or `maturity_time` to be specified.
day_count_fraction: An optional `Tensor` of real dtype specifying the
time between `start_date` and `maturity_date` in years computed using
the forward rate's day count basis. The shape of the input should be
the same as that of `start_date` and `maturity_date`.
Default value: `None`, in which case the daycount fraction is computed
using `daycount_convention`.
Returns:
A real `Tensor` of same shape as the inputs containing the simply
compounded forward rate.
"""
if start_date is None and start_time is None:
raise ValueError("Either start_date or start_times "
"must be supplied.")
if maturity_date is None and maturity_time is None:
raise ValueError("Either maturity_date or maturity_time must be "
"supplied.")
if start_date is not None and maturity_date is not None:
start_date = dateslib.convert_to_date_tensor(start_date)
maturity_date = dateslib.convert_to_date_tensor(maturity_date)
if day_count_fraction is None:
day_count_fn = self._day_count_fn
day_count_fraction = day_count_fn(
start_date=start_date, end_date=maturity_date, dtype=self._dtype)
else:
day_count_fraction = tf.convert_to_tensor(day_count_fraction,
self._dtype,
name="day_count_fraction")
start_time = self._get_time(start_date)
maturity_time = self._get_time(maturity_date)
else:
start_time = tf.convert_to_tensor(start_time, dtype=self._dtype)
maturity_time = tf.convert_to_tensor(maturity_time, dtype=self._dtype)
day_count_fraction = maturity_time - start_time
dfstart = self.discount_factor(interpolation_times=start_time)
dfmaturity = self.discount_factor(interpolation_times=maturity_time)
return tf.math.divide_no_nan(
tf.math.divide_no_nan(dfstart, dfmaturity) - 1., day_count_fraction)
@property
def valuation_date(self) -> types.DateTensor:
return self._valuation_date
@property
def interpolation_method(self) -> _InterpolationMethod:
return self._interpolation_method
def _get_time(self,
dates: types.DateTensor) -> types.FloatTensor:
"""Computes the year fraction from the curve's valuation date."""
return self._day_count_fn(start_date=self._valuation_date,
end_date=dates,
dtype=self._dtype)
@property
def curve_type(self) -> curve_types.CurveType:
return self._curve_type
def discount_factors_and_dates(self) -> Tuple[types.FloatTensor,
types.DateTensor]:
"""Returns discount factors and dates at which the discount curve is fitted.
"""
return (self._discount_factor_nodes, self._dates)
@property
def dtype(self) -> types.Dtype:
return self._dtype
@property
def interpolate_rates(self) -> bool:
"""Returns `True` if the interpolation is on rates and not on discounts."""
return self._interpolate_rates
__all__ = ["RateCurve"]
| apache-2.0 |
Davasny/CCAS | ccas/models/exchanges/__init__.py | 1 | 1783 | from . import poloniex, btc_e, bittrex, bitfinex
from ccas.models import database, coinmarketcap
def get_balances(exchange, public_key, secret_key):
if exchange == "poloniex":
return poloniex.get_balances(public_key, secret_key)
if exchange == "btc-e":
return btc_e.get_balances(public_key, secret_key)
if exchange == "bittrex":
return bittrex.get_balances(public_key, secret_key)
if exchange == "bitfinex":
return bitfinex.get_balances(public_key, secret_key)
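# A possible refactor (sketch, not wired in above): table-driven dispatch
# keeps the supported exchanges in one place instead of repeating if-chains
# in every function; unknown exchanges fall through to None, as they do now.
# _MODULES = {"poloniex": poloniex, "btc-e": btc_e,
#             "bittrex": bittrex, "bitfinex": bitfinex}
# def get_balances(exchange, public_key, secret_key):
#     module = _MODULES.get(exchange)
#     return module.get_balances(public_key, secret_key) if module else None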
def get_exchanges():
response = database.new_query("SELECT id, exchange FROM exchanges_api_keys;")
return list(response)
def get_btc_price():
exchange = database.new_query("SELECT `exchange` FROM `coins_prices` WHERE `name`='btc';")
if exchange:
exchange = exchange[0][0]
if exchange == "poloniex":
return poloniex.get_btc_price()
if exchange == "btc-e":
return btc_e.get_btc_price()
if exchange == "bittrex":
return bittrex.get_btc_price()
if exchange == "bitfinex":
return bitfinex.get_btc_price()
else:
return -1
def get_price(currency):
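# Note: building SQL by concatenating `currency` into the string is open to
# injection; if database.new_query supports placeholders, a parameterized
# form (a sketch, assuming such support) would be safer:
#   database.new_query("SELECT `exchange` FROM `coins_prices` "
#                      "WHERE `name`=%s;", (currency.lower(),))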
exchange = database.new_query("SELECT `exchange` FROM `coins_prices` WHERE `name`='"+ currency.lower() +"';")
if exchange:
exchange = exchange[0][0]
if exchange == "poloniex":
return poloniex.get_price(currency)
if exchange == "btc-e":
return btc_e.get_price(currency)
if exchange == "bittrex":
return bittrex.get_price(currency)
if exchange == "bitfinex":
return bitfinex.get_price(currency)
if exchange == "coinmarketcap":
return coinmarketcap.get_price(currency)
else:
return -1
| mit |
johankaito/fufuka | microblog/flask/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py | 17 | 69089 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
__all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
def _process_parameters(dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
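# Worked example: entries within eps of zero are pseudo-inverted to zero:
# _pinv_1d(np.array([2.0, 1e-7, -4.0]), eps=1e-5) -> array([ 0.5 ,  0.  , -0.25])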
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
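# Usage sketch: a single eigendecomposition yields mutually consistent
# pseudo-inverse, log pseudo-determinant and rank. For M = 4 * I_3,
# psd = _PSD(4.0 * np.eye(3)) gives psd.rank == 3,
# psd.log_pdet == 3 * np.log(4) and psd.pinv == np.eye(3) / 4
# (up to floating point).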
_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
'_doc_callparams_note': _doc_callparams_note,
'_doc_random_state': _doc_random_state
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
'_doc_callparams_note': _doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.empty(x.shape + (2,))
>>> pos[:, :, 0] = x; pos[:, :, 1] = y
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean, cov, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean, cov, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self.dim, self.mean, self.cov = _process_parameters(None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
self._dist = multivariate_normal_gen(seed)
def logpdf(self, x):
x = _process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, " +
"but a.shape = %s." % str(alpha.shape))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have one entry less then the" +
" parameter vector 'a', but alpha.shape = " +
"%s and " % alpha.shape +
"x.shape = %s." % x.shape)
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) < 0:
raise ValueError("Each entry in 'x' must be greater or equal zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal " +
"simplex. but sum(x)=%f." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
lnB : scalar
Log of the helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
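# Worked example: for alpha = (2, 3), B(alpha) = Gamma(2)*Gamma(3)/Gamma(5)
# = 2/24 = 1/12, so _lnB(np.array([2., 3.])) == np.log(1. / 12.) ~ -2.4849.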
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
Each :math:`\alpha` entry must be positive. The distribution has support
only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : ndarray
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : ndarray
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# dirichlet_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
    Degrees of freedom, must be greater than or equal to the dimension
    of the scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
                             " dimensional, but scale.shape = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
            raise ValueError('Size must be an integer or tuple of integers;'
                             ' thus must have dimension <= 1.'
                             ' Got size = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
            Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
            The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
        # Create the A matrix (or matrices) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
        # This appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
        # is either a typo in, or a misreading of, [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
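        Examples
        --------
        A minimal shape sketch (the parameter values are illustrative only):
        >>> from scipy.stats import wishart
        >>> wishart.rvs(df=3, scale=np.eye(2), size=3).shape
        (3, 2, 2)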
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
        Compute Cholesky decomposition and determine log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
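        Examples
        --------
        A quick numeric sketch (illustrative only):
        >>> C, logdet = wishart._cholesky_logdet(np.array([[2., 0.],
        ...                                                [0., 3.]]))
        >>> np.allclose(logdet, np.log(6.))
        True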
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
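    Examples
    --------
    A minimal sketch on a stack of 2x2 SPD matrices (values are illustrative
    only; note that the input array is overwritten in place):
    >>> a = np.array([np.eye(2) * 2., np.eye(2) * 4.])
    >>> inv = _cho_inv_batch(a)
    >>> np.allclose(inv[0], np.eye(2) / 2.)
    True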
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
        if info < 0:
            raise ValueError('illegal value in %d-th argument of internal'
                             ' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
            Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
            The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
| apache-2.0 |
willthames/ansible | lib/ansible/modules/inventory/group_by.py | 2 | 1901 | # -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: group_by
short_description: Create Ansible groups based on facts
description:
- Use facts to create ad-hoc groups that can be used later in a playbook.
- This module is also supported for Windows targets.
version_added: "0.9"
options:
key:
description:
- The variables whose values will be used as groups
required: true
parents:
description:
- The list of the parent groups
required: false
default: "all"
version_added: "2.4"
author: "Jeroen Hoekx (@jhoekx)"
notes:
- Spaces in group names are converted to dashes '-'.
- This module is also supported for Windows targets.
'''
EXAMPLES = '''
# Create groups based on the machine architecture
- group_by:
key: machine_{{ ansible_machine }}
# Create groups like 'kvm-host'
- group_by:
key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
# Create nested groups
- group_by:
key: el{{ ansible_distribution_major_version }}-{{ ansible_architecture }}
parents:
- el{{ ansible_distribution_major_version }}
'''
| gpl-3.0 |
harshaneelhg/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
Jgarcia-IAS/localizacion | openerp/addons-extra/report_move_voucher/report/__init__.py | 7 | 1449 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
###########################################################################
#    Module written for OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
###############Credits######################################################
# Coded by: María Gabriela Quilarque <gabrielaquilarque97@gmail.com>
# Luis Escobar <luis@vauxoo.com>
# Planified by: Nhomar Hernandez
# Finance by: Vauxoo, C.A. http://vauxoo.com
# Audited by: Humberto Arocha humberto@openerp.com.ve
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import account
| agpl-3.0 |
hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/test/test_dirdbm.py | 2 | 6029 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for dirdbm module.
"""
import os, shutil, glob
from twisted.trial import unittest
from twisted.persisted import dirdbm
class DirDbmTestCase(unittest.TestCase):
def setUp(self):
self.path = self.mktemp()
self.dbm = dirdbm.open(self.path)
self.items = (('abc', 'foo'), ('/lalal', '\000\001'), ('\000\012', 'baz'))
def testAll(self):
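        # A key whose base64 encoding contains '/' must still be usable as a
        # filename, and rewriting an existing key must preserve the value.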
k = "//==".decode("base64")
self.dbm[k] = "a"
self.dbm[k] = "a"
self.assertEqual(self.dbm[k], "a")
def testRebuildInteraction(self):
from twisted.persisted import dirdbm
from twisted.python import rebuild
s = dirdbm.Shelf('dirdbm.rebuild.test')
s['key'] = 'value'
rebuild.rebuild(dirdbm)
# print s['key']
def testDbm(self):
d = self.dbm
# insert keys
keys = []
values = set()
for k, v in self.items:
d[k] = v
keys.append(k)
values.add(v)
keys.sort()
# check they exist
for k, v in self.items:
assert d.has_key(k), "has_key() failed"
assert d[k] == v, "database has wrong value"
# check non existent key
try:
d["XXX"]
except KeyError:
pass
else:
assert 0, "didn't raise KeyError on non-existent key"
# check keys(), values() and items()
dbkeys = list(d.keys())
dbvalues = set(d.values())
dbitems = set(d.items())
dbkeys.sort()
items = set(self.items)
assert keys == dbkeys, ".keys() output didn't match: %s != %s" % (repr(keys), repr(dbkeys))
assert values == dbvalues, ".values() output didn't match: %s != %s" % (repr(values), repr(dbvalues))
assert items == dbitems, "items() didn't match: %s != %s" % (repr(items), repr(dbitems))
copyPath = self.mktemp()
d2 = d.copyTo(copyPath)
copykeys = list(d.keys())
copyvalues = set(d.values())
copyitems = set(d.items())
copykeys.sort()
assert dbkeys == copykeys, ".copyTo().keys() didn't match: %s != %s" % (repr(dbkeys), repr(copykeys))
assert dbvalues == copyvalues, ".copyTo().values() didn't match: %s != %s" % (repr(dbvalues), repr(copyvalues))
assert dbitems == copyitems, ".copyTo().items() didn't match: %s != %s" % (repr(dbkeys), repr(copyitems))
d2.clear()
assert len(d2.keys()) == len(d2.values()) == len(d2.items()) == 0, ".clear() failed"
shutil.rmtree(copyPath)
# delete items
for k, v in self.items:
del d[k]
            assert not d.has_key(k), "has_key() returned True even though we deleted it"
assert len(d.keys()) == 0, "database has keys"
assert len(d.values()) == 0, "database has values"
assert len(d.items()) == 0, "database has items"
def testModificationTime(self):
import time
# the mtime value for files comes from a different place than the
# gettimeofday() system call. On linux, gettimeofday() can be
# slightly ahead (due to clock drift which gettimeofday() takes into
# account but which open()/write()/close() do not), and if we are
# close to the edge of the next second, time.time() can give a value
# which is larger than the mtime which results from a subsequent
# write(). I consider this a kernel bug, but it is beyond the scope
        # of this test. Thus we keep the range of acceptability at 3 seconds.
# -warner
self.dbm["k"] = "v"
self.assert_(abs(time.time() - self.dbm.getModificationTime("k")) <= 3)
def testRecovery(self):
"""DirDBM: test recovery from directory after a faked crash"""
k = self.dbm._encode("key1")
f = open(os.path.join(self.path, k + ".rpl"), "wb")
f.write("value")
f.close()
k2 = self.dbm._encode("key2")
f = open(os.path.join(self.path, k2), "wb")
f.write("correct")
f.close()
f = open(os.path.join(self.path, k2 + ".rpl"), "wb")
f.write("wrong")
f.close()
f = open(os.path.join(self.path, "aa.new"), "wb")
f.write("deleted")
f.close()
dbm = dirdbm.DirDBM(self.path)
assert dbm["key1"] == "value"
assert dbm["key2"] == "correct"
assert not glob.glob(os.path.join(self.path, "*.new"))
assert not glob.glob(os.path.join(self.path, "*.rpl"))
def test_nonStringKeys(self):
"""
L{dirdbm.DirDBM} operations only support string keys: other types
should raise a C{AssertionError}. This really ought to be a
C{TypeError}, but it'll stay like this for backward compatibility.
"""
self.assertRaises(AssertionError, self.dbm.__setitem__, 2, "3")
try:
self.assertRaises(AssertionError, self.dbm.__setitem__, "2", 3)
except unittest.FailTest:
# dirdbm.Shelf.__setitem__ supports non-string values
self.assertIsInstance(self.dbm, dirdbm.Shelf)
self.assertRaises(AssertionError, self.dbm.__getitem__, 2)
self.assertRaises(AssertionError, self.dbm.__delitem__, 2)
self.assertRaises(AssertionError, self.dbm.has_key, 2)
self.assertRaises(AssertionError, self.dbm.__contains__, 2)
self.assertRaises(AssertionError, self.dbm.getModificationTime, 2)
class ShelfTestCase(DirDbmTestCase):
def setUp(self):
self.path = self.mktemp()
self.dbm = dirdbm.Shelf(self.path)
self.items = (('abc', 'foo'), ('/lalal', '\000\001'), ('\000\012', 'baz'),
('int', 12), ('float', 12.0), ('tuple', (None, 12)))
testCases = [DirDbmTestCase, ShelfTestCase]
| bsd-3-clause |
veryhappythings/discord-gather | gather/discord_gather.py | 1 | 2123 | import asyncio
import logging
import discord
from .gatherbot import GatherBot
from .organiser import Organiser
from . import commands
logger = logging.getLogger(__name__)
class DiscordGather:
def __init__(self, token):
self.token = token
self.bot = None
self.client = discord.Client()
self.client.on_ready = self.on_ready
asyncio.get_event_loop().call_soon(self._report_loop)
def run(self):
self.client.run(self.token)
async def on_ready(self):
self.bot = GatherBot(self.client.user.name)
self.bot.register_message_handler(self.client.send_message)
self.bot.register_action('^!help$', commands.bot_help)
self.bot.register_action('^!(?:add|join|s)$', commands.add)
self.bot.register_action('^!(?:remove|rem|so)$', commands.remove)
self.bot.register_action('^!(?:game|status)$', commands.game_status)
self.bot.register_action('^!(?:reset)$', commands.reset)
self.client.on_member_update = self.on_member_update
self.client.on_message = self.bot.on_message
logger.info('Logged in as')
logger.info(self.bot.username)
logger.info('------')
async def on_member_update(self, before, after):
# Handle players going offline
if (before.status == discord.Status.online and
after.status == discord.Status.offline):
await self.bot.member_went_offline(before)
# Handle players going AFK
elif (before.status == discord.Status.online and
after.status == discord.Status.idle):
await self.bot.member_went_afk(before)
def _report_loop(self):
if self.bot:
logger.info(report(self.bot.organiser))
asyncio.get_event_loop().call_later(60 * 10, self._report_loop)
def report(organiser: Organiser) -> str:
report = ["Report:"]
for key, queue in organiser.queues.items():
report.append("{}-{}: {} current players - {} games to date".format(
key.server, key, len(queue), organiser.games_count[key]))
return "\n".join(report)
| mit |
jeenalee/servo | components/script/dom/bindings/codegen/parser/tests/test_distinguishability.py | 50 | 12785 | def firstArgType(method):
return method.signatures()[0][1][0].type
def WebIDLTest(parser, harness):
parser.parse("""
dictionary Dict {
};
callback interface Foo {
};
interface Bar {
// Bit of a pain to get things that have dictionary types
void passDict(optional Dict arg);
void passFoo(Foo arg);
void passNullableUnion((object? or DOMString) arg);
void passNullable(Foo? arg);
};
""")
results = parser.finish()
iface = results[2]
harness.ok(iface.isInterface(), "Should have interface")
dictMethod = iface.members[0]
ifaceMethod = iface.members[1]
nullableUnionMethod = iface.members[2]
nullableIfaceMethod = iface.members[3]
dictType = firstArgType(dictMethod)
ifaceType = firstArgType(ifaceMethod)
    harness.ok(dictType.isDictionary(), "Should have dictionary type")
    harness.ok(ifaceType.isInterface(), "Should have interface type")
    harness.ok(ifaceType.isCallbackInterface(), "Should have callback interface type")
harness.ok(not dictType.isDistinguishableFrom(ifaceType),
"Dictionary not distinguishable from callback interface")
harness.ok(not ifaceType.isDistinguishableFrom(dictType),
"Callback interface not distinguishable from dictionary")
nullableUnionType = firstArgType(nullableUnionMethod)
nullableIfaceType = firstArgType(nullableIfaceMethod)
    harness.ok(nullableUnionType.isUnion(), "Should have union type")
    harness.ok(nullableIfaceType.isInterface(), "Should have interface type")
    harness.ok(nullableIfaceType.nullable(), "Should have nullable type")
harness.ok(not nullableUnionType.isDistinguishableFrom(nullableIfaceType),
"Nullable type not distinguishable from union with nullable "
"member type")
harness.ok(not nullableIfaceType.isDistinguishableFrom(nullableUnionType),
"Union with nullable member type not distinguishable from "
"nullable type")
parser = parser.reset()
parser.parse("""
interface TestIface {
void passKid(Kid arg);
void passParent(Parent arg);
void passGrandparent(Grandparent arg);
void passImplemented(Implemented arg);
void passImplementedParent(ImplementedParent arg);
void passUnrelated1(Unrelated1 arg);
void passUnrelated2(Unrelated2 arg);
void passArrayBuffer(ArrayBuffer arg);
void passArrayBuffer(ArrayBufferView arg);
};
interface Kid : Parent {};
interface Parent : Grandparent {};
interface Grandparent {};
interface Implemented : ImplementedParent {};
Parent implements Implemented;
interface ImplementedParent {};
interface Unrelated1 {};
interface Unrelated2 {};
""")
results = parser.finish()
iface = results[0]
harness.ok(iface.isInterface(), "Should have interface")
argTypes = [firstArgType(method) for method in iface.members]
unrelatedTypes = [firstArgType(method) for method in iface.members[-3:]]
for type1 in argTypes:
for type2 in argTypes:
distinguishable = (type1 is not type2 and
(type1 in unrelatedTypes or
type2 in unrelatedTypes))
harness.check(type1.isDistinguishableFrom(type2),
distinguishable,
"Type %s should %sbe distinguishable from type %s" %
(type1, "" if distinguishable else "not ", type2))
harness.check(type2.isDistinguishableFrom(type1),
distinguishable,
"Type %s should %sbe distinguishable from type %s" %
(type2, "" if distinguishable else "not ", type1))
parser = parser.reset()
parser.parse("""
interface Dummy {};
interface TestIface {
void method(long arg1, TestIface arg2);
void method(long arg1, long arg2);
void method(long arg1, Dummy arg2);
void method(DOMString arg1, DOMString arg2, DOMString arg3);
};
""")
results = parser.finish()
harness.check(len(results[1].members), 1,
"Should look like we have one method")
harness.check(len(results[1].members[0].signatures()), 4,
"Should have four signatures")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Dummy {};
interface TestIface {
void method(long arg1, TestIface arg2);
void method(long arg1, long arg2);
void method(any arg1, Dummy arg2);
void method(DOMString arg1, DOMString arg2, DOMString arg3);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should throw when args before the distinguishing arg are not "
"all the same type")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Dummy {};
interface TestIface {
void method(long arg1, TestIface arg2);
void method(long arg1, long arg2);
void method(any arg1, DOMString arg2);
void method(DOMString arg1, DOMString arg2, DOMString arg3);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should throw when there is no distinguishing index")
# Now let's test our whole distinguishability table
argTypes = [ "long", "short", "long?", "short?", "boolean",
"boolean?", "DOMString", "ByteString", "Enum", "Enum2",
"Interface", "Interface?",
"AncestorInterface", "UnrelatedInterface",
"ImplementedInterface", "CallbackInterface",
"CallbackInterface?", "CallbackInterface2",
"object", "Callback", "Callback2", "optional Dict",
"optional Dict2", "sequence<long>", "sequence<short>",
"MozMap<object>", "MozMap<Dict>", "MozMap<long>",
"Date", "Date?", "any",
"Promise<any>", "Promise<any>?",
"USVString", "ArrayBuffer", "ArrayBufferView", "SharedArrayBuffer",
"Uint8Array", "Uint16Array" ]
    # When we can parse RegExp, we need to add it here.
# Try to categorize things a bit to keep list lengths down
def allBut(list1, list2):
return [a for a in list1 if a not in list2 and
(a != "any" and a != "Promise<any>" and a != "Promise<any>?")]
numerics = [ "long", "short", "long?", "short?" ]
booleans = [ "boolean", "boolean?" ]
primitives = numerics + booleans
nonNumerics = allBut(argTypes, numerics)
nonBooleans = allBut(argTypes, booleans)
strings = [ "DOMString", "ByteString", "Enum", "Enum2", "USVString" ]
nonStrings = allBut(argTypes, strings)
nonObjects = primitives + strings
    objects = allBut(argTypes, nonObjects)
bufferSourceTypes = ["ArrayBuffer", "ArrayBufferView", "Uint8Array", "Uint16Array"]
sharedBufferSourceTypes = ["SharedArrayBuffer"]
interfaces = [ "Interface", "Interface?", "AncestorInterface",
"UnrelatedInterface", "ImplementedInterface" ] + bufferSourceTypes + sharedBufferSourceTypes
nullables = ["long?", "short?", "boolean?", "Interface?",
"CallbackInterface?", "optional Dict", "optional Dict2",
"Date?", "any", "Promise<any>?"]
dates = [ "Date", "Date?" ]
sequences = [ "sequence<long>", "sequence<short>" ]
nonUserObjects = nonObjects + interfaces + dates + sequences
otherObjects = allBut(argTypes, nonUserObjects + ["object"])
notRelatedInterfaces = (nonObjects + ["UnrelatedInterface"] +
otherObjects + dates + sequences + bufferSourceTypes + sharedBufferSourceTypes)
mozMaps = [ "MozMap<object>", "MozMap<Dict>", "MozMap<long>" ]
# Build a representation of the distinguishability table as a dict
# of dicts, holding True values where needed, holes elsewhere.
    data = dict()
for type in argTypes:
data[type] = dict()
def setDistinguishable(type, types):
for other in types:
data[type][other] = True
setDistinguishable("long", nonNumerics)
setDistinguishable("short", nonNumerics)
setDistinguishable("long?", allBut(nonNumerics, nullables))
setDistinguishable("short?", allBut(nonNumerics, nullables))
setDistinguishable("boolean", nonBooleans)
setDistinguishable("boolean?", allBut(nonBooleans, nullables))
setDistinguishable("DOMString", nonStrings)
setDistinguishable("ByteString", nonStrings)
setDistinguishable("USVString", nonStrings)
setDistinguishable("Enum", nonStrings)
setDistinguishable("Enum2", nonStrings)
setDistinguishable("Interface", notRelatedInterfaces)
setDistinguishable("Interface?", allBut(notRelatedInterfaces, nullables))
setDistinguishable("AncestorInterface", notRelatedInterfaces)
setDistinguishable("UnrelatedInterface",
allBut(argTypes, ["object", "UnrelatedInterface"]))
setDistinguishable("ImplementedInterface", notRelatedInterfaces)
setDistinguishable("CallbackInterface", nonUserObjects)
setDistinguishable("CallbackInterface?", allBut(nonUserObjects, nullables))
setDistinguishable("CallbackInterface2", nonUserObjects)
setDistinguishable("object", nonObjects)
setDistinguishable("Callback", nonUserObjects)
setDistinguishable("Callback2", nonUserObjects)
setDistinguishable("optional Dict", allBut(nonUserObjects, nullables))
setDistinguishable("optional Dict2", allBut(nonUserObjects, nullables))
setDistinguishable("sequence<long>",
allBut(argTypes, sequences + ["object"]))
setDistinguishable("sequence<short>",
allBut(argTypes, sequences + ["object"]))
setDistinguishable("MozMap<object>", nonUserObjects)
setDistinguishable("MozMap<Dict>", nonUserObjects)
setDistinguishable("MozMap<long>", nonUserObjects)
setDistinguishable("Date", allBut(argTypes, dates + ["object"]))
setDistinguishable("Date?", allBut(argTypes, dates + nullables + ["object"]))
setDistinguishable("any", [])
setDistinguishable("Promise<any>", [])
setDistinguishable("Promise<any>?", [])
setDistinguishable("ArrayBuffer", allBut(argTypes, ["ArrayBuffer", "object"]))
setDistinguishable("ArrayBufferView", allBut(argTypes, ["ArrayBufferView", "Uint8Array", "Uint16Array", "object"]))
setDistinguishable("Uint8Array", allBut(argTypes, ["ArrayBufferView", "Uint8Array", "object"]))
setDistinguishable("Uint16Array", allBut(argTypes, ["ArrayBufferView", "Uint16Array", "object"]))
setDistinguishable("SharedArrayBuffer", allBut(argTypes, ["SharedArrayBuffer", "object"]))
def areDistinguishable(type1, type2):
return data[type1].get(type2, False)
def checkDistinguishability(parser, type1, type2):
idlTemplate = """
enum Enum { "a", "b" };
enum Enum2 { "c", "d" };
interface Interface : AncestorInterface {};
interface AncestorInterface {};
interface UnrelatedInterface {};
interface ImplementedInterface {};
Interface implements ImplementedInterface;
callback interface CallbackInterface {};
callback interface CallbackInterface2 {};
callback Callback = any();
callback Callback2 = long(short arg);
dictionary Dict {};
dictionary Dict2 {};
interface _Promise {};
interface TestInterface {%s
};
"""
methodTemplate = """
void myMethod(%s arg);"""
methods = (methodTemplate % type1) + (methodTemplate % type2)
idl = idlTemplate % methods
parser = parser.reset()
threw = False
try:
parser.parse(idl)
results = parser.finish()
except:
threw = True
if areDistinguishable(type1, type2):
harness.ok(not threw,
"Should not throw for '%s' and '%s' because they are distinguishable" % (type1, type2))
else:
harness.ok(threw,
"Should throw for '%s' and '%s' because they are not distinguishable" % (type1, type2))
# Enumerate over everything in both orders, since order matters in
# terms of our implementation of distinguishability checks
for type1 in argTypes:
for type2 in argTypes:
checkDistinguishability(parser, type1, type2)
| mpl-2.0 |
catapult-project/catapult-csm | third_party/google-endpoints/enum/__init__.py | 105 | 31054 | """Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
version = 1, 1, 6
pyver = float('%s.%s' % _sys.version_info[:2])
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
try:
basestring
except NameError:
# In Python 2 basestring is the ancestor of both str and unicode
# in Python 3 it's just str, but was missing in 3.1
basestring = str
try:
unicode
except NameError:
# In Python 3 unicode no longer exists (it's just str)
unicode = str
class _RouteClassAttributeToGetattr(object):
"""Route attribute access on a class to __getattr__.
This is a descriptor, used to define attributes that act differently when
accessed through an instance and through a class. Instance access remains
normal, but access to an attribute through a class will be routed to the
class's __getattr__ method; this is done by raising AttributeError.
"""
def __init__(self, fget=None):
self.fget = fget
def __get__(self, instance, ownerclass=None):
if instance is None:
raise AttributeError()
return self.fget(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
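# Illustrative behavior of the two predicates above (doctest style; these
# values follow directly from the definitions):
#     >>> _is_dunder('__init__'), _is_dunder('_private_')
#     (True, False)
#     >>> _is_sunder('_order_'), _is_sunder('__order__')
#     (True, False)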
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
"""Track enum member order and ensure member names are not reused.
EnumMeta will use the names found in self._member_names as the
enumeration member names.
"""
def __init__(self):
super(_EnumDict, self).__init__()
self._member_names = []
def __setitem__(self, key, value):
"""Changes anything not dundered or not a descriptor.
If a descriptor is added with the same name as an enum member, the name
is removed from _member_names (this may leave a hole in the numerical
sequence of values).
If an enum member name is used twice, an error is raised; duplicate
values are not checked for.
Single underscore (sunder) names are reserved.
Note: in 3.x __order__ is simply discarded as a not necessary piece
leftover from 2.x
"""
if pyver >= 3.0 and key in ('_order_', '__order__'):
return
elif key == '__order__':
key = '_order_'
if _is_sunder(key):
if key != '_order_':
raise ValueError('_names_ are reserved for future Enum use')
elif _is_dunder(key):
pass
elif key in self._member_names:
# descriptor overwriting an enum?
raise TypeError('Attempted to reuse key: %r' % key)
elif not _is_descriptor(value):
if key in self:
# enum overwriting a descriptor?
raise TypeError('Key already defined as: %r' % self[key])
self._member_names.append(key)
super(_EnumDict, self).__setitem__(key, value)
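        # e.g. (illustrative): listing the same member name twice in one
        # enum body trips the 'Attempted to reuse key' TypeError above at
        # class-definition time.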
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist. This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
"""Metaclass for Enum"""
@classmethod
def __prepare__(metacls, cls, bases):
return _EnumDict()
def __new__(metacls, cls, bases, classdict):
# an Enum class is final once enumeration items have been defined; it
# cannot be mixed with other types (int, float, etc.) if it has an
# inherited __new__ unless a new __new__ is defined (or the resulting
# class will fail).
if type(classdict) is dict:
original_dict = classdict
classdict = _EnumDict()
for k, v in original_dict.items():
classdict[k] = v
member_type, first_enum = metacls._get_mixins_(bases)
__new__, save_new, use_args = metacls._find_new_(classdict, member_type,
first_enum)
# save enum items into separate mapping so they don't get baked into
# the new class
members = dict((k, classdict[k]) for k in classdict._member_names)
for name in classdict._member_names:
del classdict[name]
# py2 support for definition order
_order_ = classdict.get('_order_')
if _order_ is None:
if pyver < 3.0:
try:
_order_ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
except TypeError:
_order_ = [name for name in sorted(members.keys())]
else:
_order_ = classdict._member_names
else:
del classdict['_order_']
if pyver < 3.0:
_order_ = _order_.replace(',', ' ').split()
aliases = [name for name in members if name not in _order_]
_order_ += aliases
# check for illegal enum names (any others?)
invalid_names = set(members) & set(['mro'])
if invalid_names:
raise ValueError('Invalid enum member name(s): %s' % (
', '.join(invalid_names), ))
# save attributes from super classes so we know if we can take
# the shortcut of storing members in the class dict
base_attributes = set([a for b in bases for a in b.__dict__])
# create our new Enum type
enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
enum_class._member_names_ = [] # names in random order
if OrderedDict is not None:
enum_class._member_map_ = OrderedDict()
else:
enum_class._member_map_ = {} # name->value map
enum_class._member_type_ = member_type
# Reverse value->name map for hashable values.
enum_class._value2member_map_ = {}
# instantiate them, checking for duplicates as we go
# we instantiate first instead of checking for duplicates first in case
# a custom __new__ is doing something funky with the values -- such as
# auto-numbering ;)
if __new__ is None:
__new__ = enum_class.__new__
for member_name in _order_:
value = members[member_name]
if not isinstance(value, tuple):
args = (value, )
else:
args = value
if member_type is tuple: # special case for tuple enums
args = (args, ) # wrap it one more time
if not use_args or not args:
enum_member = __new__(enum_class)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = value
else:
enum_member = __new__(enum_class, *args)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = member_type(*args)
value = enum_member._value_
enum_member._name_ = member_name
enum_member.__objclass__ = enum_class
enum_member.__init__(*args)
# If another member with the same value was already defined, the
# new member becomes an alias to the existing one.
for name, canonical_member in enum_class._member_map_.items():
if canonical_member.value == enum_member._value_:
enum_member = canonical_member
break
else:
# Aliases don't appear in member names (only in __members__).
enum_class._member_names_.append(member_name)
# performance boost for any member that would not shadow
# a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
if member_name not in base_attributes:
setattr(enum_class, member_name, enum_member)
# now add to _member_map_
enum_class._member_map_[member_name] = enum_member
try:
# This may fail if value is not hashable. We can't add the value
# to the map, and by-value lookups for this value will be
# linear.
enum_class._value2member_map_[value] = enum_member
except TypeError:
pass
# If a custom type is mixed into the Enum, and it does not know how
# to pickle itself, pickle.dumps will succeed but pickle.loads will
# fail. Rather than have the error show up later and possibly far
# from the source, sabotage the pickle protocol for this class so
# that pickle.dumps also fails.
#
# However, if the new class implements its own __reduce_ex__, do not
# sabotage -- it's on them to make sure it works correctly. We use
# __reduce_ex__ instead of any of the others as it is preferred by
# pickle over __reduce__, and it handles all pickle protocols.
unpicklable = False
if '__reduce_ex__' not in classdict:
if member_type is not object:
methods = ('__getnewargs_ex__', '__getnewargs__',
'__reduce_ex__', '__reduce__')
if not any(m in member_type.__dict__ for m in methods):
_make_class_unpicklable(enum_class)
unpicklable = True
# double check that repr and friends are not the mixin's or various
# things break (such as pickle)
for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
class_method = getattr(enum_class, name)
obj_method = getattr(member_type, name, None)
enum_method = getattr(first_enum, name, None)
if name not in classdict and class_method is not enum_method:
if name == '__reduce_ex__' and unpicklable:
continue
setattr(enum_class, name, enum_method)
        # method resolution and ints are not playing nice;
        # Pythons older than 2.6 use __cmp__
if pyver < 2.6:
if issubclass(enum_class, int):
setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
elif pyver < 3.0:
if issubclass(enum_class, int):
for method in (
'__le__',
'__lt__',
'__gt__',
'__ge__',
'__eq__',
'__ne__',
'__hash__',
):
setattr(enum_class, method, getattr(int, method))
# replace any other __new__ with our own (as long as Enum is not None,
# anyway) -- again, this is to support pickle
if Enum is not None:
# if the user defined their own __new__, save it before it gets
# clobbered in case they subclass later
if save_new:
setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
setattr(enum_class, '__new__', Enum.__dict__['__new__'])
return enum_class
def __bool__(cls):
"""
classes/types should always be True.
"""
return True
def __call__(cls, value, names=None, module=None, type=None, start=1):
"""Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='red green blue')).
When used for the functional API: `module`, if set, will be stored in
the new class' __module__ attribute; `type`, if set, will be mixed in
as the first base class.
Note: if `module` is not set this routine will attempt to discover the
calling module by walking the frame stack; if this is unsuccessful
the resulting class will not be pickleable.
"""
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(value, names, module=module, type=type, start=start)
def __contains__(cls, member):
return isinstance(member, cls) and member.name in cls._member_map_
def __delattr__(cls, attr):
# nicer error message when someone tries to delete an attribute
# (see issue19025).
if attr in cls._member_map_:
raise AttributeError(
"%s: cannot delete Enum member." % cls.__name__)
super(EnumMeta, cls).__delattr__(attr)
def __dir__(self):
return (['__class__', '__doc__', '__members__', '__module__'] +
self._member_names_)
@property
def __members__(cls):
"""Returns a mapping of member name->value.
This mapping lists all enum members, including aliases. Note that this
is a copy of the internal mapping.
"""
return cls._member_map_.copy()
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name)
def __getitem__(cls, name):
return cls._member_map_[name]
def __iter__(cls):
return (cls._member_map_[name] for name in cls._member_names_)
def __reversed__(cls):
return (cls._member_map_[name] for name in reversed(cls._member_names_))
def __len__(cls):
return len(cls._member_names_)
__nonzero__ = __bool__
def __repr__(cls):
return "<enum %r>" % cls.__name__
def __setattr__(cls, name, value):
"""Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
"""
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('Cannot reassign members.')
super(EnumMeta, cls).__setattr__(name, value)
def _create_(cls, class_name, names=None, module=None, type=None, start=1):
"""Convenience method to create a new Enum class.
`names` can be:
* A string containing member names, separated either with spaces or
commas. Values are auto-numbered from 1.
* An iterable of member names. Values are auto-numbered from 1.
* An iterable of (member name, value) pairs.
* A mapping of member name -> value.
"""
if pyver < 3.0:
# if class_name is unicode, attempt a conversion to ASCII
if isinstance(class_name, unicode):
try:
class_name = class_name.encode('ascii')
except UnicodeEncodeError:
raise TypeError('%r is not representable in ASCII' % class_name)
metacls = cls.__class__
if type is None:
bases = (cls, )
else:
bases = (type, cls)
classdict = metacls.__prepare__(class_name, bases)
_order_ = []
# special processing needed for names?
if isinstance(names, basestring):
names = names.replace(',', ' ').split()
if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
names = [(e, i+start) for (i, e) in enumerate(names)]
# Here, names is either an iterable of (name, value) or a mapping.
item = None # in case names is empty
for item in names:
if isinstance(item, basestring):
member_name, member_value = item, names[item]
else:
member_name, member_value = item
classdict[member_name] = member_value
_order_.append(member_name)
# only set _order_ in classdict if name/value was not from a mapping
if not isinstance(item, basestring):
classdict['_order_'] = ' '.join(_order_)
enum_class = metacls.__new__(metacls, class_name, bases, classdict)
# TODO: replace the frame hack if a blessed way to know the calling
# module is ever developed
if module is None:
try:
module = _sys._getframe(2).f_globals['__name__']
except (AttributeError, ValueError):
pass
if module is None:
_make_class_unpicklable(enum_class)
else:
enum_class.__module__ = module
return enum_class
@staticmethod
def _get_mixins_(bases):
"""Returns the type for creating enum members, and the first inherited
enum class.
bases: the tuple of bases that was given to __new__
"""
if not bases or Enum is None:
return object, Enum
# double check that we are not subclassing a class with existing
# enumeration members; while we're at it, see if any other data
# type has been mixed in so we can use the correct __new__
member_type = first_enum = None
for base in bases:
if (base is not Enum and
issubclass(base, Enum) and
base._member_names_):
raise TypeError("Cannot extend enumerations")
# base is now the last base in bases
if not issubclass(base, Enum):
raise TypeError("new enumerations must be created as "
"`ClassName([mixin_type,] enum_type)`")
# get correct mix-in type (either mix-in type of Enum subclass, or
# first base if last base is Enum)
if not issubclass(bases[0], Enum):
member_type = bases[0] # first data type
first_enum = bases[-1] # enum type
else:
for base in bases[0].__mro__:
# most common: (IntEnum, int, Enum, object)
# possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
# <class 'int'>, <Enum 'Enum'>,
# <class 'object'>)
if issubclass(base, Enum):
if first_enum is None:
first_enum = base
else:
if member_type is None:
member_type = base
return member_type, first_enum
if pyver < 3.0:
@staticmethod
def _find_new_(classdict, member_type, first_enum):
"""Returns the __new__ to be used for creating the enum members.
classdict: the class dictionary given to __new__
member_type: the data type whose __new__ will be used by default
first_enum: enumeration to check for an overriding __new__
"""
            # now find the correct __new__, checking to see if one was defined
# by the user; also check earlier enum classes in case a __new__ was
# saved as __member_new__
__new__ = classdict.get('__new__', None)
if __new__:
return None, True, True # __new__, save_new, use_args
N__new__ = getattr(None, '__new__')
O__new__ = getattr(object, '__new__')
if Enum is None:
E__new__ = N__new__
else:
E__new__ = Enum.__dict__['__new__']
# check all possibles for __member_new__ before falling back to
# __new__
for method in ('__member_new__', '__new__'):
for possible in (member_type, first_enum):
try:
target = possible.__dict__[method]
except (AttributeError, KeyError):
target = getattr(possible, method, None)
if target not in [
None,
N__new__,
O__new__,
E__new__,
]:
if method == '__member_new__':
classdict['__new__'] = target
return None, False, True
if isinstance(target, staticmethod):
target = target.__get__(member_type)
__new__ = target
break
if __new__ is not None:
break
else:
__new__ = object.__new__
# if a non-object.__new__ is used then whatever value/tuple was
# assigned to the enum member name will be passed to __new__ and to the
# new enum member's __init__
if __new__ is object.__new__:
use_args = False
else:
use_args = True
return __new__, False, use_args
else:
@staticmethod
def _find_new_(classdict, member_type, first_enum):
"""Returns the __new__ to be used for creating the enum members.
classdict: the class dictionary given to __new__
member_type: the data type whose __new__ will be used by default
first_enum: enumeration to check for an overriding __new__
"""
            # now find the correct __new__, checking to see if one was defined
# by the user; also check earlier enum classes in case a __new__ was
# saved as __member_new__
__new__ = classdict.get('__new__', None)
# should __new__ be saved as __member_new__ later?
save_new = __new__ is not None
if __new__ is None:
# check all possibles for __member_new__ before falling back to
# __new__
for method in ('__member_new__', '__new__'):
for possible in (member_type, first_enum):
target = getattr(possible, method, None)
if target not in (
None,
None.__new__,
object.__new__,
Enum.__new__,
):
__new__ = target
break
if __new__ is not None:
break
else:
__new__ = object.__new__
# if a non-object.__new__ is used then whatever value/tuple was
# assigned to the enum member name will be passed to __new__ and to the
# new enum member's __init__
if __new__ is object.__new__:
use_args = False
else:
use_args = True
return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
# all enum instances are actually created during class construction
# without calling this method; this method is called by the metaclass'
# __call__ (i.e. Color(3) ), and by pickle
if type(value) is cls:
# For lookups like Color(Color.red)
value = value.value
#return value
# by-value search for a matching enum member
# see if it's in the reverse mapping (for hashable values)
try:
if value in cls._value2member_map_:
return cls._value2member_map_[value]
except TypeError:
# not there, now do long search -- O(n) behavior
for member in cls._member_map_.values():
if member.value == value:
return member
raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
return "<%s.%s: %r>" % (
self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
if pyver >= 3.0:
def __dir__(self):
added_behavior = [
m
for cls in self.__class__.mro()
for m in cls.__dict__
if m[0] != '_' and m not in self._member_map_
]
return (['__class__', '__doc__', '__module__', ] + added_behavior)
temp_enum_dict['__dir__'] = __dir__
del __dir__
def __format__(self, format_spec):
# mixed-in Enums should use the mixed-in type's __format__, otherwise
# we can get strange results with the Enum name showing up instead of
# the value
# pure Enum branch
if self._member_type_ is object:
cls = str
val = str(self)
# mix-in branch
else:
cls = self._member_type_
val = self.value
return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Pythons older than 2.6 use __cmp__
if pyver < 2.6:
def __cmp__(self, other):
if type(other) is self.__class__:
if self is other:
return 0
return -1
return NotImplemented
raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__cmp__'] = __cmp__
del __cmp__
else:
def __le__(self, other):
raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__le__'] = __le__
del __le__
def __lt__(self, other):
raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__lt__'] = __lt__
del __lt__
def __ge__(self, other):
raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__ge__'] = __ge__
del __ge__
def __gt__(self, other):
raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__gt__'] = __gt__
del __gt__
def __eq__(self, other):
if type(other) is self.__class__:
return self is other
return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
if type(other) is self.__class__:
return self is not other
return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__
def __hash__(self):
return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__
def __reduce_ex__(self, proto):
return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`. This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
return self._value_
temp_enum_dict['value'] = value
del value
@classmethod
def _convert(cls, name, module, filter, source=None):
"""
Create a new Enum subclass that replaces a collection of global constants
"""
# convert all constants from source (or module) that pass filter() to
# a new Enum called name, and export the enum and its members back to
# module;
# also, replace the __reduce_ex__ method so unpickling works in
# previous Python versions
module_globals = vars(_sys.modules[module])
if source:
source = vars(source)
else:
source = module_globals
members = dict((name, value) for name, value in source.items() if filter(name))
cls = cls(name, members, module=module)
cls.__reduce_ex__ = _reduce_ex_by_name
module_globals.update(cls.__members__)
module_globals[name] = cls
return cls
temp_enum_dict['_convert'] = _convert
del _convert
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict
# Enum has now been created
###########################
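# Illustrative usage of the class built above (doctest style; 'Color' and
# 'Shade' are example names, not part of this module):
#     >>> class Color(Enum):
#     ...     red = 1
#     ...     green = 2
#     >>> Color.red
#     <Color.red: 1>
#     >>> Color(1) is Color.red
#     True
#     >>> Shade = Enum('Shade', 'light dark')  # functional API, values auto-number from 1
#     >>> Shade.light.value
#     1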
class IntEnum(int, Enum):
"""Enum where members are also (and must be) ints"""
def _reduce_ex_by_name(self, proto):
return self.name
def unique(enumeration):
"""Class decorator that ensures only unique members exist in an enumeration."""
duplicates = []
for name, member in enumeration.__members__.items():
if name != member.name:
duplicates.append((name, member.name))
if duplicates:
duplicate_names = ', '.join(
["%s -> %s" % (alias, name) for (alias, name) in duplicates]
)
raise ValueError('duplicate names found in %r: %s' %
(enumeration, duplicate_names)
)
return enumeration
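# Illustrative failure mode (doctest style; 'Mistake' is an example name):
#     >>> @unique
#     ... class Mistake(Enum):
#     ...     one = 1
#     ...     two = 2
#     ...     three = 2   # alias for 'two', so @unique raises
#     Traceback (most recent call last):
#         ...
#     ValueError: duplicate names found in <enum 'Mistake'>: three -> two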
| bsd-3-clause |
acq4/acq4 | acq4/devices/Sensapex/sensapex.py | 3 | 9688 | # -*- coding: utf-8 -*-
from __future__ import print_function
import time
import numpy as np
from acq4.util import Qt
from ..Stage import Stage, MoveFuture, StageInterface
from acq4.drivers.sensapex import SensapexDevice, UMP, UMPError
from acq4.util.Mutex import Mutex
from acq4.util.Thread import Thread
from acq4.pyqtgraph import debug, ptime, SpinBox, Transform3D, solve3DTransform
class Sensapex(Stage):
"""
A Sensapex manipulator.
"""
devices = {}
def __init__(self, man, config, name):
self.devid = config.get('deviceId')
self.scale = config.pop('scale', (1e-9, 1e-9, 1e-9))
self.xPitch = config.pop('xPitch', 0) # angle of x-axis. 0=parallel to xy plane, 90=pointing downward
# sensapex manipulators do not have orthogonal axes, so we set up a 3D transform to compensate:
a = self.xPitch * np.pi / 180.
s = self.scale
pts1 = np.array([ # unit vector in sensapex space
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
])
pts2 = np.array([ # corresponding vector in global space
[0, 0, 0],
[s[0] * np.cos(a), 0, -s[0] * np.sin(a)],
[0, s[1], 0],
[0, 0, s[2]],
])
tr = solve3DTransform(pts1, pts2)
tr[3,3] = 1
self._internalTransform = Transform3D(tr)
self._internalInvTransform = self._internalTransform.inverted()[0]
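        # Illustrative numbers (assuming xPitch=30 and unit scale): a step
        # along the manipulator x axis maps to roughly (0.87, 0, -0.5) in
        # global coordinates, i.e. cos/-sin of the pitch angle, while the
        # y and z axes stay axis-aligned.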
all_devs = UMP.get_ump().list_devices()
if self.devid not in all_devs:
raise Exception("Invalid sensapex device ID %s. Options are: %r" % (self.devid, all_devs))
Stage.__init__(self, man, config, name)
# create handle to this manipulator
# note: n_axes is used in cases where the device is not capable of answering this on its own
self.dev = SensapexDevice(self.devid, callback=self._positionChanged, n_axes=config.get('nAxes'))
# force cache update for this device.
# This should also verify that we have a valid device ID
self.dev.get_pos()
self._lastMove = None
man.sigAbortAll.connect(self.stop)
# clear cached position for this device and re-read to generate an initial position update
self._lastPos = None
self.getPosition(refresh=True)
# TODO: set any extra parameters specified in the config
Sensapex.devices[self.devid] = self
def capabilities(self):
"""Return a structure describing the capabilities of this device"""
if 'capabilities' in self.config:
return self.config['capabilities']
else:
return {
'getPos': (True, True, True),
'setPos': (True, True, True),
'limits': (False, False, False),
}
def stop(self):
"""Stop the manipulator immediately.
"""
with self.lock:
self.dev.stop()
if self._lastMove is not None:
self._lastMove._stopped()
self._lastMove = None
def _getPosition(self):
# Called by superclass when user requests position refresh
with self.lock:
            # using timeout=0 forces a read from the cache (the monitor
            # thread ensures these values are up to date)
pos = self._internalTransform.map(self.dev.get_pos(timeout=0)[:3])
if self._lastPos is None:
dif = 1
else:
dif = ((np.array(pos) - np.array(self._lastPos))**2).sum()**0.5
if dif > 0.1e-6:
self._lastPos = pos
emit = True
else:
emit = False
if emit:
# don't emit signal while locked
self.posChanged(pos)
return pos
def _positionChanged(self, dev, newPos, oldPos):
# called by driver poller when position has changed
self._getPosition()
def targetPosition(self):
with self.lock:
if self._lastMove is None or self._lastMove.isDone():
return self.getPosition()
else:
return self._lastMove.targetPos
def quit(self):
Sensapex.devices.pop(self.devid)
if len(Sensapex.devices) == 0:
UMP.get_ump().poller.stop()
Stage.quit(self)
def _move(self, abs, rel, speed, linear):
with self.lock:
if self._lastMove is not None and not self._lastMove.isDone():
self.stop()
pos = self._toAbsolutePosition(abs, rel)
speed = self._interpretSpeed(speed)
self._lastMove = SensapexMoveFuture(self, pos, speed)
return self._lastMove
#def deviceInterface(self, win):
#return SensapexGUI(self, win)
class MonitorThread(Thread):
"""Thread to poll for all Sensapex manipulator position changes.
"""
def __init__(self):
self.lock = Mutex(recursive=True)
self.stopped = False
Thread.__init__(self)
def start(self):
self.stopped = False
Thread.start(self)
def stop(self):
with self.lock:
self.stopped = True
def run(self):
ump = UMP.get_ump()
devices = Sensapex.devices
while True:
try:
with self.lock:
if self.stopped:
break
# read all updates waiting in queue
devids = ump.recv_all()
if len(devids) == 0:
# no packets in queue; just wait for the next one.
try:
devids = [ump.recv()]
except UMPError as err:
if err.errno == -3:
# ignore timeouts
continue
for devid in devids:
dev = devices.get(devid, None)
if dev is not None:
# received an update packet for this device; ask it to update its position
dev._getPosition()
time.sleep(0.03) # rate-limit updates to 30 Hz
except:
debug.printExc('Error in Sensapex monitor thread:')
time.sleep(1)
class SensapexMoveFuture(MoveFuture):
"""Provides access to a move-in-progress on a Sensapex manipulator.
"""
def __init__(self, dev, pos, speed):
MoveFuture.__init__(self, dev, pos, speed)
self._interrupted = False
self._errorMsg = None
self._finished = False
#pos = np.array(pos) / np.array(self.dev.scale)
pos = self.dev._internalInvTransform.map(pos)
self.dev.dev.goto_pos(pos, speed * 1e6)
def wasInterrupted(self):
"""Return True if the move was interrupted before completing.
"""
return self._interrupted
def isDone(self):
"""Return True if the move is complete.
"""
return self._getStatus() != 0
def _getStatus(self):
# check status of move unless we already know it is complete.
# 0: still moving; 1: finished successfully; -1: finished unsuccessfully
if self._finished:
if self._interrupted:
return -1
else:
return 1
busy = self.dev.dev.is_busy()
if busy:
# Still moving
return 0
# did we reach target?
pos = self.dev._getPosition()
dif = ((np.array(pos) - np.array(self.targetPos))**2).sum()**0.5
if dif < 2.5e-6:
# reached target
self._finished = True
return 1
else:
# missed
self._finished = True
self._interrupted = True
self._errorMsg = "Move did not complete (target=%s, position=%s, dif=%s)." % (self.targetPos, pos, dif)
return -1
def _stopped(self):
# Called when the manipulator is stopped, possibly interrupting this move.
status = self._getStatus()
if status == 1:
# finished; ignore stop
return
elif status == -1:
self._errorMsg = "Move was interrupted before completion."
elif status == 0:
# not actually stopped! This should not happen.
raise RuntimeError("Interrupted move but manipulator is still running!")
else:
raise Exception("Unknown status: %s" % status)
def errorMessage(self):
return self._errorMsg
#class SensapexGUI(StageInterface):
#def __init__(self, dev, win):
#StageInterface.__init__(self, dev, win)
## Insert Sensapex-specific controls into GUI
#self.zeroBtn = Qt.QPushButton('Zero position')
#self.layout.addWidget(self.zeroBtn, self.nextRow, 0, 1, 2)
#self.nextRow += 1
#self.psGroup = Qt.QGroupBox('Rotary Controller')
#self.layout.addWidget(self.psGroup, self.nextRow, 0, 1, 2)
#self.nextRow += 1
#self.psLayout = Qt.QGridLayout()
#self.psGroup.setLayout(self.psLayout)
#self.speedLabel = Qt.QLabel('Speed')
#self.speedSpin = SpinBox(value=self.dev.userSpeed, suffix='m/turn', siPrefix=True, dec=True, limits=[1e-6, 10e-3])
#self.psLayout.addWidget(self.speedLabel, 0, 0)
#self.psLayout.addWidget(self.speedSpin, 0, 1)
#self.zeroBtn.clicked.connect(self.dev.dev.zeroPosition)
#self.speedSpin.valueChanged.connect(lambda v: self.dev.setDefaultSpeed(v))
| mit |
immanetize/nikola | nikola/plugins/compile/markdown/mdx_podcast.py | 2 | 3417 | # -*- coding: utf-8 -*-
#
# Copyright © 2013-2015 Michael Rabbitt, Roberto Alsina and others.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Inspired by "[Python] reStructuredText GitHub Podcast directive"
# (https://gist.github.com/brianhsu/1407759), public domain by Brian Hsu
from __future__ import print_function, unicode_literals
'''
Extension to Python Markdown for Embedded Audio
Basic Example:
>>> import markdown
>>> text = """[podcast]http://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3[/podcast]"""
>>> html = markdown.markdown(text, [PodcastExtension()])
>>> print(html)
<p><audio src="http://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3"></audio></p>
'''
from nikola.plugin_categories import MarkdownExtension
try:
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree
except ImportError:
# No need to catch this, if you try to use this without Markdown,
# the markdown compiler will fail first
Pattern = Extension = object
PODCAST_RE = r'\[podcast\](?P<url>.+)\[/podcast\]'
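# e.g. this pattern matches '[podcast]http://example.com/ep1.mp3[/podcast]'
# and captures the URL in the named group 'url' (illustrative address).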
class PodcastPattern(Pattern):
""" InlinePattern for footnote markers in a document's body text. """
def __init__(self, pattern, configs):
Pattern.__init__(self, pattern)
def handleMatch(self, m):
url = m.group('url').strip()
audio_elem = etree.Element('audio')
audio_elem.set('controls', '')
source_elem = etree.SubElement(audio_elem, 'source')
source_elem.set('src', url)
source_elem.set('type', 'audio/mpeg')
return audio_elem
class PodcastExtension(MarkdownExtension, Extension):
def __init__(self, configs={}):
# set extension defaults
self.config = {}
# Override defaults with user settings
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
podcast_md_pattern = PodcastPattern(PODCAST_RE, self.getConfigs())
podcast_md_pattern.md = md
md.inlinePatterns.add('podcast', podcast_md_pattern, "<not_strong")
md.registerExtension(self)
def makeExtension(configs=None):
return PodcastExtension(configs)
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE +
doctest.REPORT_NDIFF))
| mit |
nstopkimsk/pinpoint | web/src/main/webapp/components/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
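# e.g. _sha256_of_file('bower.json') would print and return the hex digest
# that later becomes the S3 key for the cached tarball (illustrative file
# name; any dependencies file works).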
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
try:
print("Downloading {} tarball from S3...".format(friendly_name))
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
open(NEED_TO_UPLOAD_MARKER, 'a').close()
print(err)
raise SystemExit("Cached {} download failed!".format(friendly_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(friendly_name))
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 4:
raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
mode, friendly_name, dependencies_file, directory = argv
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME, validate=False)
if bucket is None:
raise SystemExit("Could not access bucket!")
dependencies_file_hash = _sha256_of_file(dependencies_file)
key = Key(bucket, dependencies_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
| apache-2.0 |
jumpstarter-io/keystone | keystone/token/providers/pki.py | 5 | 1918 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keystone PKI Token Provider"""
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from keystone.common import environment
from keystone.common import utils
from keystone import exception
from keystone.i18n import _, _LE
from keystone.token.providers import common
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class Provider(common.BaseProvider):
def _get_token_id(self, token_data):
try:
# force conversion to a string as the keystone client cms code
# produces unicode. This can be removed if the client returns
# str()
# TODO(ayoung): Make to a byte_str for Python3
token_json = jsonutils.dumps(token_data, cls=utils.PKIEncoder)
token_id = str(cms.cms_sign_token(token_json,
CONF.signing.certfile,
CONF.signing.keyfile))
return token_id
except environment.subprocess.CalledProcessError:
LOG.exception(_LE('Unable to sign token'))
raise exception.UnexpectedError(_(
'Unable to sign token.'))
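    # Sketch of the inverse operation (an assumption for illustration:
    # keystoneclient's cms module also ships a verify helper; check its
    # documentation for the exact signature before relying on this):
    #     token_json = cms.cms_verify(token_id, CONF.signing.certfile,
    #                                 CONF.signing.ca_certs)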
def needs_persistence(self):
"""Should the token be written to a backend."""
return True
| apache-2.0 |
GoogleCloudPlatform/cloudml-samples | chainer/containers/quickstart/mnist/trainer/mnist.py | 1 | 6554 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import six
import subprocess
import hypertune
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer import serializers
MODEL_FILE_NAME = 'chainer.model'
class Net(chainer.Chain):
def __init__(self):
super(Net, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(1, 10, ksize=5)
self.conv2 = L.Convolution2D(10, 20, ksize=5)
self.fc1 = L.Linear(None, 50)
self.fc2 = L.Linear(None, 10)
def forward(self, x):
x = F.relu(F.max_pooling_2d(self.conv1(x), 2))
x = F.relu(F.max_pooling_2d(F.dropout(self.conv2(x)), 2))
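        # After two 5x5 convolutions and two 2x2 max-pools, a 28x28 MNIST
        # image shrinks to 20 channels of 4x4, hence the flatten to 320.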
x = F.reshape(F.flatten(x), (-1, 320))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return x
class HpReport(chainer.training.Extension):
"""Trainer extension for hyper parameter tuning with CMLE.
Args:
log_report (str or LogReport): Log report to accumulate the
observations. This is either the name of a LogReport extensions
registered to the trainer, or a LogReport instance to use
internally.
        hp_global_step: log key for the global step (the epoch by default)
        hp_metric_val: log key of the metric value to report
        hp_metric_tag: user-defined metric tag passed to hypertune
"""
def __init__(self,
log_report='LogReport',
hp_global_step='epoch',
hp_metric_val='validation/main/loss',
hp_metric_tag='loss'):
self._log_report = log_report
self._log_len = 0 # number of observations already done
self._hp_global_step = hp_global_step
self._hp_metric_val = hp_metric_val
self._hp_metric_tag = hp_metric_tag
def __call__(self, trainer):
log_report = self._log_report
if isinstance(log_report, str):
log_report = trainer.get_extension(log_report)
        elif isinstance(log_report, extensions.LogReport):
            log_report(trainer)  # update the log report
else:
raise TypeError('log report has a wrong type %s' %
type(log_report))
log = log_report.log
log_len = self._log_len
hpt = hypertune.HyperTune()
while len(log) > log_len:
target_log = log[log_len]
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag=self._hp_metric_tag,
metric_value=target_log[self._hp_metric_val],
global_step=target_log[self._hp_global_step])
log_len += 1
        self._log_len = log_len
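# Illustrative wiring (a sketch; main() below does exactly this):
#     trainer.extend(HpReport(hp_metric_val='validation/main/loss',
#                             hp_metric_tag='my_loss'))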
def get_args():
"""Argument parser.
Returns:
Dictionary of arguments.
"""
parser = argparse.ArgumentParser(description='Chainer MNIST Example')
parser.add_argument(
'--batch-size',
type=int,
default=100,
metavar='N',
help='input batch size for training (default: 100)')
parser.add_argument(
'--test-batch-size',
type=int,
default=1000,
metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument(
'--epochs',
type=int,
default=10,
metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument(
'--lr',
type=float,
default=0.01,
metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument(
'--momentum',
type=float,
default=0.5,
metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument(
'--model-dir',
default=None,
help='The directory to store the model')
parser.add_argument(
'--gpu',
type=int,
default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument(
'--resume',
action='store_true',
help='Resume training')
args = parser.parse_args()
return args
def main():
# Training settings
args = get_args()
# Set up a neural network to train
model = L.Classifier(Net())
if args.gpu >= 0:
# Make a specified GPU current
chainer.backends.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=args.momentum)
optimizer.setup(model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist(ndim=3)
train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
test_iter = chainer.iterators.SerialIterator(test, args.test_batch_size,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epochs, 'epoch'))
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Print selected entries of the log to stdout
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Send selected entries of the log to CMLE HP tuning system
trainer.extend(
HpReport(hp_metric_val='validation/main/loss', hp_metric_tag='my_loss'))
if args.resume:
# Resume from a snapshot
tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
if not os.path.exists(tmp_model_file):
subprocess.check_call([
'gsutil', 'cp', os.path.join(args.model_dir, MODEL_FILE_NAME),
tmp_model_file])
if os.path.exists(tmp_model_file):
chainer.serializers.load_npz(tmp_model_file, trainer)
trainer.run()
if args.model_dir:
tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
serializers.save_npz(tmp_model_file, model)
subprocess.check_call([
'gsutil', 'cp', tmp_model_file,
os.path.join(args.model_dir, MODEL_FILE_NAME)])
if __name__ == '__main__':
main()
| apache-2.0 |
Zeppen/xbmc-pvr-spotify | lib/freetype/src/tools/docmaker/content.py | 74 | 17630 | # Content (c) 2002, 2004, 2006, 2007, 2008, 2009
# David Turner <david@freetype.org>
#
# This file contains routines used to parse the content of documentation
# comment blocks and build more structured objects out of them.
#
from sources import *
from utils import *
import string, re
# this regular expression is used to detect code sequences. these
# are simply code fragments embedded in '{' and '}' like in:
#
# {
# x = y + z;
# if ( zookoo == 2 )
# {
# foobar();
# }
# }
#
# note that indentation of the opening and closing braces must be
# exactly the same. the code sequence can contain braces at greater
# indentation
#
re_code_start = re.compile( r"(\s*){\s*$" )
re_code_end = re.compile( r"(\s*)}\s*$" )
# this regular expression is used to isolate identifiers from
# other text
#
re_identifier = re.compile( r'(\w*)' )
# we collect macros ending in `_H'; while outputting the object data, we use
# this info together with the object's file location to emit the appropriate
# header file macro and name before the object itself
#
re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' )
#############################################################################
#
# The DocCode class is used to store source code lines.
#
# 'self.lines' contains a set of source code lines that will be dumped as
# HTML in a <PRE> tag.
#
# The object is filled line by line by the parser; it strips the leading
# "margin" space from each input line before storing it in 'self.lines'.
#
class DocCode:
def __init__( self, margin, lines ):
self.lines = []
self.words = None
# remove margin spaces
for l in lines:
if string.strip( l[:margin] ) == "":
l = l[margin:]
self.lines.append( l )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
result = []
for l in self.lines:
result.append( " " * margin + l )
return result
#############################################################################
#
# The DocPara class is used to store "normal" text paragraph.
#
# 'self.words' contains the list of words that make up the paragraph
#
class DocPara:
def __init__( self, lines ):
self.lines = None
self.words = []
for l in lines:
l = string.strip( l )
self.words.extend( string.split( l ) )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
cur = "" # current line
col = 0 # current width
result = []
for word in self.words:
ln = len( word )
if col > 0:
ln = ln + 1
if col + ln > width:
result.append( " " * margin + cur )
cur = word
col = len( word )
else:
if col > 0:
cur = cur + " "
cur = cur + word
col = col + ln
if col > 0:
result.append( " " * margin + cur )
return result
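    # Illustrative wrap (doctest style; follows from the algorithm above):
    #     >>> p = DocPara( ["hello world this is a", "wrapped paragraph"] )
    #     >>> p.dump_lines( margin = 2, width = 12 )
    #     ['  hello world', '  this is a', '  wrapped', '  paragraph']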
#############################################################################
#
# The DocField class is used to store a list containing either DocPara or
# DocCode objects. Each DocField also has an optional "name" which is used
# when the object corresponds to a field or value definition
#
class DocField:
def __init__( self, name, lines ):
self.name = name # can be None for normal paragraphs/sources
self.items = [] # list of items
mode_none = 0 # start parsing mode
mode_code = 1 # parsing code sequences
mode_para = 3 # parsing normal paragraph
margin = -1 # current code sequence indentation
cur_lines = []
# now analyze the markup lines to see if they contain paragraphs,
# code sequences or fields definitions
#
start = 0
mode = mode_none
for l in lines:
# are we parsing a code sequence ?
if mode == mode_code:
m = re_code_end.match( l )
if m and len( m.group( 1 ) ) <= margin:
# that's it, we finished the code sequence
code = DocCode( 0, cur_lines )
self.items.append( code )
margin = -1
cur_lines = []
mode = mode_none
else:
# nope, continue the code sequence
cur_lines.append( l[margin:] )
else:
# start of code sequence ?
m = re_code_start.match( l )
if m:
# save current lines
if cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
# switch to code extraction mode
margin = len( m.group( 1 ) )
mode = mode_code
else:
if not string.split( l ) and cur_lines:
# if the line is empty, we end the current paragraph,
# if any
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
else:
# otherwise, simply add the line to the current
# paragraph
cur_lines.append( l )
if mode == mode_code:
# unexpected end of code sequence
code = DocCode( margin, cur_lines )
self.items.append( code )
elif cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
def dump( self, prefix = "" ):
        if self.name:
            print prefix + self.name + " ::"
prefix = prefix + "----"
first = 1
for p in self.items:
if not first:
print ""
p.dump( prefix )
first = 0
def dump_lines( self, margin = 0, width = 60 ):
result = []
nl = None
for p in self.items:
if nl:
result.append( "" )
result.extend( p.dump_lines( margin, width ) )
nl = 1
return result
# this regular expression is used to detect field definitions
#
re_field = re.compile( r"\s*(\w*|\w(\w|\.)*\w)\s*::" )
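# e.g. it matches lines such as "margin ::" or "face.size :: the size
# object", capturing "margin" / "face.size" as the field name.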
class DocMarkup:
def __init__( self, tag, lines ):
self.tag = string.lower( tag )
self.fields = []
cur_lines = []
field = None
mode = 0
for l in lines:
m = re_field.match( l )
if m:
# we detected the start of a new field definition
# first, save the current one
if cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
cur_lines = []
field = None
field = m.group( 1 ) # record field name
ln = len( m.group( 0 ) )
l = " " * ln + l[ln:]
cur_lines = [l]
else:
cur_lines.append( l )
if field or cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
def get_name( self ):
try:
return self.fields[0].items[0].words[0]
except:
return None
def get_start( self ):
try:
result = ""
for word in self.fields[0].items[0].words:
result = result + " " + word
return result[1:]
except:
return "ERROR"
def dump( self, margin ):
print " " * margin + "<" + self.tag + ">"
for f in self.fields:
f.dump( " " )
print " " * margin + "</" + self.tag + ">"
class DocChapter:
def __init__( self, block ):
self.block = block
self.sections = []
if block:
self.name = block.name
self.title = block.get_markup_words( "title" )
self.order = block.get_markup_words( "sections" )
else:
self.name = "Other"
self.title = string.split( "Miscellaneous" )
self.order = []
class DocSection:
def __init__( self, name = "Other" ):
self.name = name
self.blocks = {}
self.block_names = [] # ordered block names in section
self.defs = []
self.abstract = ""
self.description = ""
self.order = []
self.title = "ERROR"
self.chapter = None
def add_def( self, block ):
self.defs.append( block )
def add_block( self, block ):
self.block_names.append( block.name )
self.blocks[block.name] = block
def process( self ):
# look up one block that contains a valid section description
for block in self.defs:
title = block.get_markup_text( "title" )
if title:
self.title = title
self.abstract = block.get_markup_words( "abstract" )
self.description = block.get_markup_items( "description" )
self.order = block.get_markup_words( "order" )
return
def reorder( self ):
self.block_names = sort_order_list( self.block_names, self.order )
class ContentProcessor:
def __init__( self ):
"""initialize a block content processor"""
self.reset()
self.sections = {} # dictionary of documentation sections
self.section = None # current documentation section
self.chapters = [] # list of chapters
self.headers = {} # dictionary of header macros
def set_section( self, section_name ):
"""set current section during parsing"""
if not self.sections.has_key( section_name ):
section = DocSection( section_name )
self.sections[section_name] = section
self.section = section
else:
self.section = self.sections[section_name]
def add_chapter( self, block ):
chapter = DocChapter( block )
self.chapters.append( chapter )
def reset( self ):
"""reset the content processor for a new block"""
self.markups = []
self.markup = None
self.markup_lines = []
def add_markup( self ):
"""add a new markup section"""
if self.markup and self.markup_lines:
# get rid of last line of markup if it's empty
marks = self.markup_lines
if len( marks ) > 0 and not string.strip( marks[-1] ):
self.markup_lines = marks[:-1]
m = DocMarkup( self.markup, self.markup_lines )
self.markups.append( m )
self.markup = None
self.markup_lines = []
def process_content( self, content ):
"""process a block content and return a list of DocMarkup objects
corresponding to it"""
markup = None
markup_lines = []
first = 1
for line in content:
found = None
for t in re_markup_tags:
m = t.match( line )
if m:
found = string.lower( m.group( 1 ) )
prefix = len( m.group( 0 ) )
line = " " * prefix + line[prefix:] # remove markup from line
break
# is it the start of a new markup section ?
if found:
first = 0
self.add_markup() # add current markup content
self.markup = found
if len( string.strip( line ) ) > 0:
self.markup_lines.append( line )
elif first == 0:
self.markup_lines.append( line )
self.add_markup()
return self.markups
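    # Illustrative sketch (assumed markup syntax, not part of the original
    # file): given documentation comment lines such as
    #
    #     <description>
    #        computes the sum of two numbers
    #
    # process_content() would return a single DocMarkup whose tag is
    # "description" and whose fields hold the indented paragraph text.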
def parse_sources( self, source_processor ):
blocks = source_processor.blocks
count = len( blocks )
for n in range( count ):
source = blocks[n]
if source.content:
# this is a documentation comment, we need to catch
# all following normal blocks in the "follow" list
#
follow = []
m = n + 1
while m < count and not blocks[m].content:
follow.append( blocks[m] )
m = m + 1
doc_block = DocBlock( source, follow, self )
def finish( self ):
# process all sections to extract their abstract, description
# and ordered list of items
#
for sec in self.sections.values():
sec.process()
# process chapters to check that all sections are correctly
# listed there
for chap in self.chapters:
for sec in chap.order:
if self.sections.has_key( sec ):
section = self.sections[sec]
section.chapter = chap
section.reorder()
chap.sections.append( section )
else:
sys.stderr.write( "WARNING: chapter '" + \
chap.name + "' in " + chap.block.location() + \
" lists unknown section '" + sec + "'\n" )
# check that all sections are in a chapter
#
others = []
for sec in self.sections.values():
if not sec.chapter:
others.append( sec )
# create a new special chapter for all remaining sections
# when necessary
#
if others:
chap = DocChapter( None )
chap.sections = others
self.chapters.append( chap )
class DocBlock:
def __init__( self, source, follow, processor ):
processor.reset()
self.source = source
self.code = []
self.type = "ERRTYPE"
self.name = "ERRNAME"
self.section = processor.section
self.markups = processor.process_content( source.content )
# compute block type from first markup tag
try:
self.type = self.markups[0].tag
except:
pass
# compute block name from first markup paragraph
try:
markup = self.markups[0]
para = markup.fields[0].items[0]
name = para.words[0]
m = re_identifier.match( name )
if m:
name = m.group( 1 )
self.name = name
except:
pass
if self.type == "section":
# detect new section starts
processor.set_section( self.name )
processor.section.add_def( self )
elif self.type == "chapter":
# detect new chapter
processor.add_chapter( self )
else:
processor.section.add_block( self )
        # now, compute the source lines relevant to this documentation
        # block; normal comments are kept so that they also appear in the
        # generated output
source = []
for b in follow:
if b.format:
break
for l in b.lines:
# collect header macro definitions
m = re_header_macro.match( l )
if m:
                processor.headers[m.group( 2 )] = m.group( 1 )
# we use "/* */" as a separator
if re_source_sep.match( l ):
break
source.append( l )
# now strip the leading and trailing empty lines from the sources
start = 0
end = len( source ) - 1
while start < end and not string.strip( source[start] ):
start = start + 1
while start < end and not string.strip( source[end] ):
end = end - 1
if start == end:
self.code = []
else:
self.code = source[start:end + 1]
def location( self ):
return self.source.location()
def get_markup( self, tag_name ):
"""return the DocMarkup corresponding to a given tag in a block"""
for m in self.markups:
if m.tag == string.lower( tag_name ):
return m
return None
def get_markup_name( self, tag_name ):
"""return the name of a given primary markup in a block"""
try:
m = self.get_markup( tag_name )
return m.get_name()
except:
return None
def get_markup_words( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items[0].words
except:
return []
def get_markup_text( self, tag_name ):
result = self.get_markup_words( tag_name )
return string.join( result )
def get_markup_items( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items
except:
return None
# eof
| gpl-2.0 |
trueblue2704/AskMeAnything | lib/python2.7/site-packages/setuptools/tests/test_dist_info.py | 148 | 2261 | """Test .dist-info style distributions.
"""
import os
import shutil
import tempfile
import pytest
import pkg_resources
from .textwrap import DALS
class TestDistInfo:
def test_distinfo(self):
dists = dict(
(d.project_name, d)
for d in pkg_resources.find_distributions(self.tmpdir)
)
assert len(dists) == 2, dists
unversioned = dists['UnversionedDistribution']
versioned = dists['VersionedDistribution']
assert versioned.version == '2.718' # from filename
assert unversioned.version == '0.3' # from METADATA
@pytest.mark.importorskip('ast')
def test_conditional_dependencies(self):
specs = 'splort==4', 'quux>=1.1'
requires = list(map(pkg_resources.Requirement.parse, specs))
for d in pkg_resources.find_distributions(self.tmpdir):
assert d.requires() == requires[:1]
assert d.requires(extras=('baz',)) == requires
assert d.extras == ['baz']
metadata_template = DALS("""
Metadata-Version: 1.2
Name: {name}
{version}
Requires-Dist: splort (==4)
Provides-Extra: baz
Requires-Dist: quux (>=1.1); extra == 'baz'
""")
def setup_method(self, method):
self.tmpdir = tempfile.mkdtemp()
dist_info_name = 'VersionedDistribution-2.718.dist-info'
versioned = os.path.join(self.tmpdir, dist_info_name)
os.mkdir(versioned)
with open(os.path.join(versioned, 'METADATA'), 'w+') as metadata_file:
metadata = self.metadata_template.format(
name='VersionedDistribution',
version='',
).replace('\n\n', '\n')
metadata_file.write(metadata)
dist_info_name = 'UnversionedDistribution.dist-info'
unversioned = os.path.join(self.tmpdir, dist_info_name)
os.mkdir(unversioned)
with open(os.path.join(unversioned, 'METADATA'), 'w+') as metadata_file:
metadata = self.metadata_template.format(
name='UnversionedDistribution',
version='Version: 0.3',
)
metadata_file.write(metadata)
def teardown_method(self, method):
shutil.rmtree(self.tmpdir)
| mit |
bitcoinplusorg/xbcwalletsource | contrib/devtools/clang-format-diff.py | 90 | 6192 | #!/usr/bin/env python
#
#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License.
#
# ============================================================
#
# University of Illinois/NCSA
# Open Source License
#
# Copyright (c) 2007-2015 University of Illinois at Urbana-Champaign.
# All rights reserved.
#
# Developed by:
#
# LLVM Team
#
# University of Illinois at Urbana-Champaign
#
# http://llvm.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the names of the LLVM Team, University of Illinois at
# Urbana-Champaign, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
# SOFTWARE.
#
# ============================================================
#
#===------------------------------------------------------------------------===#
r"""
ClangFormat Diff Reformatter
============================
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | clang-format-diff.py -p1 -i
svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i
"""
import argparse
import difflib
import re
import string
import subprocess
import StringIO
import sys
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
def main():
parser = argparse.ArgumentParser(description=
'Reformat changed lines in diff. Without -i '
'option just output the diff that would be '
'introduced.')
parser.add_argument('-i', action='store_true', default=False,
help='apply edits to files instead of displaying a diff')
parser.add_argument('-p', metavar='NUM', default=0,
help='strip the smallest prefix containing P slashes')
parser.add_argument('-regex', metavar='PATTERN', default=None,
help='custom pattern selecting file paths to reformat '
'(case sensitive, overrides -iregex)')
parser.add_argument('-iregex', metavar='PATTERN', default=
r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto'
r'|protodevel|java)',
help='custom pattern selecting file paths to reformat '
'(case insensitive, overridden by -regex)')
parser.add_argument('-sort-includes', action='store_true', default=False,
help='let clang-format sort include blocks')
parser.add_argument('-v', '--verbose', action='store_true',
help='be more verbose, ineffective without -i')
args = parser.parse_args()
# Extract changed lines for each file.
filename = None
lines_by_file = {}
for line in sys.stdin:
match = re.search('^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
if match:
filename = match.group(2)
    if filename is None:
continue
if args.regex is not None:
if not re.match('^%s$' % args.regex, filename):
continue
else:
if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
continue
match = re.search('^@@.*\+(\d+)(,(\d+))?', line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count == 0:
continue
      end_line = start_line + line_count - 1
lines_by_file.setdefault(filename, []).extend(
['-lines', str(start_line) + ':' + str(end_line)])
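  # Worked example (illustrative): a hunk header such as "@@ -10,0 +42,3 @@"
  # matches with start_line=42 and line_count=3, so ['-lines', '42:44'] is
  # recorded for that file and later passed to clang-format.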
# Reformat files containing changes in place.
for filename, lines in lines_by_file.iteritems():
if args.i and args.verbose:
print 'Formatting', filename
command = [binary, filename]
if args.i:
command.append('-i')
if args.sort_includes:
command.append('-sort-includes')
command.extend(lines)
command.extend(['-style=file', '-fallback-style=none'])
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=None, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
      sys.exit(p.returncode)
if not args.i:
with open(filename) as f:
code = f.readlines()
formatted_code = StringIO.StringIO(stdout).readlines()
diff = difflib.unified_diff(code, formatted_code,
filename, filename,
'(before formatting)', '(after formatting)')
diff_string = string.join(diff, '')
if len(diff_string) > 0:
sys.stdout.write(diff_string)
if __name__ == '__main__':
main()
| mit |
SecHackLabs/WebHackSHL | modules/johnmod.py | 1 | 7131 | #!/usr/bin/python3
# encoding: utf-8
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from subprocess import call
import os
from modules import checker
from modules import hashid
hashtypelist=['7z','7z-opencl','AFS','agilekeychain','agilekeychain-opencl','aix-smd5','aix-ssha1','aix-ssha256','aix-ssha512','asa-md5','bcrypt','bcrypt-opencl','bfegg','Bitcoin','blackberry-es10','Blockchain','blockchain-opencl','bsdicrypt','chap','Citrix_NS10','Clipperz','cloudkeychain','cq','CRC32','crypt','dahua','descrypt','descrypt-opencl','Django','django-scrypt','dmd5','dmg','dmg-opencl','dominosec','dragonfly3-32','dragonfly3-64','dragonfly4-32','dragonfly4-64','Drupal7','dummy','dynamic_n','eCryptfs','EFS','eigrp','EncFS','encfs-opencl','EPI','EPiServer','fde','FormSpring','Fortigate','gost','gpg','gpg-opencl','HAVAL-128-4','HAVAL-256-3','hdaa','HMAC-MD5','HMAC-SHA1','HMAC-SHA224','HMAC-SHA256','HMAC-SHA384','HMAC-SHA512','hMailServer','hsrp','IKE','ipb2','KeePass','keychain','keychain-opencl','keyring','keyring-opencl','keystore','known_hosts','krb4','krb5','krb5-18','krb5pa-md5','krb5pa-md5-opencl','krb5pa-sha1','krb5pa-sha1-opencl','kwallet','LastPass','LM','lotus5','lotus5-opencl','lotus85','LUKS','MD2','md4-gen','md5crypt','md5crypt-opencl','md5ns','mdc2','MediaWiki','MongoDB','Mozilla','mscash','mscash2','mscash2-opencl','MSCHAPv2','mschapv2-naive','mssql','mssql05','mssql12','mysql','mysql-sha1','mysql-sha1-opencl','mysqlna','net-md5','net-sha1','nethalflm','netlm','netlmv2','netntlm','netntlm-naive','netntlmv2','nk','nsldap','NT','nt-opencl','nt2','ntlmv2-opencl','o5logon','o5logon-opencl','ODF','ODF-AES-opencl','ODF-opencl','Office','Office2007-opencl','office2010-opencl','office2013-opencl','oldoffice','oldoffice-opencl','OpenBSD-SoftRAID','openssl-enc','OpenVMS','oracle','oracle11','osc','Panama','PBKDF2-HMAC-SHA1','PBKDF2-HMAC-SHA1-opencl','PBKDF2-HMAC-SHA256','PBKDF2-HMAC-SHA256-opencl','PBKDF2-HMAC-SHA512','pbkdf2-hmac-sha512-opencl','PDF','PFX','phpass','phpass-opencl','PHPS','pix-md5','PKZIP','po','postgres','PST','PuTTY','pwsafe','pwsafe-opencl','RACF','RAdmin','RAKP','RAKP-opencl','rar','rar-opencl','RAR5','RAR5-opencl','Raw-Blake2','Raw-Keccak','Raw-Keccak-256','Raw-MD4','Raw-MD4-opencl','Raw-MD5','Raw-MD5-opencl','Raw-MD5u','Raw-SHA','Raw-SHA1','Raw-SHA1-Linkedin','Raw-SHA1-ng','Raw-SHA1-opencl','Raw-SHA224','Raw-SHA256','Raw-SHA256-ng','Raw-SHA256-opencl','Raw-SHA384','Raw-SHA512','Raw-SHA512-ng','Raw-SHA512-opencl','ripemd-128','ripemd-160','rsvp','Salted-SHA1','sapb','sapg','scrypt','sha1-gen','sha1crypt','sha1crypt-opencl','sha256crypt','sha256crypt-opencl','sha512crypt','sha512crypt-opencl','Siemens-S7','SIP','skein-256','skein-512','skey','Snefru-128','Snefru-256','SSH','SSH-ng','ssha-opencl','SSHA512','STRIP','strip-opencl','SunMD5','sxc','sxc-opencl','Sybase-PROP','sybasease','tc_aes_xts','tc_ripemd160','tc_sha512','tc_whirlpool','tcp-md5','Tiger','tripcode','VNC','vtp','wbb3','whirlpool','whirlpool0','whirlpool1','WoWSRP','wpapsk','wpapsk-opencl','xsha','xsha512','XSHA512-opencl','ZIP','zip-opencl']
def getwordlist():
global wdlist
print ("""\n¿Que tipo de WordList deseas usar?
a) Lista por defecto de WebHackSHL (RockYou).
b) Una WordList que esta en otro directorio.
""")
wliste=input("Introduce tu opción: ")
if wliste=="a":
wdlist="modules/wordlists/rockyou.txt"
arewdlist=os.path.isfile(wdlist)
if arewdlist:
return wdlist
else:
print ("\nAl parecer no tienes las wordlist necesarias para empezar el ataque.")
descargarwdls()
elif wliste=="b":
wdlist=input("Introduce el PATH donde se encuentra la wordlist: ")
if os.path.isfile(wdlist):
return wdlist
else:
print ("El archivo no existe.")
getwordlist()
else:
print ("Opcion invalida, intentalo de nuevo.")
getwordlist()
def descargarwdls():
    try:
        print ("\nDo you want to download the Kali Linux wordlist?")
        wordlstkali=input("Enter an option y/n: ")
        if wordlstkali == "y":
            print ("Downloading packages ...")
            os.system("git clone git://git.kali.org/packages/wordlists.git modules/wordlists")
            print ("Decompressing wordlist...")
            sinmp=os.system("cd modules/wordlists && gzip -d rockyou.txt.gz")
            if sinmp == 0:
                print ("Decompression successful.")
                print ("Returning to the previous menu, everything is ready to crack.")
                hashid.menu()
            else:
                print ("An error occurred, exiting.\n")
                hashid.menu()
        elif wordlstkali == "n":
            print ("You must have a wordlist in order to use this.")
            hashid.menu()
        else:
            print ("Invalid option, try again.")
            descargarwdls()
    except KeyboardInterrupt:
        print ("Exiting.")
def gethash():
    hashatt=input("Enter the hash: ")
    archash=open('hash.txt','w')
    archash.write(hashatt)
    archash.close()
    if hashatt == "":
        checker.cRojo("Invalid hash / no hash was entered.")
        gethash()
def hashdecrypt(hashtype):
getwordlist()
gethash()
print ("\nIniciando desencriptación del Hash...")
print ("Intentando desecriptar el hash de tipo ",hashtype,".")
call(["john","--format="+hashtype,"--wordlist="+wdlist,"hash.txt"])
verdecrypt()
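# Illustrative note (not part of the original module): for a raw MD5 hash
# and the default wordlist, the call above is equivalent to running
#   john --format=Raw-MD5 --wordlist=modules/wordlists/rockyou.txt hash.txt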
def verdecrypt():
    verhash=input("\nDo you want to see the decrypted hashes (if there are any)? y/n: ")
    if verhash == "y":
        print ("Printing the decrypted hashes.")
        call(["john","--show","hash.txt"])
    elif verhash == "n":
        print ("Returning to the previous menu.\n")
    else:
        print ("Invalid option, returning to the previous menu.\n")
def md5hash():
hashdecrypt("Raw-MD5")
def sha1hash():
hashdecrypt("Raw-SHA1")
def mysqlhash():
hashdecrypt("mysql")
def djangohash():
hashdecrypt("Django")
def anyhash():
    checker.cAmarillo("\nThe list of hash types that can be decrypted is:\n")
    checker.cAmarillo(hashtypelist)
    hashtyp=input("\nEnter the hash type you want to decrypt using John The Ripper: ")
    if hashtyp in hashtypelist:
        hashdecrypt(hashtyp)
    else:
        print ("The specified hash type is not valid, try again.\n")
        anyhash()
| gpl-3.0 |
yasserglez/pytiger2c | packages/pytiger2c/ast/integerliteralexpressionnode.py | 1 | 2204 | # -*- coding: utf-8 -*-
"""
C{IntegerLiteralExpressionNode} class of the abstract syntax tree.
"""
from pytiger2c.ast.valuedexpressionnode import ValuedExpressionNode
from pytiger2c.types.integertype import IntegerType
class IntegerLiteralExpressionNode(ValuedExpressionNode):
"""
    C{IntegerLiteralExpressionNode} class of the abstract syntax tree.
    Represents an integer number literal in the Tiger language. The return
    value of this expression will always be C{IntegerType}.
"""
def _get_integer(self):
"""
        Getter method for the C{integer} property.
"""
return self._integer
integer = property(_get_integer)
def __init__(self, integer):
"""
        Initializes the C{IntegerLiteralExpressionNode} class.
        @type integer: C{int}
        @param integer: Value of the literal integer number.
"""
super(IntegerLiteralExpressionNode, self).__init__()
self._integer = integer
def check_semantics(self, scope, errors):
"""
        For information about the parameters received by this method,
        see the documentation of the C{check_semantics} method of the
        C{LanguageNode} class.
        This abstract syntax tree node does not require semantic checking;
        it only sets the return type of the node, which will always be
        C{IntegerType}.
"""
self._scope = scope
self._return_type = IntegerType()
def generate_code(self, generator):
"""
        Generates the code corresponding to the Tiger language construct
        represented by this node.
        For information about the parameters received by this method,
        see the documentation of the C{generate_code} method of the
        C{LanguageNode} class.
"""
self.scope.generate_code(generator)
int_code_type = IntegerType().code_type
local_var = generator.define_local(int_code_type)
generator.add_statement('{0} = {1};'.format(local_var, self.integer))
self._code_name = local_var
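        # Illustrative sketch (assumed generator behaviour, not part of the
        # original file): for the literal 42, if define_local() returned a
        # name such as "v0", the emitted C statement would be "v0 = 42;".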
| mit |
ischwabacher/seaborn | seaborn/algorithms.py | 35 | 6889 | """Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
from .external.six.moves import range
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
smooth : bool, default False
        If True, performs a smoothed bootstrap (draws samples from a kernel
        density estimate); only works for one-dimensional inputs and cannot
        be used when `units` is present.
func : callable, default np.mean
Function to call on the args that are passed in.
random_seed : int | None, default None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
smooth = kwargs.get("smooth", False)
random_seed = kwargs.get("random_seed", None)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rs = np.random.RandomState(random_seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Do the bootstrap
if smooth:
return _smooth_bootstrap(args, n_boot, func, func_kwargs)
if units is not None:
return _structured_bootstrap(args, n_boot, units, func,
func_kwargs, rs)
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n, n)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
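# Example usage (illustrative, not part of the original module):
#
#     import numpy as np
#     from seaborn.algorithms import bootstrap
#     data = np.random.RandomState(0).normal(size=100)
#     boot_means = bootstrap(data, func=np.mean, n_boot=1000, random_seed=0)
#     ci = np.percentile(boot_means, [2.5, 97.5])  # 95% bootstrap interval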
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def randomize_corrmat(a, tail="both", corrected=True, n_iter=1000,
random_seed=None, return_dist=False):
"""Test the significance of set of correlations with permutations.
By default this corrects for multiple comparisons across one side
of the matrix.
Parameters
----------
a : n_vars x n_obs array
array with variables as rows
tail : both | upper | lower
whether test should be two-tailed, or which tail to integrate over
corrected : boolean
if True reports p values with respect to the max stat distribution
n_iter : int
number of permutation iterations
random_seed : int or None
seed for RNG
return_dist : bool
        if True, also return the null distribution as an n_vars x n_vars x n_iter array
Returns
-------
p_mat : float
        array of probabilities for the actual correlations from the null CDF
"""
if tail not in ["upper", "lower", "both"]:
raise ValueError("'tail' must be 'upper', 'lower', or 'both'")
rs = np.random.RandomState(random_seed)
a = np.asarray(a, np.float)
flat_a = a.ravel()
n_vars, n_obs = a.shape
# Do the permutations to establish a null distribution
null_dist = np.empty((n_vars, n_vars, n_iter))
for i_i in range(n_iter):
perm_i = np.concatenate([rs.permutation(n_obs) + (v * n_obs)
for v in range(n_vars)])
a_i = flat_a[perm_i].reshape(n_vars, n_obs)
null_dist[..., i_i] = np.corrcoef(a_i)
# Get the observed correlation values
real_corr = np.corrcoef(a)
# Figure out p values based on the permutation distribution
p_mat = np.zeros((n_vars, n_vars))
upper_tri = np.triu_indices(n_vars, 1)
if corrected:
if tail == "both":
max_dist = np.abs(null_dist[upper_tri]).max(axis=0)
elif tail == "lower":
max_dist = null_dist[upper_tri].min(axis=0)
elif tail == "upper":
max_dist = null_dist[upper_tri].max(axis=0)
cdf = lambda x: stats.percentileofscore(max_dist, x) / 100.
for i, j in zip(*upper_tri):
observed = real_corr[i, j]
if tail == "both":
p_ij = 1 - cdf(abs(observed))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
else:
for i, j in zip(*upper_tri):
null_corrs = null_dist[i, j]
cdf = lambda x: stats.percentileofscore(null_corrs, x) / 100.
observed = real_corr[i, j]
if tail == "both":
p_ij = 2 * (1 - cdf(abs(observed)))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
    # Make p matrix symmetrical with nans on the diagonal
p_mat += p_mat.T
p_mat[np.diag_indices(n_vars)] = np.nan
if return_dist:
return p_mat, null_dist
return p_mat
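# Example usage (illustrative, not part of the original module):
#
#     rs = np.random.RandomState(0)
#     a = rs.normal(size=(4, 100))  # 4 variables, 100 observations
#     p_mat = randomize_corrmat(a, n_iter=500, random_seed=0)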
| bsd-3-clause |
unreal666/outwiker | plugins/markdown/markdown/markdown_plugin_libs/pygments/lexers/dotnet.py | 6 | 27693 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default, words
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt, iteritems
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
"""
For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 0.8
"""
name = 'C#'
aliases = ['csharp', 'c#']
filenames = ['*.cs']
mimetypes = ['text/x-csharp'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': r'@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|as|async|await|base|break|by|case|catch|'
r'checked|const|continue|default|delegate|'
r'do|else|enum|event|explicit|extern|false|finally|'
r'fixed|for|foreach|goto|if|implicit|in|interface|'
r'internal|is|let|lock|new|null|on|operator|'
r'out|override|params|private|protected|public|readonly|'
r'ref|return|sealed|sizeof|stackalloc|static|'
r'switch|this|throw|true|try|typeof|'
r'unchecked|unsafe|virtual|void|while|'
r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
r'descending|from|group|into|orderby|select|thenby|where|'
r'join|equals)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop'),
default('#pop'),
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'),
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
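# Example usage (illustrative, not part of the original module):
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#     code = 'class Foo { static int Bar() { return 0; } }'
#     html = highlight(code, CSharpLexer(unicodelevel='none'), HtmlFormatter())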
class NemerleLexer(RegexLexer):
"""
For `Nemerle <http://nemerle.org>`_ source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 1.5
"""
name = 'Nemerle'
aliases = ['nemerle']
filenames = ['*.n']
mimetypes = ['text/x-nemerle'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': r'@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'\$\s*"', String, 'splice-string'),
(r'\$\s*<#', String, 'splice-string2'),
(r'<#', String, 'recursive-string'),
(r'(<\[)\s*(' + cs_ident + ':)?', Keyword),
(r'\]\>', Keyword),
# quasiquotation only
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|and|as|base|catch|def|delegate|'
r'enum|event|extern|false|finally|'
r'fun|implements|interface|internal|'
r'is|macro|match|matches|module|mutable|new|'
r'null|out|override|params|partial|private|'
r'protected|public|ref|sealed|static|'
r'syntax|this|throw|true|try|type|typeof|'
r'virtual|volatile|when|where|with|'
r'assert|assert2|async|break|checked|continue|do|else|'
r'ensures|for|foreach|if|late|lock|new|nolate|'
r'otherwise|regexp|repeat|requires|return|surroundwith|'
r'unchecked|unless|using|while|yield)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
r'short|string|uint|ulong|ushort|void|array|list)\b\??',
Keyword.Type),
(r'(:>?)\s*(' + cs_ident + r'\??)',
bygroups(Punctuation, Keyword.Type)),
(r'(class|struct|variant|module)(\s+)',
bygroups(Keyword, Text), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Text),
'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop')
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
],
'splice-string': [
(r'[^"$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'\\"', String),
(r'"', String, '#pop')
],
'splice-string2': [
(r'[^#<>$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'recursive-string': [
(r'[^#<>]', String),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'splice-string-content': [
(r'if|match', Keyword),
(r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
(cs_ident, Name),
(r'\d+', Number),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop')
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
class BooLexer(RegexLexer):
"""
For `Boo <http://boo.codehaus.org/>`_ source code.
"""
name = 'Boo'
aliases = ['boo']
filenames = ['*.boo']
mimetypes = ['text/x-boo']
tokens = {
'root': [
(r'\s+', Text),
(r'(#|//).*$', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment'),
(r'[]{}:(),.;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'/(\\\\|\\/|[^/\s])/', String.Regex),
(r'@/(\\\\|\\/|[^/])*/', String.Regex),
(r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
(r'(as|abstract|callable|constructor|destructor|do|import|'
r'enum|event|final|get|interface|internal|of|override|'
r'partial|private|protected|public|return|set|static|'
r'struct|transient|virtual|yield|super|and|break|cast|'
r'continue|elif|else|ensure|except|for|given|goto|if|in|'
r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
r'while|from|as)\b', Keyword),
(r'def(?=\s+\(.*?\))', Keyword),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
(r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
r'assert|checked|enumerate|filter|getter|len|lock|map|'
r'matrix|max|min|normalArrayIndexing|print|property|range|'
r'rawArrayIndexing|required|typeof|unchecked|using|'
r'yieldAll|zip)\b', Name.Builtin),
(r'"""(\\\\|\\"|.*?)"""', String.Double),
(r'"(\\\\|\\"|[^"]*?)"', String.Double),
(r"'(\\\\|\\'|[^']*?)'", String.Single),
(r'[a-zA-Z_]\w*', Name),
(r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
(r'[0-9][0-9.]*(ms?|d|h|s)', Number),
(r'0\d+', Number.Oct),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer),
],
'comment': [
('/[*]', Comment.Multiline, '#push'),
('[*]/', Comment.Multiline, '#pop'),
('[^/*]', Comment.Multiline),
('[*/]', Comment.Multiline)
],
'funcname': [
(r'[a-zA-Z_]\w*', Name.Function, '#pop')
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
(r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
]
}
class VbNetLexer(RegexLexer):
"""
For
`Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
source code.
"""
name = 'VB.net'
aliases = ['vb.net', 'vbnet']
filenames = ['*.vb', '*.bas']
mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
'[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^\s*<.*?>', Name.Attribute),
(r'\s+', Text),
(r'\n', Text),
(r'rem\b.*?\n', Comment),
(r"'.*?\n", Comment),
(r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
r'#ExternalSource.*?\n|#End\s+ExternalSource|'
r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
Comment.Preproc),
(r'[(){}!#,.:]', Punctuation),
(r'Option\s+(Strict|Explicit|Compare)\s+'
r'(On|Off|Binary|Text)', Keyword.Declaration),
(words((
'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
'Widening', 'With', 'WithEvents', 'WriteOnly'),
prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
(r'(?<!\.)End\b', Keyword, 'end'),
(r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
(r'(?<!\.)(Function|Sub|Property)(\s+)',
bygroups(Keyword, Text), 'funcname'),
(r'(?<!\.)(Class|Structure|Enum)(\s+)',
bygroups(Keyword, Text), 'classname'),
(r'(?<!\.)(Module|Namespace|Imports)(\s+)',
bygroups(Keyword, Text), 'namespace'),
(r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
r'UShort)\b', Keyword.Type),
(r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
(r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
Operator),
('"', String, 'string'),
(r'_\n', Text), # Line continuation (must be before Name)
(uni_name + '[%&@!#$]?', Name),
('#.*?#', Literal.Date),
(r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
(r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
],
'string': [
(r'""', String),
(r'"C?', String, '#pop'),
(r'[^"]+', String),
],
'dim': [
(uni_name, Name.Variable, '#pop'),
default('#pop'), # any other syntax
],
'funcname': [
(uni_name, Name.Function, '#pop'),
],
'classname': [
(uni_name, Name.Class, '#pop'),
],
'namespace': [
(uni_name, Name.Namespace),
(r'\.', Name.Namespace),
default('#pop'),
],
'end': [
(r'\s+', Text),
(r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
Keyword, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
return 0.5
class GenericAspxLexer(RegexLexer):
"""
Lexer for ASP.NET pages.
"""
name = 'aspx-gen'
filenames = []
mimetypes = []
flags = re.DOTALL
tokens = {
'root': [
(r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
(r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
Other,
using(XmlLexer))),
(r'(.+?)(?=<)', using(XmlLexer)),
(r'.+', using(XmlLexer)),
],
}
# TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
"""
Lexer for highlighting C# within ASP.NET pages.
"""
name = 'aspx-cs'
aliases = ['aspx-cs']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
return 0.15
class VbNetAspxLexer(DelegatingLexer):
"""
Lexer for highlighting Visual Basic.net within ASP.NET pages.
"""
name = 'aspx-vb'
aliases = ['aspx-vb']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
"""
For the F# language (version 3.0).
    String handling in F# is intricate; see the language specification:
    http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775
.. versionadded:: 1.5
"""
name = 'FSharp'
aliases = ['fsharp']
filenames = ['*.fs', '*.fsi']
mimetypes = ['text/x-fsharp']
keywords = [
'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
'while', 'with', 'yield!', 'yield',
]
# Reserved words; cannot hurt to color them as keywords too.
keywords += [
'atomic', 'break', 'checked', 'component', 'const', 'constraint',
'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
'functor', 'include', 'method', 'mixin', 'object', 'parallel',
'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
'virtual', 'volatile',
]
keyopts = [
'!=', '#', '&&', '&', r'\(', r'\)', r'\*', r'\+', ',', r'-\.',
'->', '-', r'\.\.', r'\.', '::', ':=', ':>', ':', ';;', ';', '<-',
r'<\]', '<', r'>\]', '>', r'\?\?', r'\?', r'\[<', r'\[\|', r'\[', r'\]',
'_', '`', r'\{', r'\|\]', r'\|', r'\}', '~', '<@@', '<@', '=', '@>', '@@>',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'or', 'not']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = [
'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
'list', 'exn', 'obj', 'enum',
]
# See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
# http://fsharp.org/about/files/spec.pdf for reference. Good luck.
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbrafv]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\U[0-9a-fA-F]{8}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name),
(r'///.*?\n', String.Doc),
(r'//.*?\n', Comment.Single),
(r'\(\*(?!\))', Comment, 'comment'),
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'\b(open|module)(\s+)([\w.]+)',
bygroups(Keyword, Text, Name.Namespace)),
(r'\b(let!?)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Variable)),
(r'\b(type)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Class)),
(r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
Comment.Preproc),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
(r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
(r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
            (r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'@?"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
# e.g. dictionary index access
default('#pop'),
],
'comment': [
(r'[^(*)@"]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
# comments cannot be closed within strings in comments
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'[(*)@]', Comment),
],
'string': [
(r'[^\\"]+', String),
include('escape-sequence'),
(r'\\\n', String),
(r'\n', String), # newlines are allowed in any string
(r'"B?', String, '#pop'),
],
'lstring': [
(r'[^"]+', String),
(r'\n', String),
(r'""', String),
(r'"B?', String, '#pop'),
],
'tqs': [
(r'[^"]+', String),
(r'\n', String),
(r'"""B?', String, '#pop'),
(r'"', String),
],
}
| gpl-3.0 |
HousekeepLtd/django | django/contrib/gis/db/models/functions.py | 54 | 16089 | from decimal import Decimal
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import AreaField
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.core.exceptions import FieldError
from django.db.models import FloatField, IntegerField, TextField
from django.db.models.expressions import Func, Value
from django.utils import six
NUMERIC_TYPES = six.integer_types + (float, Decimal)
class GeoFunc(Func):
function = None
output_field_class = None
geom_param_pos = 0
def __init__(self, *expressions, **extra):
if 'output_field' not in extra and self.output_field_class:
extra['output_field'] = self.output_field_class()
super(GeoFunc, self).__init__(*expressions, **extra)
@property
def name(self):
return self.__class__.__name__
@property
def srid(self):
expr = self.source_expressions[self.geom_param_pos]
if hasattr(expr, 'srid'):
return expr.srid
try:
return expr.field.srid
except (AttributeError, FieldError):
return None
def as_sql(self, compiler, connection):
if self.function is None:
self.function = connection.ops.spatial_function_name(self.name)
return super(GeoFunc, self).as_sql(compiler, connection)
def resolve_expression(self, *args, **kwargs):
res = super(GeoFunc, self).resolve_expression(*args, **kwargs)
base_srid = res.srid
if not base_srid:
raise TypeError("Geometry functions can only operate on geometric content.")
for pos, expr in enumerate(res.source_expressions[1:], start=1):
if isinstance(expr, GeomValue) and expr.srid != base_srid:
# Automatic SRID conversion so objects are comparable
res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
return res
def _handle_param(self, value, param_name='', check_types=None):
if not hasattr(value, 'resolve_expression'):
if check_types and not isinstance(value, check_types):
raise TypeError(
"The %s parameter has the wrong type: should be %s." % (
param_name, str(check_types))
)
return value
class GeomValue(Value):
geography = False
@property
def srid(self):
return self.value.srid
def as_sql(self, compiler, connection):
if self.geography:
self.value = connection.ops.Adapter(self.value, geography=self.geography)
else:
self.value = connection.ops.Adapter(self.value)
return super(GeomValue, self).as_sql(compiler, connection)
def as_mysql(self, compiler, connection):
return 'GeomFromText(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
def as_sqlite(self, compiler, connection):
return 'GeomFromText(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
def as_oracle(self, compiler, connection):
return 'SDO_GEOMETRY(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
class GeoFuncWithGeoParam(GeoFunc):
def __init__(self, expression, geom, *expressions, **extra):
if not hasattr(geom, 'srid'):
# Try to interpret it as a geometry input
try:
geom = GEOSGeometry(geom)
except Exception:
raise ValueError("This function requires a geometric parameter.")
if not geom.srid:
raise ValueError("Please provide a geometry attribute with a defined SRID.")
geom = GeomValue(geom)
super(GeoFuncWithGeoParam, self).__init__(expression, geom, *expressions, **extra)
class SQLiteDecimalToFloatMixin(object):
"""
    By default, Decimal values are converted to str by the SQLite backend, which
    is not accepted by the GIS functions expecting numeric values.
"""
def as_sqlite(self, compiler, connection):
for expr in self.get_source_expressions():
if hasattr(expr, 'value') and isinstance(expr.value, Decimal):
expr.value = float(expr.value)
return super(SQLiteDecimalToFloatMixin, self).as_sql(compiler, connection)
class OracleToleranceMixin(object):
tolerance = 0.05
def as_oracle(self, compiler, connection):
tol = self.extra.get('tolerance', self.tolerance)
self.template = "%%(function)s(%%(expressions)s, %s)" % tol
return super(OracleToleranceMixin, self).as_sql(compiler, connection)
class Area(OracleToleranceMixin, GeoFunc):
def as_sql(self, compiler, connection):
if connection.ops.geography:
# Geography fields support area calculation, returns square meters.
self.output_field = AreaField('sq_m')
elif not self.output_field.geodetic(connection):
# Getting the area units of the geographic field.
units = self.output_field.units_name(connection)
if units:
self.output_field = AreaField(
AreaMeasure.unit_attname(self.output_field.units_name(connection))
)
else:
self.output_field = FloatField()
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise NotImplementedError('Area on geodetic coordinate systems not supported.')
return super(Area, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
self.output_field = AreaField('sq_m') # Oracle returns area in units of meters.
return super(Area, self).as_oracle(compiler, connection)
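# Example usage (illustrative; ``Zipcode`` is a hypothetical model with a
# ``poly`` polygon field): Zipcode.objects.annotate(area=Area('poly'))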
class AsGeoJSON(GeoFunc):
output_field_class = TextField
def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
if options:
expressions.append(options)
super(AsGeoJSON, self).__init__(*expressions, **extra)
class AsGML(GeoFunc):
geom_param_pos = 1
output_field_class = TextField
def __init__(self, expression, version=2, precision=8, **extra):
expressions = [version, expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(AsGML, self).__init__(*expressions, **extra)
class AsKML(AsGML):
def as_sqlite(self, compiler, connection):
# No version parameter
self.source_expressions.pop(0)
return super(AsKML, self).as_sql(compiler, connection)
class AsSVG(GeoFunc):
output_field_class = TextField
def __init__(self, expression, relative=False, precision=8, **extra):
relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
expressions = [
expression,
relative,
self._handle_param(precision, 'precision', six.integer_types),
]
super(AsSVG, self).__init__(*expressions, **extra)
class BoundingCircle(GeoFunc):
def __init__(self, expression, num_seg=48, **extra):
super(BoundingCircle, self).__init__(*[expression, num_seg], **extra)
class Centroid(OracleToleranceMixin, GeoFunc):
pass
class Difference(OracleToleranceMixin, GeoFuncWithGeoParam):
pass
class DistanceResultMixin(object):
def convert_value(self, value, expression, connection, context):
if value is None:
return None
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection):
dist_att = 'm'
else:
units = geo_field.units_name(connection)
if units:
dist_att = DistanceMeasure.unit_attname(units)
else:
dist_att = None
if dist_att:
return DistanceMeasure(**{dist_att: value})
return value
class Distance(DistanceResultMixin, OracleToleranceMixin, GeoFuncWithGeoParam):
output_field_class = FloatField
spheroid = None
def __init__(self, expr1, expr2, spheroid=None, **extra):
expressions = [expr1, expr2]
if spheroid is not None:
self.spheroid = spheroid
expressions += (self._handle_param(spheroid, 'spheroid', bool),)
super(Distance, self).__init__(*expressions, **extra)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
src_field = self.get_source_fields()[0]
geography = src_field.geography and self.srid == 4326
if geography:
# Set parameters as geography if base field is geography
for pos, expr in enumerate(
self.source_expressions[self.geom_param_pos + 1:], start=self.geom_param_pos + 1):
if isinstance(expr, GeomValue):
expr.geography = True
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need special distance functions
if self.spheroid:
self.function = 'ST_Distance_Spheroid' # More accurate, resource intensive
# Replace boolean param by the real spheroid of the base field
self.source_expressions[2] = Value(geo_field._spheroid)
else:
self.function = 'ST_Distance_Sphere'
return super(Distance, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
if self.spheroid:
self.source_expressions.pop(2)
return super(Distance, self).as_oracle(compiler, connection)
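# Example usage (illustrative; ``City`` is a hypothetical model with a
# ``point`` field and ``pnt`` a GEOSGeometry with an SRID):
#     City.objects.annotate(d=Distance('point', pnt))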
class Envelope(GeoFunc):
pass
class ForceRHR(GeoFunc):
pass
class GeoHash(GeoFunc):
output_field_class = TextField
def __init__(self, expression, precision=None, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(GeoHash, self).__init__(*expressions, **extra)
class Intersection(OracleToleranceMixin, GeoFuncWithGeoParam):
pass
class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
def __init__(self, expr1, spheroid=True, **extra):
self.spheroid = spheroid
super(Length, self).__init__(expr1, **extra)
def as_sql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
raise NotImplementedError("This backend doesn't support Length on geodetic fields")
return super(Length, self).as_sql(compiler, connection)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
src_field = self.get_source_fields()[0]
geography = src_field.geography and self.srid == 4326
if geography:
self.source_expressions.append(Value(self.spheroid))
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
self.function = 'ST_Length_Spheroid'
self.source_expressions.append(Value(geo_field._spheroid))
else:
dim = min(f.dim for f in self.get_source_fields() if f)
if dim > 2:
self.function = connection.ops.length3d
return super(Length, self).as_sql(compiler, connection)
def as_sqlite(self, compiler, connection):
geo_field = GeometryField(srid=self.srid)
if geo_field.geodetic(connection):
if self.spheroid:
self.function = 'GeodesicLength'
else:
self.function = 'GreatCircleLength'
return super(Length, self).as_sql(compiler, connection)
class MemSize(GeoFunc):
output_field_class = IntegerField
class NumGeometries(GeoFunc):
output_field_class = IntegerField
class NumPoints(GeoFunc):
output_field_class = IntegerField
def as_sqlite(self, compiler, connection):
if self.source_expressions[self.geom_param_pos].output_field.geom_type != 'LINESTRING':
raise TypeError("Spatialite NumPoints can only operate on LineString content")
return super(NumPoints, self).as_sql(compiler, connection)
class Perimeter(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
def as_postgresql(self, compiler, connection):
dim = min(f.dim for f in self.get_source_fields())
if dim > 2:
self.function = connection.ops.perimeter3d
return super(Perimeter, self).as_sql(compiler, connection)
class PointOnSurface(OracleToleranceMixin, GeoFunc):
pass
class Reverse(GeoFunc):
pass
class Scale(SQLiteDecimalToFloatMixin, GeoFunc):
def __init__(self, expression, x, y, z=0.0, **extra):
expressions = [
expression,
self._handle_param(x, 'x', NUMERIC_TYPES),
self._handle_param(y, 'y', NUMERIC_TYPES),
]
if z != 0.0:
expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
super(Scale, self).__init__(*expressions, **extra)
class SnapToGrid(SQLiteDecimalToFloatMixin, GeoFunc):
def __init__(self, expression, *args, **extra):
nargs = len(args)
expressions = [expression]
if nargs in (1, 2):
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
)
elif nargs == 4:
# Reverse origin and size param ordering
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]]
)
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]]
)
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
super(SnapToGrid, self).__init__(*expressions, **extra)
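# A minimal usage sketch (model and field names are illustrative). With one or
# two arguments the values are grid sizes; with four they are
# (xsize, ysize, xorigin, yorigin), reordered above to match the PostGIS
# ST_SnapToGrid(geom, originx, originy, sizex, sizey) signature:
#   Zipcode.objects.annotate(snapped=SnapToGrid('geom', 0.01))
#   Zipcode.objects.annotate(snapped=SnapToGrid('geom', 0.01, 0.01, 0.0, 0.0))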
class SymDifference(OracleToleranceMixin, GeoFuncWithGeoParam):
pass
class Transform(GeoFunc):
def __init__(self, expression, srid, **extra):
expressions = [
expression,
self._handle_param(srid, 'srid', six.integer_types),
]
super(Transform, self).__init__(*expressions, **extra)
@property
def srid(self):
# Make srid the resulting srid of the transformation
return self.source_expressions[self.geom_param_pos + 1].value
def convert_value(self, value, expression, connection, context):
value = super(Transform, self).convert_value(value, expression, connection, context)
if not connection.ops.postgis and not value.srid:
# Some backends do not set the srid on the returning geometry
value.srid = self.srid
return value
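# A minimal usage sketch (model and field names are illustrative): reproject
# geometries to web mercator; the srid property above then reports 3857 as the
# SRID of the transformed result.
#   City.objects.annotate(merc=Transform('point', 3857))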
class Translate(Scale):
def as_sqlite(self, compiler, connection):
func_name = connection.ops.spatial_function_name(self.name)
if func_name == 'ST_Translate' and len(self.source_expressions) < 4:
# Always provide the z parameter for ST_Translate (Spatialite >= 3.1)
self.source_expressions.append(Value(0))
elif func_name == 'ShiftCoords' and len(self.source_expressions) > 3:
raise ValueError("This version of Spatialite doesn't support 3D")
return super(Translate, self).as_sqlite(compiler, connection)
class Union(OracleToleranceMixin, GeoFuncWithGeoParam):
pass
| bsd-3-clause |
jgmanzanas/CMNT_004_15 | project-addons/sale_display_stock/report/sale_order_line_report.py | 1 | 4447 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Comunitea All Rights Reserved
# $Jesús Ventosinos Mayor <jesus@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, tools
class sale_order_line_report(models.Model):
_name = 'sale.order.line.report'
_auto = False
name = fields.Char('Name', readonly=True)
partner_id = fields.Many2one('res.partner', 'Partner', readonly=True)
product_qty = fields.Float('Quantity', readonly=True)
uom = fields.Many2one('product.uom', 'UoM', readonly=True)
price_unit = fields.Float('Price unit', readonly=True)
discount = fields.Float('Discount', readonly=True)
salesman_id = fields.Many2one('res.users', 'Salesperson', readonly=True)
state = fields.Char('State', readonly=True)
product_id = fields.Many2one('product.product', 'Product', readonly=True)
order_id = fields.Many2one('sale.order', 'Order', readonly=True)
qty_kitchen = fields.Float('Qty in kitchen', group_operator="avg",
readonly=True)
qty_stock = fields.Float('Stock qty', group_operator="avg", readonly=True)
company_id = fields.Many2one("res.company", "Company", readonly=True)
def init(self, cr):
tools.drop_view_if_exists(cr, self._table)
cr.execute("""
CREATE or REPLACE VIEW sale_order_line_report as (SELECT sol.id as id,
sol.name as name,
sol.order_partner_id as partner_id,
sol.product_uom_qty as product_qty,
sol.product_uom as uom,
sol.price_unit as price_unit,
sol.discount as discount,
sol.salesman_id as salesman_id,
sol.state as state,
sol.order_id as order_id,
sol.company_id as company_id,
q_kt.product_id,
q_kt.qty AS qty_kitchen,
stck.qty AS qty_stock
FROM sale_order_line sol
LEFT JOIN (SELECT product_id,
Sum(qty) AS qty
FROM stock_quant
WHERE location_id IN (SELECT res_id
FROM ir_model_data
WHERE module = 'location_moves' AND name IN ('stock_location_kitchen','stock_location_pantry')
)
GROUP BY product_id) q_kt
ON sol.product_id = q_kt.product_id
LEFT JOIN (SELECT product_id,
Sum(qty) AS qty
FROM stock_quant
WHERE location_id IN (SELECT loc.id
FROM stock_location loc
INNER JOIN (SELECT parent_left,
parent_right
FROM stock_location
WHERE
id IN (select view_location_id from stock_warehouse))
stock
ON loc.parent_left >=
stock.parent_left
AND loc.parent_right <=
stock.parent_right)
GROUP BY product_id) stck
ON sol.product_id = stck.product_id
WHERE q_kt.qty > 0 and sol.id in (select sale_line_id from procurement_order po where po.state not in ('done', 'cancel'))
GROUP BY sol.id, sol.name, sol.order_partner_id, sol.product_uom_qty,
sol.product_uom, sol.price_unit, sol.discount, sol.company_id,
sol.salesman_id, sol.state, sol.order_id, q_kt.product_id, q_kt.qty, stck.qty)
""")
| agpl-3.0 |
j00bar/ansible | test/units/modules/cloud/openstack/test_os_server.py | 55 | 6456 | import mock
import pytest
import yaml
import inspect
import collections
from ansible.modules.cloud.openstack import os_server
class AnsibleFail(Exception):
pass
class AnsibleExit(Exception):
pass
def params_from_doc(func):
'''This function extracts the docstring from the specified function,
parses it as a YAML document, and returns parameters for the os_server
module.'''
doc = inspect.getdoc(func)
cfg = yaml.load(doc)
for task in cfg:
for module, params in task.items():
for k, v in params.items():
if k in ['nics'] and type(v) == str:
params[k] = [v]
task[module] = collections.defaultdict(str,
params)
return cfg[0]['os_server']
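# For example (hypothetical docstring), a test method documented as
#   - os_server:
#       nics: net-id=1234
# yields defaultdict(str, {'nics': ['net-id=1234']}), so any key the module
# code looks up but the test did not declare reads back as ''.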
class FakeCloud (object):
ports = [
{'name': 'port1', 'id': '1234'},
{'name': 'port2', 'id': '4321'},
]
networks = [
{'name': 'network1', 'id': '5678'},
{'name': 'network2', 'id': '8765'},
]
images = [
{'name': 'cirros', 'id': '1'},
{'name': 'fedora', 'id': '2'},
]
    flavors = [
        # 'ram' key matches the lookup in get_flavor_by_ram below
        {'name': 'm1.small', 'id': '1', 'ram': 1024},
        {'name': 'm1.tiny', 'id': '2', 'ram': 512},
    ]
def _find(self, source, name):
for item in source:
if item['name'] == name or item['id'] == name:
return item
def get_image_id(self, name, exclude=None):
image = self._find(self.images, name)
if image:
return image['id']
def get_flavor(self, name):
return self._find(self.flavors, name)
def get_flavor_by_ram(self, ram, include=None):
for flavor in self.flavors:
if flavor['ram'] >= ram and (include is None or include in
flavor['name']):
return flavor
def get_port(self, name):
return self._find(self.ports, name)
def get_network(self, name):
return self._find(self.networks, name)
create_server = mock.MagicMock()
class TestNetworkArgs(object):
'''This class exercises the _network_args function of the
os_server module. For each test, we parse the YAML document
contained in the docstring to retrieve the module parameters for the
test.'''
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock()
self.module.params = params_from_doc(method)
def test_nics_string_net_id(self):
'''
- os_server:
nics: net-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_string_net_id_list(self):
'''
- os_server:
nics: net-id=1234,net-id=4321
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['net-id'] == '4321')
def test_nics_string_port_id(self):
'''
- os_server:
nics: port-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_string_net_name(self):
'''
- os_server:
nics: net-name=network1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '5678')
def test_nics_string_port_name(self):
'''
- os_server:
nics: port-name=port1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_structured_net_id(self):
'''
- os_server:
nics:
- net-id: '1234'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_structured_mixed(self):
'''
- os_server:
nics:
- net-id: '1234'
- port-name: port1
- 'net-name=network1,port-id=4321'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['port-id'] == '1234')
assert(args[2]['net-id'] == '5678')
assert(args[3]['port-id'] == '4321')
class TestCreateServer(object):
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock()
self.module.params = params_from_doc(method)
self.module.fail_json.side_effect = AnsibleFail()
self.module.exit_json.side_effect = AnsibleExit()
self.meta = mock.MagicMock()
        self.meta.get_hostvars_from_server.return_value = {
'id': '1234'
}
os_server.meta = self.meta
def test_create_server(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: network1
meta:
- key: value
'''
with pytest.raises(AnsibleExit):
os_server._create_server(self.module, self.cloud)
assert(self.cloud.create_server.call_count == 1)
assert(self.cloud.create_server.call_args[1]['image']
== self.cloud.get_image_id('cirros'))
assert(self.cloud.create_server.call_args[1]['flavor']
== self.cloud.get_flavor('m1.tiny')['id'])
assert(self.cloud.create_server.call_args[1]['nics'][0]['net-id']
== self.cloud.get_network('network1')['id'])
def test_create_server_bad_flavor(self):
'''
- os_server:
image: cirros
flavor: missing_flavor
nics:
- net-name: network1
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_flavor' in
self.module.fail_json.call_args[1]['msg'])
def test_create_server_bad_nic(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: missing_network
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_network' in
self.module.fail_json.call_args[1]['msg'])
| gpl-3.0 |
gevannmullins/linux_server | add_items.py | 1 | 4498 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Category, Base, Item, User
engine = create_engine('postgresql://catalog:password@localhost/catalog')
# engine = create_engine('sqlite:///catalog.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Create dummy user
User1 = User(name="Caron Mullins", email="caronmullins2016@gmail.com", picture='https://pbs.twimg.com/profile_images/2671170543/18debd694829ed78203a5a36dd364160_400x400.png')
session.add(User1)
session.commit()
# Category 1 with 3 items
category1 = Category(user_id=1, name="Soccer", image="http://neobasketball.com/img/bballcourt.jpg")
session.add(category1)
session.commit()
item1 = Item(user_id=1, name="Soccer Ball", description="Soccer balls for practicing and match games.", category=category1)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Soccer Boots", description="Soccer boots to maxumise gameplay", category=category1)
session.add(item2)
session.commit()
item3 = Item(user_id=1, name="Whistles", description="Whistles for training sessions.", category=category1)
session.add(item3)
session.commit()
# Next categories and their items
category2 = Category(user_id=1, name="Basketball", image="http://neobasketball.com/img/bballcourt.jpg")
session.add(category2)
session.commit()
item1 = Item(user_id=1, name="Crew Socks", description="Stretchy ribbed socks extending to mid calf", category_id = category2.id)
session.add(item1)
session.commit()
# Category 3
category3 = Category(user_id=1, name="Baseball", image="http://totalsportscomplex.com/wp-content/uploads/2014/09/baseball-pic.jpg")
session.add(category3)
session.commit()
item1 = Item(user_id=1, name="Crew Socks", description="Stretchy ribbed socks extending to mid calf", category_id = category3.id)
session.add(item1)
session.commit()
# Category 4
category4 = Category(user_id=1, name="Frisbee", image="http://uvmbored.com/wp-content/uploads/2015/10/how_the_frisbee_took_flight.jpg")
session.add(category4)
session.commit()
item1 = Item(user_id=1, name="Flying Disc", description="A Flying disc or a Flying Saucer", category_id = category4.id)
session.add(item1)
session.commit()
# Category 5
category5 = Category(user_id=1, name="Snowboarding", image="https://pantherfile.uwm.edu/collins9/www/finalproject5/Project_5/snowboarding3.jpg")
session.add(category5)
session.commit()
item1 = Item(user_id=1, name="Snowboard", description="Wooden board suitable to glide on snow", category_id = category5.id)
session.add(item1)
session.commit()
item2 = Item(user_id=1, name="Goggles", description="Anit-glare protective safety glasses",category_id = category5.id)
session.add(item2)
session.commit()
# Category 6
category6 = Category(user_id=1, name="Rock Climbing", image="http://asme.berkeley.edu/wordpress/wp-content/uploads/2013/11/Rock-Climbing-Wallpaper-HD.jpg")
session.add(category6)
session.commit()
item1 = Item(user_id=1, name="Shoes", description="Superior performance shoew wtih excellent grip", category_id = category6.id)
session.add(item1)
session.commit()
# Category 7
category7 = Category(user_id=1, name="Skating", image="http://www.ocasia.org/Images-OCA/During-the-Roller-Skating-XXX-contest-between-XXX-_53834132011574.jpg")
session.add(category7)
session.commit()
item1 = Item(user_id=1, name="Skates", description="Roller skates with bearing suitable for beginner and advanced skater", category_id = category7.id)
session.add(item1)
session.commit()
# Category 8
category8 = Category(user_id=1, name="Hockey", image="http://www.picture-newsletter.com/street-hockey/street-hockey-39.jpg")
session.add(category8)
session.commit()
item1 = Item(user_id=1, name="Stick", description="Composite Stick favorable for both ice and street hockey", category_id = category8.id)
session.add(item1)
session.commit()
print "added menu items!"
| mit |
alphagov/backdrop | tests/read/test_parse_request_args.py | 1 | 5035 | from datetime import datetime
import re
import unittest
from hamcrest import assert_that, is_, has_item
import pytz
from werkzeug.datastructures import MultiDict
from backdrop.read.query import parse_request_args
class Test_parse_request_args(unittest.TestCase):
def test_start_at_is_parsed(self):
request_args = MultiDict([
("start_at", "2012-12-12T08:12:43+00:00")])
args = parse_request_args(request_args)
assert_that(args['start_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_first_start_at_is_used(self):
request_args = MultiDict([
("start_at", "2012-12-12T08:12:43+00:00"),
("start_at", "2012-12-13T08:12:43+00:00"),
])
args = parse_request_args(request_args)
assert_that(args['start_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_end_at_is_parsed(self):
request_args = MultiDict([
("end_at", "2012-12-12T08:12:43+00:00")])
args = parse_request_args(request_args)
assert_that(args['end_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_first_end_at_is_used(self):
request_args = MultiDict([
("end_at", "2012-12-12T08:12:43+00:00"),
("end_at", "2012-12-13T08:12:43+00:00"),
])
args = parse_request_args(request_args)
assert_that(args['end_at'], is_(
datetime(2012, 12, 12, 8, 12, 43, tzinfo=pytz.UTC)))
def test_one_filter_by_is_parsed(self):
request_args = MultiDict([
("filter_by", "foo:bar")])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item(["foo", "bar"]))
def test_many_filter_by_are_parsed(self):
request_args = MultiDict([
("filter_by", "foo:bar"),
("filter_by", "bar:foo")
])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item(["foo", "bar"]))
assert_that(args['filter_by'], has_item(["bar", "foo"]))
def test_build_query_with_boolean_value(self):
request_args = MultiDict([
("filter_by", "planet:true"),
("filter_by", "star:false"),
])
args = parse_request_args(request_args)
assert_that(args['filter_by'], has_item([ "planet", True ]))
assert_that(args['filter_by'], has_item([ "star", False ]))
def test_one_filter_by_prefix_is_parsed(self):
request_args = MultiDict([
("filter_by_prefix", "foo:/hello/world")])
args = parse_request_args(request_args)
assert_that(args['filter_by_prefix'],
has_item(["foo", '/hello/world']))
    def test_many_filter_by_prefix_are_parsed(self):
request_args = MultiDict([
("filter_by_prefix", "foo:bar"),
("filter_by_prefix", "bar:foo")
])
args = parse_request_args(request_args)
assert_that(args['filter_by_prefix'], has_item(["foo", 'bar']))
assert_that(args['filter_by_prefix'], has_item(["bar", 'foo']))
def test_group_by_is_passed_through_untouched(self):
request_args = MultiDict([("group_by", "foobar")])
args = parse_request_args(request_args)
assert_that(args['group_by'], is_(['foobar']))
def test_sort_is_parsed(self):
request_args = MultiDict([
("sort_by", "foo:ascending")])
args = parse_request_args(request_args)
assert_that(args['sort_by'], is_(["foo", "ascending"]))
def test_sort_will_use_first_argument_only(self):
request_args = MultiDict([
("sort_by", "foo:descending"),
("sort_by", "foo:ascending"),
])
args = parse_request_args(request_args)
assert_that(args['sort_by'], is_(["foo", "descending"]))
def test_limit_is_parsed(self):
request_args = MultiDict([
("limit", "123")
])
args = parse_request_args(request_args)
assert_that(args['limit'], is_(123))
def test_one_collect_is_parsed_with_default_method(self):
request_args = MultiDict([
("collect", "some_key")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "default")]))
def test_two_collects_are_parsed_with_default_methods(self):
request_args = MultiDict([
("collect", "some_key"),
("collect", "some_other_key")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "default"),
("some_other_key", "default")]))
def test_one_collect_is_parsed_with_custom_method(self):
request_args = MultiDict([
("collect", "some_key:mean")
])
args = parse_request_args(request_args)
assert_that(args['collect'], is_([("some_key", "mean")]))
| mit |
imgrant/fit2tcx | fit2tcx.py | 1 | 40690 | #!/usr/bin/env python
#
# fit2tcx - convert a FIT file to a TCX file
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name> [https://github.com/Tigge/FIT-to-TCX/]
# Copyright (c) 2014-2016, Ian Grant <ian@iangrant.me> [https://github.com/imgrant/fit2tcx]
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__version__ = "1.6"
import sys
import copy
import contextlib
import argparse
import lxml.etree
from datetime import datetime, timedelta
from pytz import timezone, utc
from tzwhere import tzwhere
from geopy.distance import GreatCircleDistance
from fitparse import FitFile, FitParseError
"""
Limit values for error checking on speed & distance calculations
"""
# Speed and distance calculated from GPS will be ignored
# for trackpoints where the acceleration from the last
# point is above this threshold (in m/s^2)
MAX_ACCELERATION = 3.0
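# For example (illustrative numbers): two fixes 30 m apart over 2 s give a
# GPS speed of 15 m/s; the checks below divide that speed by the 2 s interval,
# and 7.5 m/s^2 > 3.0, so the GPS-derived values are discarded in favour of
# the recorded distance/speed stream.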
"""
FIT to TCX values mapping
"""
LAP_TRIGGER_MAP = {
"manual": "Manual",
"time": "Time",
"distance": "Distance",
"position_start": "Location",
"position_lap": "Location",
"position_waypoint": "Location",
"position_marked": "Location",
"session_end": "Manual",
"fitness_equipment": "Manual"}
INTENSITY_MAP = {
"active": "Active",
"warmup": "Active",
"cooldown": "Active",
"rest": "Resting",
None: "Active"}
PRODUCT_MAP = {
0: "Unknown",
255: "Run Trainer 2.0", # Timex
# Garmin products:
1: "Garmin Connect API", # Also HRM1
2: "AXH01",
2: "AXH01",
4: "AXB02",
5: "HRM2SS",
6: "DSI_ALF02",
473: "Forerunner 301",
474: "Forerunner 301",
475: "Forerunner 301",
494: "Forerunner 301",
717: "Forerunner 405",
987: "Forerunner 405",
782: "Forerunner 50",
988: "Forerunner 60",
1011: "DSI_ALF01",
1018: "Forerunner 310XT",
1446: "Forerunner 310XT",
1036: "Edge 500",
1199: "Edge 500",
1213: "Edge 500",
1387: "Edge 500",
1422: "Edge 500",
1124: "Forerunner 110",
1274: "Forerunner 110",
1169: "Edge 800",
1333: "Edge 800",
1334: "Edge 800",
1497: "Edge 800",
1386: "Edge 800",
1253: "Chirp",
1325: "Edge 200",
1555: "Edge 200",
1328: "Forerunner 910XT",
1537: "Forerunner 910XT",
1600: "Forerunner 910XT",
1664: "Forerunner 910XT",
1765: "Forerunner 920XT",
1341: "ALF04",
1345: "Forerunner 610",
1410: "Forerunner 610",
1360: "Forerunner 210",
1436: "Forerunner 70",
1461: "AMX",
1482: "Forerunner 10",
1688: "Forerunner 10",
1499: "Swim",
1551: "Fenix",
1967: "Fenix 2",
1561: "Edge 510",
1742: "Edge 510",
1821: "Edge 510",
1567: "Edge 810",
1721: "Edge 810",
1822: "Edge 810",
1823: "Edge 810",
1836: "Edge 1000",
1570: "Tempe",
1735: "VIRB Elite",
1736: "Edge Touring",
1752: "HRM Run",
10007: "SDM4",
20119: "Training Center",
1623: "Forerunner 620",
2431: "Forerunner 235"}
"""
TCX schema and namespace values
"""
TCD_NAMESPACE = "http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2"
TCD = "{%s}" % TCD_NAMESPACE
XML_SCHEMA_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
XML_SCHEMA = "{%s}" % XML_SCHEMA_NAMESPACE
SCHEMA_LOCATION = \
"http://www.garmin.com/xmlschemas/ActivityExtension/v2 " + \
"http://www.garmin.com/xmlschemas/ActivityExtensionv2.xsd " + \
"http://www.garmin.com/xmlschemas/FatCalories/v1 " + \
"http://www.garmin.com/xmlschemas/fatcalorieextensionv1.xsd " + \
"http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2 " + \
"http://www.garmin.com/xmlschemas/TrainingCenterDatabasev2.xsd"
NSMAP = {
None: TCD_NAMESPACE,
"xsi": XML_SCHEMA_NAMESPACE}
# Class and context manager to suppress stdout for use with tzwhere.
class DummyFile(object):
def write(self, x): pass
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
sys.stdout = DummyFile()
yield
sys.stdout = save_stdout
class MyDataProcessor(object):
"""
Custom units data processor for FIT object
"""
def process_type_bool(self, field_data):
if field_data.value is not None:
field_data.value = bool(field_data.value)
def process_type_date_time(self, field_data):
value = field_data.value
if value is not None and value >= 0x10000000:
dt = datetime.utcfromtimestamp(631065600 + value)
field_data.value = utc.normalize(dt.replace(tzinfo=utc))
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
dt = datetime.fromtimestamp(631065600 + field_data.value)
field_data.value = utc.normalize(dt.replace(tzinfo=utc))
field_data.units = None
def process_units_semicircles(self, field_data):
if field_data.value is not None:
field_data.value *= 180.0 / (2**31)
field_data.units = 'deg'
class TZDataProcessor(MyDataProcessor):
"""
Extra data processor layer for working with timezones.
For the Timex Run Trainer 2.0, date-times claim to be UTC (as per the FIT
    format spec), but are actually in an (unknown) local timezone.
If the data processor is called with a lat,lon point, we look up the true
timezone and re-normalize date-times to UTC.
Otherwise, if the data processor is called with a timezone name (defaults
to UTC, i.e. no difference), we use that and re-normalize.
"""
def __init__(self, lat=None, lon=None, tzname="UTC"):
if lat is not None and lon is not None:
with nostdout():
w = tzwhere.tzwhere()
self.tz = timezone(w.tzNameAt(lat, lon))
else:
self.tz = timezone(tzname)
def process_type_date_time(self, field_data):
value = field_data.value
if value is not None and value >= 0x10000000:
dt = datetime.utcfromtimestamp(631065600 + value)
dt = self.tz.localize(dt)
field_data.value = utc.normalize(dt)
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
dt = datetime.fromtimestamp(631065600 + field_data.value)
dt = self.tz.localize(dt)
field_data.value = utc.normalize(dt)
field_data.units = None # Units were 's', set to None
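# A usage sketch (file name and coordinates are illustrative): for a Timex Run
# Trainer 2.0 file recorded near London, timestamps are interpreted as local
# time and re-normalized to true UTC via
#   FitFile("run.fit", check_crc=False,
#           data_processor=TZDataProcessor(lat=51.5, lon=-0.1))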
def iso_Z_format(dt):
iso = dt.isoformat()
z_iso = iso.replace("+00:00", "Z")
return z_iso
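# For example, iso_Z_format(datetime(2016, 1, 2, 3, 4, 5, tzinfo=utc)) returns
# "2016-01-02T03:04:05Z": pytz UTC renders the offset as "+00:00", which is
# replaced with the TCX-style "Z" suffix.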
def sum_distance(activity,
start_time=datetime(1899, 1, 1, 0, 0, 1, tzinfo=utc),
end_time=datetime(2189, 12, 31, 23, 59, 59, tzinfo=utc)):
"""
Calculate distance from GPS data for an activity
"""
# First build tps array (using timestamp as the index)
# in order to coalesce values at the same timepoint
# under a single trackpoint element
tps = {}
fit_epoch = datetime(1989, 12, 31, 0, 0, 0, tzinfo=utc)
for trackpoint in activity.get_messages('record'):
tts = trackpoint.get_value("timestamp")
tsi = int((tts - fit_epoch).total_seconds())
if tps.get(tsi) is None:
tps[tsi] = {
'timestamp': tts,
'distance': None,
'position_lat': None,
'position_long': None}
for var in ['distance',
'position_lat',
'position_long']:
if trackpoint.get_value(var) is not None:
tps[tsi][var] = trackpoint.get_value(var)
# For mid-activity laps, iterate through trackpoints to
# grab the first point before the start of the lap, also
# delete points that are not part of the lap
prev = None
for timestamp in sorted(tps, reverse=True):
tp = tps[timestamp]
if tp['timestamp'] < start_time and prev is None:
prev = copy.copy(tp)
if tp['timestamp'] < start_time or tp['timestamp'] > end_time:
del tps[timestamp]
# Then loop over tps array to calculate cumulative point-to-point
# distance from GPS data. Existing distance data (e.g. from footpod)
# is used when there is no GPS position available or it is bad.
distance = 0.0
for timestamp in sorted(tps):
tp = tps[timestamp]
if prev is not None:
if prev['distance'] is None:
prev_dist = 0
else:
prev_dist = prev['distance']
if not None in (tp['position_lat'],
tp['position_long'],
prev['position_lat'],
prev['position_long']):
try:
tp_timedelta = (tp['timestamp'] -
prev['timestamp']).total_seconds()
gps_dist = GreatCircleDistance(
(tp['position_lat'],
tp['position_long']),
(prev['position_lat'],
prev['position_long'])
).meters
gps_speed = (gps_dist / tp_timedelta)
# Fallback to existing distance/speed stream data
# if the GPS data looks erroneous (acceleration test)
if (gps_speed / tp_timedelta) > MAX_ACCELERATION:
gps_dist = tp['distance'] - prev_dist
except:
# Fallback to existing distance stream data on error
gps_dist = tp['distance'] - prev_dist
else:
# Fallback to existing distance stream data if no GPS coords
gps_dist = tp['distance'] - prev_dist
distance += gps_dist
prev = tp
return distance
def create_element(tag, text=None, namespace=None):
"""Create a free element"""
namespace = NSMAP[namespace]
tag = "{%s}%s" % (namespace, tag)
element = lxml.etree.Element(tag, nsmap=NSMAP)
if text is not None:
element.text = text
return element
def create_sub_element(parent, tag, text=None, namespace=None):
"""Create an element as a child of an existing given element"""
element = create_element(tag, text, namespace)
parent.append(element)
return element
def create_document():
"""Create a TCX XML document"""
document = create_element("TrainingCenterDatabase")
document.set(XML_SCHEMA + "schemaLocation", SCHEMA_LOCATION)
document = lxml.etree.ElementTree(document)
return document
def add_author(document):
"""Add author element (TCX writer) to TCX"""
author = create_sub_element(document.getroot(), "Author")
author.set(XML_SCHEMA + "type", "Application_t")
create_sub_element(author, "Name", "fit2tcx Converter")
build = create_sub_element(author, "Build")
version = create_sub_element(build, "Version")
vMajor, vMinor = tuple(map(int, (__version__.split("."))))
create_sub_element(version, "VersionMajor", str(vMajor))
create_sub_element(version, "VersionMinor", str(vMinor))
create_sub_element(version, "BuildMajor", "0")
create_sub_element(version, "BuildMinor", "0")
create_sub_element(author, "LangID", "en")
create_sub_element(author, "PartNumber", "000-00000-00")
def add_creator(element, manufacturer, product_name, product_id, serial):
"""Add creator element (recording device) to TCX activity"""
creator = create_sub_element(element, "Creator")
creator.set(XML_SCHEMA + "type", "Device_t")
create_sub_element(creator, "Name", manufacturer + " " + product_name)
unitID = int(serial or 0)
create_sub_element(creator, "UnitId", str(unitID))
# Set ProductID to 0 for non-Garmin devices
if manufacturer != "Garmin":
product_id = 0
create_sub_element(creator, "ProductID", str(product_id))
version = create_sub_element(creator, "Version")
create_sub_element(version, "VersionMajor", "0")
create_sub_element(version, "VersionMinor", "0")
create_sub_element(version, "BuildMajor", "0")
create_sub_element(version, "BuildMinor", "0")
def add_notes(element, text):
"""Add notes element to TCX activity"""
create_sub_element(element, "Notes", text)
def add_trackpoint(element, trackpoint, sport):
"""Create a trackpoint element"""
timestamp = trackpoint['timestamp']
pos_lat = trackpoint['position_lat']
pos_long = trackpoint['position_long']
distance = trackpoint['distance']
altitude = trackpoint['altitude']
speed = trackpoint['speed']
heart_rate = trackpoint['heart_rate']
cadence = trackpoint['cadence']
create_sub_element(element, "Time", iso_Z_format(timestamp))
if pos_lat is not None and pos_long is not None:
pos = create_sub_element(element, "Position")
create_sub_element(pos, "LatitudeDegrees", "{:.6f}".format(pos_lat))
create_sub_element(pos, "LongitudeDegrees", "{:.6f}".format(pos_long))
if altitude is not None:
create_sub_element(element, "AltitudeMeters", str(altitude))
if distance is not None:
create_sub_element(element, "DistanceMeters", str(distance))
if heart_rate is not None:
heartrateelem = create_sub_element(element, "HeartRateBpm")
heartrateelem.set(XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str(heart_rate))
if speed is not None or cadence is not None:
if cadence is not None and sport == "Biking":
# Bike cadence is stored in main trackpoint element,
# not an extension, unlike running cadence (below)
create_sub_element(element, "Cadence", str(cadence))
exelem = create_sub_element(element, "Extensions")
tpx = create_sub_element(exelem, "TPX")
tpx.set("xmlns",
"http://www.garmin.com/xmlschemas/ActivityExtension/v2")
if speed is not None:
create_sub_element(tpx, "Speed", str(speed))
if cadence is not None:
if sport == "Running":
tpx.set("CadenceSensor", "Footpod")
create_sub_element(tpx, "RunCadence", str(cadence))
elif sport == "Biking":
tpx.set("CadenceSensor", "Bike")
def add_lap(element,
activity,
lap,
sport,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
fixed_distance,
activity_scaling_factor,
total_cumulative_distance):
"""Add a lap element to a TCX document"""
# Only process laps with timestamps - this serves as a workaround for
# extra fake/empty laps in FIT files from the Timex Run Trainer 2.0
if lap.get_value('timestamp') is not None:
lap_num = lap.get_value("message_index") + 1
start_time = lap.get_value("start_time")
end_time = lap.get_value("timestamp")
totaltime = lap.get_value("total_elapsed_time")
stored_distance = lap.get_value("total_distance")
calculated_distance = sum_distance(activity, start_time, end_time)
if fixed_distance is not None:
reference_distance = fixed_distance
else:
reference_distance = calculated_distance
try:
lap_scaling_factor = reference_distance / stored_distance
except ZeroDivisionError:
lap_scaling_factor = 1.00
if calibrate and per_lap_cal:
scaling_factor = lap_scaling_factor
else:
scaling_factor = activity_scaling_factor
max_speed = lap.get_value("max_speed")
avg_speed = lap.get_value("avg_speed")
calories = lap.get_value("total_calories")
avg_heart = lap.get_value("avg_heart_rate")
max_heart = lap.get_value("max_heart_rate")
intensity = INTENSITY_MAP[lap.get_value("intensity")]
avg_cadence = lap.get_value("avg_cadence")
max_cadence = lap.get_value("max_cadence")
if lap.get_value("lap_trigger"):
triggermet = LAP_TRIGGER_MAP[lap.get_value("lap_trigger")]
else:
triggermet = LAP_TRIGGER_MAP["manual"]
lapelem = create_sub_element(element, "Lap")
lapelem.set("StartTime", iso_Z_format(start_time))
#
# TotalTimeSeconds
#
create_sub_element(lapelem, "TotalTimeSeconds", str("%d" % totaltime))
#
# DistanceMeters
#
lap_dist_elem = create_sub_element(lapelem,
"DistanceMeters",
str("%d" % stored_distance)
)
#
# MaximumSpeed
#
lap_max_spd_elem = create_sub_element(lapelem,
"MaximumSpeed",
str("%.3f" % max_speed))
#
# Calories
#
create_sub_element(lapelem, "Calories", str("%d" % calories))
#
# AverageHeartRateBpm
#
if avg_heart is not None:
heartrateelem = create_sub_element(lapelem, "AverageHeartRateBpm")
heartrateelem.set(
XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str("%d" % avg_heart))
#
# MaximumHeartRateBpm
#
if max_heart is not None:
heartrateelem = create_sub_element(lapelem, "MaximumHeartRateBpm")
heartrateelem.set(
XML_SCHEMA + "type", "HeartRateInBeatsPerMinute_t")
create_sub_element(heartrateelem, "Value", str("%d" % max_heart))
#
# Intensity
#
create_sub_element(lapelem, "Intensity", intensity)
#
# Cadence (bike)
#
if avg_speed or avg_cadence or max_cadence:
if sport == "Biking" and avg_cadence is not None:
# Average bike cadence is stored in main lap element,
# not as an extension, unlike average running cadence (below)
create_sub_element(lapelem, "Cadence", str("%d" % avg_cadence))
#
# TriggerMethod
#
create_sub_element(lapelem, "TriggerMethod", triggermet)
if dist_recalc:
distance_used = calculated_distance
elif calibrate:
if fixed_distance is not None:
distance_used = fixed_distance
else:
distance_used = stored_distance * scaling_factor
else:
distance_used = stored_distance
#
# Track
#
trackelem = create_sub_element(lapelem, "Track")
# First build tps array (using timestamp as the index)
# in order to coalesce values at the same timepoint
# under a single trackpoint element
tps = {}
fit_epoch = datetime(1989, 12, 31).replace(tzinfo=utc)
for trackpoint in activity.get_messages('record'):
tts = trackpoint.get_value("timestamp")
tsi = int((tts - fit_epoch).total_seconds())
if tps.get(tsi) is None:
tps[tsi] = {
'timestamp': tts,
'cadence': None,
'distance': None,
'position_lat': None,
'position_long': None,
'heart_rate': None,
'altitude': None,
'speed': None}
for var in ['cadence',
'distance',
'position_lat',
'position_long',
'heart_rate',
'altitude',
'speed']:
if trackpoint.get_value(var) is not None:
tps[tsi][var] = trackpoint.get_value(var)
# Iterate through all trackpoints to grab the first point before the
# start of the lap, then delete points that are not part of the lap
prev = None
for timestamp in sorted(tps, reverse=True):
tp = tps[timestamp]
if tp['timestamp'] < start_time and prev is None:
prev = copy.copy(tp)
if tp['timestamp'] < start_time or tp['timestamp'] > end_time:
del tps[timestamp]
# Then process all trackpoints for this lap, recalculating speed &
# distance from GPS and adjusting if requested, before adding element
stored_avg_speed = copy.copy(avg_speed)
stored_max_speed = copy.copy(max_speed)
distance = 0.0
max_speed = 0.0
tp_speed = None
for timestamp in sorted(tps):
tp = tps[timestamp]
trackpointelem = create_sub_element(trackelem, "Trackpoint")
if prev is not None:
if prev['distance'] is None:
prev['distance'] = 0
try:
tp_timedelta = (tp['timestamp'] -
prev['timestamp']).total_seconds()
gps_dist = GreatCircleDistance(
(tp['position_lat'],
tp['position_long']),
(prev['position_lat'],
prev['position_long'])
).meters
gps_speed = (gps_dist / tp_timedelta)
# Fallback to existing distance/speed stream data
# if the GPS data looks erroneous (acceleration test)
if (gps_speed / tp_timedelta) > MAX_ACCELERATION:
gps_speed = tp['speed']
gps_dist = tp['distance'] - prev['distance']
except:
gps_speed = tp['speed']
gps_dist = tp['distance'] - prev['distance']
if dist_recalc:
tp_dist = gps_dist
elif calibrate:
tp_dist = (
tp['distance'] - prev['distance']) * scaling_factor
else:
tp_dist = tp['distance'] - prev['distance']
try:
if speed_recalc:
tp_speed = gps_speed
elif calibrate:
tp_speed = tp['speed'] * scaling_factor
else:
tp_speed = tp['speed']
total_cumulative_distance += tp_dist
distance += tp_dist
if tp_speed > max_speed:
max_speed = tp_speed
except TypeError:
tp_speed = None
# Store previous trackpoint before changing the current one
prev = copy.copy(tp)
# Adjust trackpoint distance & speed values if requested
if ((dist_recalc or calibrate)
and tp['distance'] is not None
and total_cumulative_distance is not None):
tp['distance'] = "{:.1f}".format(total_cumulative_distance)
if ((speed_recalc or calibrate)
and tp['speed'] is not None
and tp_speed is not None):
tp['speed'] = "{:.3f}".format(tp_speed)
# Add trackpoint element
add_trackpoint(trackpointelem, tp, sport)
#
# Notes
#
if fixed_distance is not None:
precision_str = ("; known distance: {ref_dist:.3f} km "
"(FIT precision: {fit_precision:.1f}%; "
"GPS/footpod precision: {gps_precision:.1f}%)")
reference = "known distance"
else:
precision_str = " (precision: {precision:.1f}%)"
reference = "GPS/footpod"
try:
fit_precision_calc = (1 - (abs(reference_distance -
stored_distance) /
reference_distance)) * 100
gps_precision_calc = (1 - (abs(reference_distance -
calculated_distance) /
reference_distance)) * 100
precision_calc = (1 - (abs(calculated_distance -
stored_distance) /
calculated_distance)) * 100
except ZeroDivisionError:
fit_precision_calc = 100
gps_precision_calc = 100
precision_calc = 100
notes = ("Lap {lap_number:d}: {distance_used:.3f} km in {total_time!s}\n"
"Distance in FIT file: {fit_dist:.3f} km; "
"calculated via GPS/footpod: {gps_dist:.3f} km"
+ precision_str + "\n"
"Footpod calibration factor setting: {old_cf:.1f}%; "
"new factor based on {reference} for this lap: {new_cf:.1f}%"
).format(lap_number=lap_num,
distance_used=distance_used / 1000,
total_time=timedelta(seconds=int(totaltime)),
fit_dist=stored_distance / 1000,
gps_dist=calculated_distance / 1000,
ref_dist=reference_distance / 1000,
fit_precision=fit_precision_calc,
gps_precision=gps_precision_calc,
precision=precision_calc,
old_cf=current_cal_factor,
reference=reference,
new_cf=lap_scaling_factor * current_cal_factor)
add_notes(lapelem, notes)
#
# Extensions (AvgSpeed, AvgRunCadence, MaxRunCadence, MaxBikeCadence)
#
if not all(var is None for var in (avg_speed, avg_cadence, max_cadence)):
exelem = create_sub_element(lapelem, "Extensions")
lx = create_sub_element(exelem, "LX")
lx.set("xmlns",
"http://www.garmin.com/xmlschemas/ActivityExtension/v2")
if avg_speed is not None:
lap_avg_spd_elem = create_sub_element(lx,
"AvgSpeed",
str("%.3f" % avg_speed))
if avg_cadence is not None and sport == "Running":
create_sub_element(lx,
"AvgRunCadence",
str("%d" % avg_cadence))
if max_cadence is not None:
if sport == "Running":
create_sub_element(lx,
"MaxRunCadence",
str("%d" % max_cadence))
elif sport == "Biking":
create_sub_element(lx,
"MaxBikeCadence",
str("%d" % max_cadence))
# Adjust overall lap distance & speed values if required
if calibrate:
# Manual distance:
if fixed_distance is not None:
lap_dist_elem.text = "{:d}".format(int(fixed_distance))
lap_avg_spd_elem.text = "{:.3f}".format(
fixed_distance / totaltime)
else:
lap_dist_elem.text = "{:d}".format(
int(stored_distance * scaling_factor))
lap_avg_spd_elem.text = "{:.3f}".format(
stored_avg_speed * scaling_factor)
lap_max_spd_elem.text = "{:.3f}".format(
stored_max_speed * scaling_factor)
# GPS recalculation options override calibration:
if dist_recalc:
lap_dist_elem.text = "{:d}".format(int(distance))
if speed_recalc:
lap_avg_spd_elem.text = "{:.3f}".format(distance / totaltime)
lap_max_spd_elem.text = "{:.3f}".format(max_speed)
return distance
else:
return 0
def add_activity(element,
session,
activity,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
manual_lap_distance,
activity_scaling_factor):
"""Add an activity to a TCX document"""
# Sport type
sport = session.get_value("sport")
sport_mapping = {"running": "Running", "cycling": "Biking"}
sport = sport_mapping[sport] if sport in sport_mapping else "Other"
actelem = create_sub_element(element, "Activity")
actelem.set("Sport", sport)
create_sub_element(actelem,
"Id",
iso_Z_format(session.get_value("start_time")))
total_cumulative_distance = 0.0
lap_num = 0
for lap in activity.get_messages('lap'):
if lap.get_value("start_time") == lap.get_value("timestamp"):
continue # skip very short laps that won't have any data
if manual_lap_distance is not None:
try:
fixed_dist = manual_lap_distance[lap_num]
except IndexError:
fixed_dist = None
else:
fixed_dist = None
lap_dist = add_lap(actelem,
activity,
lap,
sport,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
fixed_dist,
activity_scaling_factor,
total_cumulative_distance)
total_cumulative_distance += lap_dist
lap_num += 1
return (actelem, total_cumulative_distance)
def convert(filename,
time_zone="auto",
dist_recalc=False,
speed_recalc=False,
calibrate=False,
per_lap_cal=False,
manual_lap_distance=None,
current_cal_factor=100.0):
"""Convert a FIT file to TCX format"""
# Calibration requires either GPS recalculation or manual lap distance(s):
if calibrate and not dist_recalc and manual_lap_distance is None:
sys.stderr.write("Calibration requested, enabling distance recalculation from GPS/footpod.\n")
dist_recalc = True
# Calibration with manual lap distances implies
# per-lap calibration:
if calibrate and manual_lap_distance is not None:
per_lap_cal = True
document = create_document()
element = create_sub_element(document.getroot(), "Activities")
try:
if time_zone == "auto":
# We need activity object to be able to get trackpoints,
# before re-creating activity again with timezone info
activity = FitFile(filename,
check_crc=False,
data_processor=MyDataProcessor())
activity.parse()
lat = None
lon = None
for trackpoint in activity.get_messages('record'):
if lat is not None and lon is not None:
break
lat = trackpoint.get_value("position_lat")
lon = trackpoint.get_value("position_long")
if lat is not None and lon is not None:
activity = FitFile(filename,
check_crc=False,
data_processor=TZDataProcessor(lat=lat,
lon=lon))
else:
activity = FitFile(filename,
check_crc=False,
data_processor=TZDataProcessor(tzname=time_zone))
activity.parse()
session = next(activity.get_messages('session'))
total_activity_distance = session.get_value('total_distance')
total_calculated_distance = sum_distance(activity)
activity_scaling_factor = (total_calculated_distance /
total_activity_distance)
new_cal_factor = activity_scaling_factor * current_cal_factor
actelem, total_distance = add_activity(element,
session,
activity,
dist_recalc,
speed_recalc,
calibrate,
current_cal_factor,
per_lap_cal,
manual_lap_distance,
activity_scaling_factor)
except FitParseError as e:
sys.stderr.write(str("Error while parsing .FIT file: %s" % e) + "\n")
sys.exit(1)
if dist_recalc:
distance_used = total_calculated_distance
elif calibrate:
distance_used = total_distance
else:
distance_used = total_activity_distance
method = ""
if dist_recalc or speed_recalc or calibrate:
parts = []
if calibrate:
if per_lap_cal:
parts.append("calibration applied per lap")
else:
parts.append("calibration applied")
if dist_recalc and speed_recalc:
parts.append("speed and distance recalculated")
elif dist_recalc:
parts.append("distance recalculated")
elif speed_recalc:
parts.append("speed recalculated")
if calibrate and manual_lap_distance is not None:
reference = " from known distance (with GPS fill-in)"
elif dist_recalc or speed_recalc:
reference = " from GPS/footpod"
method = "(" + ", ".join(parts) + reference + ")"
notes = ("{total_laps:d} laps: {distance_used:.3f} km in {total_time!s} {dist_method:s}\n"
"Distance in FIT file: {fit_dist:.3f} km; "
"calculated via GPS/footpod: {gps_dist:.3f} km "
"(precision: {precision:.1f}%)\n"
"Footpod calibration factor setting: {old_cf:.1f}%; "
"new factor based on recomputed distance: {new_cf:.1f}%"
).format(total_laps=session.get_value('num_laps'),
distance_used=distance_used / 1000,
total_time=timedelta(seconds=int(session.get_value(
'total_timer_time'))),
fit_dist=total_activity_distance / 1000,
gps_dist=total_calculated_distance / 1000,
precision=(1 - (abs(total_calculated_distance -
total_activity_distance) /
total_calculated_distance)) * 100,
old_cf=current_cal_factor,
new_cf=new_cal_factor,
dist_method=method)
add_notes(actelem, notes)
try:
dinfo = next(activity.get_messages('device_info'))
manufacturer = dinfo.get_value('manufacturer').title().replace('_', ' ')
product_name = dinfo.get_value('descriptor').replace('_', ' ')
product_id = dinfo.get_value('product')
serial_number = dinfo.get_value('serial_number')
except: # if no device_info message, StopIteration is thrown
fid = next(activity.get_messages('file_id'))
manufacturer = fid.get_value('manufacturer').title().replace('_', ' ')
product_id = fid.get_value('product')
product_name = PRODUCT_MAP[product_id] if product_id in PRODUCT_MAP else product_id
serial_number = fid.get_value('serial_number')
add_creator(actelem,
manufacturer,
product_name,
product_id,
serial_number
)
add_author(document)
return document
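# A minimal usage sketch (the file name is illustrative):
#   doc = convert("activity.fit", time_zone="Europe/London",
#                 dist_recalc=True, calibrate=True, per_lap_cal=True)
#   xml = lxml.etree.tostring(doc.getroot(), pretty_print=True,
#                             xml_declaration=True, encoding="UTF-8")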
def main():
"""Read arguments from command line to convert FIT file to TCX"""
parser = argparse.ArgumentParser(prog="fit2tcx")
parser.add_argument("FitFile", help="Input FIT file")
parser.add_argument("TcxFile", help="Output TCX file")
parser.add_argument(
"-v",
"--version",
action='version',
version='%(prog)s {version}'.format(version=__version__))
parser.add_argument(
"-z",
"--timezone",
action="store",
type=str,
default="auto",
help="Specify the timezone for FIT file timestamps (default, 'auto', uses GPS data to lookup the local timezone)")
parser.add_argument(
"-d",
"--recalculate-distance-from-gps",
action="store_true",
help="Recalculate distance from GPS data")
parser.add_argument(
"-s",
"--recalculate-speed-from-gps",
action="store_true",
help="Recalculate speed from GPS data")
parser.add_argument(
"-c",
"--calibrate-footpod",
action="store_true",
help="Use GPS-measured and/or known distance to calibrate footpod data")
parser.add_argument(
"-p",
"--per-lap-calibration",
action="store_true",
help="Apply footpod calibration on a per lap basis")
parser.add_argument(
"-l",
"--manual-lap-distance",
action="append",
default=None,
type=float,
help="Manually specify known lap distance(s) (in metres, use calibration to apply)")
parser.add_argument(
"-f",
"--calibration-factor",
action="store",
default=100.0,
type=float,
help="Existing calibration factor (defaults to 100.0)")
args = parser.parse_args()
if (args.calibrate_footpod and
not args.recalculate_distance_from_gps and
not args.manual_lap_distance):
parser.error("-c (--calibrate-footpod) requires either -d (--recalculate-distance-from-gps) or -l (--manual-lap-distance)")
return 1
try:
document = convert(args.FitFile,
args.timezone,
args.recalculate_distance_from_gps,
args.recalculate_speed_from_gps,
args.calibrate_footpod,
args.per_lap_calibration,
args.manual_lap_distance,
args.calibration_factor)
activity_notes = document.getroot().findtext(".//{*}Activity/{*}Notes")
if activity_notes is not None:
sys.stdout.write(str(activity_notes) + "\n")
tcx = open(args.TcxFile, 'wb')
tcx.write(lxml.etree.tostring(document.getroot(),
pretty_print=True,
xml_declaration=True,
encoding="UTF-8"))
return 0
except FitParseError as exception:
sys.stderr.write(str(exception) + "\n")
return 1
if __name__ == "__main__":
sys.exit(main())
| mit |
gkabbe/cMDLMC | mdlmc/IO/converters.py | 1 | 1764 | # coding=utf-8
import logging
import os
import pathlib
import tables
import h5py
import daiquiri
import fire
import numpy as np
from typing import Union, Iterable
from ..atoms.numpy_atom import dtype_xyz
from ..atoms import numpy_atom as npa
from ..IO.trajectory_parser import XYZTrajectory
logger = logging.getLogger(__name__)
def save_xyz_to_hdf5(xyz_fname, hdf5_fname=None, *, remove_com_movement=False,
dataset_name="trajectory", selection=None):
"""
Note: HDF5 with Blosc compression currently only works if h5py and pytables are installed via
conda!"""
xyz = XYZTrajectory(xyz_fname, selection=selection)
logger.info("Determine length of xyz trajectory.")
trajectory_length = len(xyz)
first_frame = next(iter(xyz))
frame_shape = first_frame.atom_positions.shape
atom_names = first_frame.atom_names.astype("S")
logger.info("Names: %s", atom_names)
if not hdf5_fname:
hdf5_fname = os.path.splitext(xyz_fname)[0] + ".hdf5"
with h5py.File(hdf5_fname, "w") as hdf5_file:
        # Blosc compression (filter code 32001) is registered by importing
        # tables; it is applied to the trajectory dataset below.
        traj_atomnames = hdf5_file.create_dataset("atom_names", atom_names.shape, dtype="S2")
        traj_atomnames[:] = atom_names
        traj = hdf5_file.create_dataset(dataset_name, shape=(trajectory_length, *frame_shape),
                                        dtype=np.float32, compression=32001)
for i, xyz_frame in enumerate(xyz):
if remove_com_movement:
npa.remove_center_of_mass_movement(xyz_frame)
if i % 1000 == 0:
logger.info("Frame %i", i)
traj[i] = xyz_frame.atom_positions
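# A minimal usage sketch (file names are illustrative):
#   save_xyz_to_hdf5("water.xyz", "water.hdf5", remove_com_movement=True,
#                    dataset_name="trajectory")
# The resulting file holds an "atom_names" dataset plus a float32,
# Blosc-compressed trajectory dataset of shape (n_frames,) + frame shape.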
def main():
daiquiri.setup(level=logging.INFO)
fire.Fire()
| gpl-3.0 |
google-code-export/evennia | src/comms/imc2lib/imc2_ansi.py | 4 | 2204 | """
ANSI parser - this adds colour to text according to
special markup strings.
This is an IMC2-compliant version.
"""
import re
from src.utils import ansi
class IMCANSIParser(ansi.ANSIParser):
"""
This parser is per the IMC2 specification.
"""
def __init__(self):
normal = ansi.ANSI_NORMAL
hilite = ansi.ANSI_HILITE
self.ansi_map = [
(r'~Z', normal), # Random
(r'~x', normal + ansi.ANSI_BLACK), # Black
(r'~D', hilite + ansi.ANSI_BLACK), # Dark Grey
(r'~z', hilite + ansi.ANSI_BLACK),
(r'~w', normal + ansi.ANSI_WHITE), # Grey
(r'~W', hilite + ansi.ANSI_WHITE), # White
(r'~g', normal + ansi.ANSI_GREEN), # Dark Green
(r'~G', hilite + ansi.ANSI_GREEN), # Green
(r'~p', normal + ansi.ANSI_MAGENTA), # Dark magenta
(r'~m', normal + ansi.ANSI_MAGENTA),
(r'~M', hilite + ansi.ANSI_MAGENTA), # Magenta
(r'~P', hilite + ansi.ANSI_MAGENTA),
(r'~c', normal + ansi.ANSI_CYAN), # Cyan
(r'~y', normal + ansi.ANSI_YELLOW), # Dark Yellow (brown)
(r'~Y', hilite + ansi.ANSI_YELLOW), # Yellow
(r'~b', normal + ansi.ANSI_BLUE), # Dark Blue
(r'~B', hilite + ansi.ANSI_BLUE), # Blue
(r'~C', hilite + ansi.ANSI_BLUE),
(r'~r', normal + ansi.ANSI_RED), # Dark Red
(r'~R', hilite + ansi.ANSI_RED), # Red
## Formatting
(r'~L', hilite), # Bold/hilite
(r'~!', normal), # reset
(r'\\r', normal),
(r'\\n', ansi.ANSI_RETURN),
]
# prepare regex matching
self.ansi_sub = [(re.compile(sub[0], re.DOTALL), sub[1])
for sub in self.ansi_map]
# prepare matching ansi codes overall
self.ansi_regex = re.compile("\033\[[0-9;]+m")
ANSI_PARSER = IMCANSIParser()
def parse_ansi(string, strip_ansi=False, parser=ANSI_PARSER):
"""
Shortcut to use the IMC2 ANSI parser.
"""
return parser.parse_ansi(string, strip_ansi=strip_ansi)
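# A usage sketch: '~R' switches to hilited red and '~!' resets, so
# parse_ansi("~RAlert!~!") returns the text wrapped in ANSI escape codes,
# while parse_ansi("~RAlert!~!", strip_ansi=True) strips the markup instead.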
| bsd-3-clause |
cricketclubucd/davisdragons | platform-tools/systrace/catapult/common/battor/battor/battor_wrapper.py | 4 | 16017 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import datetime
import os
import logging
import platform
import random
import subprocess
import sys
import tempfile
import time
from battor import battor_error
import py_utils
from py_utils import cloud_storage
import dependency_manager
from devil.utils import battor_device_mapping
from devil.utils import find_usb_devices
import serial
from serial.tools import list_ports
DEFAULT_SHELL_CLOSE_TIMEOUT_S = 60
def IsBattOrConnected(test_platform, android_device=None,
android_device_map=None, android_device_file=None):
"""Returns True if BattOr is detected."""
if test_platform == 'android':
if not android_device:
raise ValueError('Must pass android device serial when determining '
'support on android platform')
if not android_device_map:
device_tree = find_usb_devices.GetBusNumberToDeviceTreeMap()
if len(battor_device_mapping.GetBattOrList(device_tree)) == 1:
return True
if android_device_file:
android_device_map = battor_device_mapping.ReadSerialMapFile(
android_device_file)
else:
try:
android_device_map = battor_device_mapping.GenerateSerialMap()
except battor_error.BattOrError:
return False
# If neither if statement above is triggered, it means that an
# android_device_map was passed in and will be used.
return str(android_device) in android_device_map
elif test_platform == 'win':
for (_1, desc, _2) in serial.tools.list_ports.comports():
if 'USB Serial Port' in desc:
return True
logging.info('No usb serial port discovered. Available ones are: %s' %
list(serial.tools.list_ports.comports()))
return False
elif test_platform == 'mac':
for (_1, desc, _2) in serial.tools.list_ports.comports():
if 'BattOr' in desc:
return True
return False
elif test_platform == 'linux':
device_tree = find_usb_devices.GetBusNumberToDeviceTreeMap(fast=True)
return bool(battor_device_mapping.GetBattOrList(device_tree))
return False
class BattOrWrapper(object):
"""A class for communicating with a BattOr in python."""
_EXIT_CMD = 'Exit'
_GET_FIRMWARE_GIT_HASH_CMD = 'GetFirmwareGitHash'
_START_TRACING_CMD = 'StartTracing'
_STOP_TRACING_CMD = 'StopTracing'
_SUPPORTS_CLOCKSYNC_CMD = 'SupportsExplicitClockSync'
_RECORD_CLOCKSYNC_CMD = 'RecordClockSyncMarker'
_SUPPORTED_PLATFORMS = ['android', 'chromeos', 'linux', 'mac', 'win']
_SUPPORTED_AUTOFLASHING_PLATFORMS = ['linux', 'mac', 'win']
_BATTOR_PARTNO = 'x192a3u'
_BATTOR_PROGRAMMER = 'avr109'
_BATTOR_BAUDRATE = '115200'
def __init__(self, target_platform, android_device=None, battor_path=None,
battor_map_file=None, battor_map=None, serial_log_bucket=None,
autoflash=True):
"""Constructor.
Args:
target_platform: Platform BattOr is attached to.
android_device: Serial number of Android device.
battor_path: Path to BattOr device.
battor_map_file: File giving map of [device serial: BattOr path]
battor_map: Map of [device serial: BattOr path]
serial_log_bucket: The cloud storage bucket to which BattOr agent serial
        logs are uploaded on failure.
      autoflash: If True, allow automatic flashing of out-of-date BattOr
        firmware.
Attributes:
_battor_path: Path to BattOr. Typically similar to /tty/USB0.
_battor_agent_binary: Path to the BattOr agent binary used to communicate
with the BattOr.
_tracing: A bool saying if tracing has been started.
_battor_shell: A subprocess running the battor_agent_binary
_trace_results_path: Path to BattOr trace results file.
_serial_log_bucket: Cloud storage bucket to which BattOr agent serial logs
are uploaded on failure.
_serial_log_file: Temp file for the BattOr agent serial log.
"""
self._battor_path = self._GetBattOrPath(target_platform, android_device,
battor_path, battor_map_file, battor_map)
config = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'battor_binary_dependencies.json')
self._dm = dependency_manager.DependencyManager(
[dependency_manager.BaseConfig(config)])
self._battor_agent_binary = self._dm.FetchPath(
'battor_agent_binary', '%s_%s' % (sys.platform, platform.machine()))
self._autoflash = autoflash
self._serial_log_bucket = serial_log_bucket
self._tracing = False
self._battor_shell = None
self._trace_results_path = None
self._start_tracing_time = None
self._stop_tracing_time = None
self._trace_results = None
self._serial_log_file = None
self._target_platform = target_platform
self._git_hash = None
atexit.register(self.KillBattOrShell)
def _FlashBattOr(self):
assert self._battor_shell, (
'Must start shell before attempting to flash BattOr')
try:
device_git_hash = self.GetFirmwareGitHash()
battor_firmware, cs_git_hash = self._dm.FetchPathWithVersion(
'battor_firmware', 'default')
if cs_git_hash != device_git_hash:
logging.info(
'Flashing BattOr with old firmware version <%s> with new '
'version <%s>.', device_git_hash, cs_git_hash)
avrdude_config = self._dm.FetchPath('avrdude_config', 'default')
self.StopShell()
return self.FlashFirmware(battor_firmware, avrdude_config)
return False
except ValueError:
logging.exception('Git hash returned from BattOr was not as expected: %s'
% self._git_hash)
finally:
if not self._battor_shell:
# TODO(charliea): Once we understand why BattOrs are crashing, remove
# this log.
# http://crbug.com/699581
logging.info('_FlashBattOr serial log:')
self._UploadSerialLogToCloudStorage()
self._serial_log_file = None
self.StartShell()
def KillBattOrShell(self):
if self._battor_shell:
logging.critical('BattOr shell was not properly closed. Killing now.')
self._battor_shell.kill()
def GetShellReturnCode(self):
"""Gets the return code of the BattOr agent shell."""
rc = self._battor_shell.poll()
return rc
def StartShell(self):
"""Start BattOr binary shell."""
    assert not self._battor_shell, (
        'Attempting to start an already running BattOr shell.')
battor_cmd = [self._battor_agent_binary]
if self._serial_log_bucket:
# Create and immediately close a temp file in order to get a filename
# for the serial log.
self._serial_log_file = tempfile.NamedTemporaryFile(delete=False)
self._serial_log_file.close()
battor_cmd.append('--battor-serial-log=%s' % self._serial_log_file.name)
if self._battor_path:
battor_cmd.append('--battor-path=%s' % self._battor_path)
self._battor_shell = self._StartShellImpl(battor_cmd)
assert self.GetShellReturnCode() is None, 'Shell failed to start.'
def StopShell(self, timeout=None):
"""Stop BattOr binary shell."""
assert self._battor_shell, 'Attempting to stop a non-running BattOr shell.'
assert not self._tracing, 'Attempting to stop a BattOr shell while tracing.'
timeout = timeout if timeout else DEFAULT_SHELL_CLOSE_TIMEOUT_S
self._SendBattOrCommand(self._EXIT_CMD, check_return=False)
try:
      py_utils.WaitFor(lambda: self.GetShellReturnCode() is not None, timeout)
except py_utils.TimeoutException:
self.KillBattOrShell()
finally:
self._battor_shell = None
def StartTracing(self):
"""Start tracing on the BattOr."""
assert self._battor_shell, 'Must start shell before tracing'
assert not self._tracing, 'Tracing already started.'
self._FlashBattOr()
self._SendBattOrCommand(self._START_TRACING_CMD)
self._tracing = True
self._start_tracing_time = int(time.time())
def StopTracing(self):
"""Stop tracing on the BattOr."""
assert self._tracing, 'Must run StartTracing before StopTracing'
# Create temp file to reserve location for saving results.
temp_file = tempfile.NamedTemporaryFile(delete=False)
self._trace_results_path = temp_file.name
temp_file.close()
self._SendBattOrCommand(
'%s %s' % (self._STOP_TRACING_CMD, self._trace_results_path),
check_return=False)
self._tracing = False
self._stop_tracing_time = int(time.time())
def CollectTraceData(self, timeout=None):
"""Collect trace data from battor.
Args:
timeout: timeout for waiting on the BattOr process to terminate in
seconds.
    Returns: The trace data, read back from the results file as a string.
"""
# The BattOr shell terminates after returning the results.
if timeout is None:
timeout = self._stop_tracing_time - self._start_tracing_time
# TODO(charliea): Once we understand why BattOrs are crashing, only do
# this on failure.
# http://crbug.com/699581
logging.info('CollectTraceData serial log:')
self._UploadSerialLogToCloudStorage()
with open(self._trace_results_path) as results:
self._trace_results = results.read()
self._battor_shell = None
self._serial_log_file = None
return self._trace_results
def SupportsExplicitClockSync(self):
"""Returns if BattOr supports Clock Sync events."""
return bool(int(self._SendBattOrCommand(self._SUPPORTS_CLOCKSYNC_CMD,
check_return=False)))
def RecordClockSyncMarker(self, sync_id):
"""Record clock sync event on BattOr."""
if not isinstance(sync_id, basestring):
raise TypeError('sync_id must be a string.')
self._SendBattOrCommand('%s %s' % (self._RECORD_CLOCKSYNC_CMD, sync_id))
def _GetBattOrPath(self, target_platform, android_device=None,
battor_path=None, battor_map_file=None, battor_map=None):
"""Determines most likely path to the correct BattOr."""
if target_platform not in self._SUPPORTED_PLATFORMS:
raise battor_error.BattOrError(
'%s is an unsupported platform.' % target_platform)
if target_platform in ['win']:
# Right now, the BattOr agent binary isn't able to automatically detect
# the BattOr port on Windows. To get around this, we know that the BattOr
# shows up with a name of 'USB Serial Port', so use the COM port that
# corresponds to a device with that name.
for (port, desc, _) in serial.tools.list_ports.comports():
if 'USB Serial Port' in desc:
return port
raise battor_error.BattOrError(
'Could not find BattOr attached to machine.')
if target_platform in ['mac']:
for (port, desc, _) in serial.tools.list_ports.comports():
if 'BattOr' in desc:
return port
if target_platform in ['android', 'linux']:
device_tree = find_usb_devices.GetBusNumberToDeviceTreeMap(fast=True)
if battor_path:
if not isinstance(battor_path, basestring):
raise battor_error.BattOrError(
'An invalid BattOr path was specified.')
return battor_path
if target_platform == 'android':
if not android_device:
raise battor_error.BattOrError(
'Must specify device for Android platform.')
if not battor_map_file and not battor_map:
# No map was passed, so must create one.
battor_map = battor_device_mapping.GenerateSerialMap()
return battor_device_mapping.GetBattOrPathFromPhoneSerial(
str(android_device), serial_map_file=battor_map_file,
serial_map=battor_map)
# Not Android and no explicitly passed BattOr.
battors = battor_device_mapping.GetBattOrList(device_tree)
if len(battors) != 1:
raise battor_error.BattOrError(
'For non-Android platforms, exactly one BattOr must be '
'attached unless address is explicitly given.')
return '/dev/%s' % battors.pop()
raise NotImplementedError(
'BattOr Wrapper not implemented for given platform')
def _SendBattOrCommandImpl(self, cmd):
"""Sends command to the BattOr."""
self._battor_shell.stdin.write('%s\n' % cmd)
self._battor_shell.stdin.flush()
return self._battor_shell.stdout.readline()
def _SendBattOrCommand(self, cmd, check_return=True):
status = self._SendBattOrCommandImpl(cmd)
    if check_return and 'Done.' not in status:
self.KillBattOrShell()
self._UploadSerialLogToCloudStorage()
self._serial_log_file = None
raise battor_error.BattOrError(
'BattOr did not complete command \'%s\' correctly.\n'
'Outputted: %s' % (cmd, status))
return status
def _StartShellImpl(self, battor_cmd):
return subprocess.Popen(
battor_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=False)
def _UploadSerialLogToCloudStorage(self):
"""Uploads the BattOr serial log to cloud storage."""
if not self._serial_log_file or not cloud_storage.IsNetworkIOEnabled():
return
    remote_path = ('battor-serial-log-%s-%d.txt' % (
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'),
        random.randint(1, 100000)))
try:
cloud_url = cloud_storage.Insert(
self._serial_log_bucket, remote_path, self._serial_log_file.name)
sys.stderr.write('View BattOr serial log at %s\n' % cloud_url)
except cloud_storage.PermissionError as e:
logging.error('Cannot upload BattOr serial log file to cloud storage due '
'to permission error: %s' % e.message)
def GetFirmwareGitHash(self):
"""Gets the git hash for the BattOr firmware.
Returns: Git hash for firmware currently on the BattOr.
Also sets self._git_hash to this value.
Raises: ValueException if the git hash is not in hex.
"""
assert self._battor_shell, ('Must start shell before getting firmware git '
'hash')
self._git_hash = self._SendBattOrCommand(self._GET_FIRMWARE_GIT_HASH_CMD,
check_return=False).strip()
# We expect the git hash to be a valid 6 character hexstring. This will
# throw a ValueError exception otherwise.
int(self._git_hash, 16)
return self._git_hash
def FlashFirmware(self, hex_path, avrdude_config_path):
"""Flashes the BattOr using an avrdude config at config_path with the new
firmware at hex_path.
"""
assert not self._battor_shell, 'Cannot flash BattOr with open shell'
if self._target_platform not in self._SUPPORTED_AUTOFLASHING_PLATFORMS:
logging.critical('Flashing firmware on this platform is not supported.')
return False
avrdude_binary = self._dm.FetchPath(
'avrdude_binary', '%s_%s' % (sys.platform, platform.machine()))
# Sanitize hex file path for windows. It contains <drive>:/ which avrdude
# is not capable of handling.
_, hex_path = os.path.splitdrive(hex_path)
avr_cmd = [
avrdude_binary,
'-e', # Specify to erase data on chip.
'-p', self._BATTOR_PARTNO, # Specify AVR device.
# Specify which microcontroller programmer to use.
'-c', self._BATTOR_PROGRAMMER,
'-b', self._BATTOR_BAUDRATE, # Specify the baud rate to communicate at.
'-P', self._battor_path, # Serial path to the battor.
# Command to execute with hex file and path to hex file.
'-U', 'flash:w:%s' % hex_path,
'-C', avrdude_config_path, # AVRdude config file path.
'2>&1' # All output goes to stderr for some reason.
]
try:
subprocess.check_output(avr_cmd)
except subprocess.CalledProcessError as e:
raise BattOrFlashError('BattOr flash failed with return code %s.'
% e.returncode)
self._git_hash = None
return True
class BattOrFlashError(Exception):
pass
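# End-to-end tracing sketch (not part of the original module; assumes a Linux
# host with exactly one BattOr attached). The agent shell terminates on its
# own after CollectTraceData(), so no StopShell() call is needed here:
#   battor = BattOrWrapper('linux')
#   battor.StartShell()
#   battor.StartTracing()
#   if battor.SupportsExplicitClockSync():
#     battor.RecordClockSyncMarker('sync_0')
#   battor.StopTracing()
#   trace = battor.CollectTraceData()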
| mit |
BambooL/jeeves | demo/openmrs/settings.py | 3 | 2345 | """
Django settings for conf project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(__file__)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!$e(y9&5ol=#s7wex!xhv=f&5f2@ufjez3ee9kdifw=41p_+%*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates/'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'conf',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
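# A hypothetical PostgreSQL configuration for comparison (not used by this
# demo; credentials are placeholders):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'openmrs',
#         'USER': 'openmrs',
#         'PASSWORD': 'change-me',
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }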
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
| mit |
xuguozhi/DIGITS | digits/log.py | 17 | 3642 | # Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import logging
import logging.handlers
from digits.config import config_value
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
class JobIdLogger(logging.Logger):
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
"""
Customizing it to set a default value for extra['job_id']
"""
rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
if 'job_id' not in rv.__dict__:
rv.__dict__['job_id'] = ''
return rv
class JobIdLoggerAdapter(logging.LoggerAdapter):
"""
Accepts an optional keyword argument: 'job_id'
You can use this in 2 ways:
1. On class initialization
adapter = JobIdLoggerAdapter(logger, {'job_id': job_id})
adapter.debug(msg)
    2. On method invocation
adapter = JobIdLoggerAdapter(logger, {})
adapter.debug(msg, job_id=id)
"""
def process(self, msg, kwargs):
if 'job_id' in kwargs:
if 'extra' not in kwargs:
kwargs['extra'] = {}
kwargs['extra']['job_id'] = ' [%s]' % kwargs['job_id']
del kwargs['job_id']
elif 'job_id' in self.extra:
if 'extra' not in kwargs:
kwargs['extra'] = {}
kwargs['extra']['job_id'] = ' [%s]' % self.extra['job_id']
return msg, kwargs
def setup_logging():
socketio_logger = logging.getLogger('socketio')
socketio_logger.addHandler(logging.StreamHandler(sys.stdout))
# Set custom logger
logging.setLoggerClass(JobIdLogger)
formatter = logging.Formatter(
fmt="%(asctime)s%(job_id)s [%(levelname)-5s] %(message)s",
datefmt=DATE_FORMAT,
)
### digits logger
main_logger = logging.getLogger('digits')
main_logger.setLevel(logging.DEBUG)
# Log to stdout
stdoutHandler = logging.StreamHandler(sys.stdout)
stdoutHandler.setFormatter(formatter)
stdoutHandler.setLevel(logging.DEBUG)
main_logger.addHandler(stdoutHandler)
### digits.webapp logger
if config_value('log_file'):
webapp_logger = logging.getLogger('digits.webapp')
webapp_logger.setLevel(logging.DEBUG)
# Log to file
fileHandler = logging.handlers.RotatingFileHandler(
config_value('log_file'),
maxBytes=(1024*1024*10), # 10 MB
backupCount=10,
)
fileHandler.setFormatter(formatter)
level = config_value('log_level')
if level == 'debug':
fileHandler.setLevel(logging.DEBUG)
elif level == 'info':
fileHandler.setLevel(logging.INFO)
elif level == 'warning':
fileHandler.setLevel(logging.WARNING)
elif level == 'error':
fileHandler.setLevel(logging.ERROR)
elif level == 'critical':
fileHandler.setLevel(logging.CRITICAL)
webapp_logger.addHandler(fileHandler)
### Useful shortcut for the webapp, which may set job_id
return JobIdLoggerAdapter(webapp_logger, {})
else:
print 'WARNING: log_file config option not found - no log file is being saved'
return JobIdLoggerAdapter(main_logger, {})
# Do it when this module is loaded
logger = setup_logging()
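# Example call (the job id value is invented); the adapter accepts a per-call
# job_id keyword, as described on JobIdLoggerAdapter above:
#   logger.info('Training started', job_id='20160101-123456-abcd')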
| bsd-3-clause |
korrosivesec/crits | crits/actors/urls.py | 17 | 1099 | from django.conf.urls import patterns
urlpatterns = patterns('crits.actors.views',
(r'^add/$', 'add_actor'),
(r'^add_identifier_type/$', 'new_actor_identifier_type'),
(r'^tags/modify/$', 'actor_tags_modify'),
(r'^tags/get/$', 'get_actor_tags'),
(r'^add_identifier/$', 'add_identifier'),
(r'^attribute_identifier/$', 'attribute_identifier'),
(r'^edit_identifier/$', 'edit_attributed_identifier'),
(r'^remove_identifier/$', 'remove_attributed_identifier'),
(r'^edit/name/(?P<id_>\S+)/$', 'edit_actor_name'),
(r'^edit/aliases/$', 'edit_actor_aliases'),
(r'^search/$', 'actor_search'),
(r'^details/(?P<id_>\S+)/$', 'actor_detail'),
(r'^remove/(?P<id_>\S+)/$', 'remove_actor'),
(r'^list/$', 'actors_listing'),
(r'^list/(?P<option>\S+)/$', 'actors_listing'),
(r'^identifiers/types/available/$', 'get_actor_identifier_types'),
(r'^identifiers/values/available/$', 'get_actor_identifier_type_values'),
(r'^identifiers/list/$', 'actor_identifiers_listing'),
(r'^identifiers/list/(?P<option>\S+)/$', 'actor_identifiers_listing'),
)
| mit |
SummerLW/Perf-Insight-Report | third_party/Paste/paste/exceptions/collector.py | 49 | 19684 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
## Originally zExceptions.ExceptionFormatter from Zope;
## Modified by Ian Bicking, Imaginary Landscape, 2005
"""
An exception collector that finds traceback information plus
supplements
"""
import sys
import traceback
import time
from six.moves import cStringIO as StringIO
import linecache
from paste.exceptions import serial_number_generator
import warnings
DEBUG_EXCEPTION_FORMATTER = True
DEBUG_IDENT_PREFIX = 'E-'
FALLBACK_ENCODING = 'UTF-8'
__all__ = ['collect_exception', 'ExceptionCollector']
class ExceptionCollector(object):
"""
Produces a data structure that can be used by formatters to
display exception reports.
Magic variables:
If you define one of these variables in your local scope, you can
add information to tracebacks that happen in that context. This
allows applications to add all sorts of extra information about
the context of the error, including URLs, environmental variables,
users, hostnames, etc. These are the variables we look for:
``__traceback_supplement__``:
You can define this locally or globally (unlike all the other
variables, which must be defined locally).
``__traceback_supplement__`` is a tuple of ``(factory, arg1,
arg2...)``. When there is an exception, ``factory(arg1, arg2,
...)`` is called, and the resulting object is inspected for
supplemental information.
``__traceback_info__``:
This information is added to the traceback, usually fairly
literally.
``__traceback_hide__``:
If set and true, this indicates that the frame should be
hidden from abbreviated tracebacks. This way you can hide
some of the complexity of the larger framework and let the
user focus on their own errors.
By setting it to ``'before'``, all frames before this one will
be thrown away. By setting it to ``'after'`` then all frames
after this will be thrown away until ``'reset'`` is found. In
each case the frame where it is set is included, unless you
append ``'_and_this'`` to the value (e.g.,
``'before_and_this'``).
Note that formatters will ignore this entirely if the frame
that contains the error wouldn't normally be shown according
to these rules.
``__traceback_reporter__``:
This should be a reporter object (see the reporter module),
or a list/tuple of reporter objects. All reporters found this
way will be given the exception, innermost first.
``__traceback_decorator__``:
This object (defined in a local or global scope) will get the
result of this function (the CollectedException defined
below). It may modify this object in place, or return an
entirely new object. This gives the object the ability to
manipulate the traceback arbitrarily.
The actually interpretation of these values is largely up to the
reporters and formatters.
``collect_exception(*sys.exc_info())`` will return an object with
several attributes:
``frames``:
A list of frames
``exception_formatted``:
The formatted exception, generally a full traceback
``exception_type``:
The type of the exception, like ``ValueError``
``exception_value``:
The string value of the exception, like ``'x not in list'``
``identification_code``:
A hash of the exception data meant to identify the general
exception, so that it shares this code with other exceptions
that derive from the same problem. The code is a hash of
all the module names and function names in the traceback,
plus exception_type. This should be shown to users so they
can refer to the exception later. (@@: should it include a
portion that allows identification of the specific instance
of the exception as well?)
The list of frames goes innermost first. Each frame has these
attributes; some values may be None if they could not be
determined.
``modname``:
the name of the module
``filename``:
the filename of the module
``lineno``:
the line of the error
``revision``:
the contents of __version__ or __revision__
``name``:
the function name
``supplement``:
an object created from ``__traceback_supplement__``
``supplement_exception``:
a simple traceback of any exception ``__traceback_supplement__``
created
``traceback_info``:
the str() of any ``__traceback_info__`` variable found in the local
scope (@@: should it str()-ify it or not?)
``traceback_hide``:
the value of any ``__traceback_hide__`` variable
``traceback_log``:
the value of any ``__traceback_log__`` variable
``__traceback_supplement__`` is thrown away, but a fixed
set of attributes are captured; each of these attributes is
optional.
``object``:
the name of the object being visited
``source_url``:
the original URL requested
``line``:
the line of source being executed (for interpreters, like ZPT)
``column``:
the column of source being executed
``expression``:
the expression being evaluated (also for interpreters)
``warnings``:
a list of (string) warnings to be displayed
``getInfo``:
a function/method that takes no arguments, and returns a string
describing any extra information
``extraData``:
a function/method that takes no arguments, and returns a
dictionary. The contents of this dictionary will not be
displayed in the context of the traceback, but globally for
the exception. Results will be grouped by the keys in the
dictionaries (which also serve as titles). The keys can also
be tuples of (importance, title); in this case the importance
should be ``important`` (shows up at top), ``normal`` (shows
up somewhere; unspecified), ``supplemental`` (shows up at
bottom), or ``extra`` (shows up hidden or not at all).
These are used to create an object with attributes of the same
names (``getInfo`` becomes a string attribute, not a method).
``__traceback_supplement__`` implementations should be careful to
produce values that are relatively static and unlikely to cause
further errors in the reporting system -- any complex
introspection should go in ``getInfo()`` and should ultimately
return a string.
Note that all attributes are optional, and under certain
circumstances may be None or may not exist at all -- the collector
can only do a best effort, but must avoid creating any exceptions
itself.
Formatters may want to use ``__traceback_hide__`` as a hint to
hide frames that are part of the 'framework' or underlying system.
There are a variety of rules about special values for this
variables that formatters should be aware of.
TODO:
More attributes in __traceback_supplement__? Maybe an attribute
that gives a list of local variables that should also be
collected? Also, attributes that would be explicitly meant for
the entire request, not just a single frame. Right now some of
the fixed set of attributes (e.g., source_url) are meant for this
use, but there's no explicit way for the supplement to indicate
new values, e.g., logged-in user, HTTP referrer, environment, etc.
Also, the attributes that do exist are Zope/Web oriented.
More information on frames? cgitb, for instance, produces
extensive information on local variables. There exists the
possibility that getting this information may cause side effects,
which can make debugging more difficult; but it also provides
fodder for post-mortem debugging. However, the collector is not
meant to be configurable, but to capture everything it can and let
the formatters be configurable. Maybe this would have to be a
configuration value, or maybe it could be indicated by another
magical variable (which would probably mean 'show all local
variables below this frame')
"""
show_revisions = 0
def __init__(self, limit=None):
self.limit = limit
def getLimit(self):
limit = self.limit
if limit is None:
limit = getattr(sys, 'tracebacklimit', None)
return limit
def getRevision(self, globals):
if not self.show_revisions:
return None
revision = globals.get('__revision__', None)
if revision is None:
# Incorrect but commonly used spelling
revision = globals.get('__version__', None)
if revision is not None:
try:
revision = str(revision).strip()
except:
revision = '???'
return revision
def collectSupplement(self, supplement, tb):
result = {}
for name in ('object', 'source_url', 'line', 'column',
'expression', 'warnings'):
result[name] = getattr(supplement, name, None)
func = getattr(supplement, 'getInfo', None)
if func:
result['info'] = func()
else:
result['info'] = None
func = getattr(supplement, 'extraData', None)
if func:
result['extra'] = func()
else:
result['extra'] = None
return SupplementaryData(**result)
def collectLine(self, tb, extra_data):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
globals = f.f_globals
locals = f.f_locals
if not hasattr(locals, 'has_key'):
# Something weird about this frame; it's not a real dict
warnings.warn(
"Frame %s has an invalid locals(): %r" % (
globals.get('__name__', 'unknown'), locals))
locals = {}
data = {}
data['modname'] = globals.get('__name__', None)
data['filename'] = filename
data['lineno'] = lineno
data['revision'] = self.getRevision(globals)
data['name'] = name
data['tbid'] = id(tb)
# Output a traceback supplement, if any.
if '__traceback_supplement__' in locals:
# Use the supplement defined in the function.
tbs = locals['__traceback_supplement__']
elif '__traceback_supplement__' in globals:
# Use the supplement defined in the module.
# This is used by Scripts (Python).
tbs = globals['__traceback_supplement__']
else:
tbs = None
if tbs is not None:
factory = tbs[0]
args = tbs[1:]
try:
supp = factory(*args)
data['supplement'] = self.collectSupplement(supp, tb)
if data['supplement'].extra:
for key, value in data['supplement'].extra.items():
extra_data.setdefault(key, []).append(value)
except:
if DEBUG_EXCEPTION_FORMATTER:
out = StringIO()
traceback.print_exc(file=out)
text = out.getvalue()
data['supplement_exception'] = text
# else just swallow the exception.
try:
tbi = locals.get('__traceback_info__', None)
if tbi is not None:
data['traceback_info'] = str(tbi)
except:
pass
marker = []
for name in ('__traceback_hide__', '__traceback_log__',
'__traceback_decorator__'):
try:
tbh = locals.get(name, globals.get(name, marker))
if tbh is not marker:
data[name[2:-2]] = tbh
except:
pass
return data
def collectExceptionOnly(self, etype, value):
return traceback.format_exception_only(etype, value)
def collectException(self, etype, value, tb, limit=None):
# The next line provides a way to detect recursion.
__exception_formatter__ = 1
frames = []
ident_data = []
traceback_decorators = []
if limit is None:
limit = self.getLimit()
n = 0
extra_data = {}
while tb is not None and (limit is None or n < limit):
if tb.tb_frame.f_locals.get('__exception_formatter__'):
# Stop recursion. @@: should make a fake ExceptionFrame
frames.append('(Recursive formatException() stopped)\n')
break
data = self.collectLine(tb, extra_data)
frame = ExceptionFrame(**data)
frames.append(frame)
if frame.traceback_decorator is not None:
traceback_decorators.append(frame.traceback_decorator)
ident_data.append(frame.modname or '?')
ident_data.append(frame.name or '?')
tb = tb.tb_next
n = n + 1
ident_data.append(str(etype))
ident = serial_number_generator.hash_identifier(
' '.join(ident_data), length=5, upper=True,
prefix=DEBUG_IDENT_PREFIX)
result = CollectedException(
frames=frames,
exception_formatted=self.collectExceptionOnly(etype, value),
exception_type=etype,
exception_value=self.safeStr(value),
identification_code=ident,
date=time.localtime(),
extra_data=extra_data)
if etype is ImportError:
extra_data[('important', 'sys.path')] = [sys.path]
for decorator in traceback_decorators:
try:
new_result = decorator(result)
if new_result is not None:
result = new_result
except:
pass
return result
def safeStr(self, obj):
try:
return str(obj)
except UnicodeEncodeError:
try:
return unicode(obj).encode(FALLBACK_ENCODING, 'replace')
except UnicodeEncodeError:
# This is when something is really messed up, but this can
# happen when the __str__ of an object has to handle unicode
return repr(obj)
limit = 200
class Bunch(object):
"""
A generic container
"""
def __init__(self, **attrs):
for name, value in attrs.items():
setattr(self, name, value)
def __repr__(self):
name = '<%s ' % self.__class__.__name__
name += ' '.join(['%s=%r' % (name, str(value)[:30])
for name, value in self.__dict__.items()
if not name.startswith('_')])
return name + '>'
class CollectedException(Bunch):
"""
This is the result of collection the exception; it contains copies
of data of interest.
"""
# A list of frames (ExceptionFrame instances), innermost last:
frames = []
# The result of traceback.format_exception_only; this looks
# like a normal traceback you'd see in the interactive interpreter
exception_formatted = None
# The *string* representation of the type of the exception
# (@@: should we give the # actual class? -- we can't keep the
# actual exception around, but the class should be safe)
# Something like 'ValueError'
exception_type = None
# The string representation of the exception, from ``str(e)``.
exception_value = None
# An identifier which should more-or-less classify this particular
# exception, including where in the code it happened.
identification_code = None
# The date, as time.localtime() returns:
date = None
# A dictionary of supplemental data:
extra_data = {}
class SupplementaryData(Bunch):
"""
The result of __traceback_supplement__. We don't keep the
supplement object around, for fear of GC problems and whatnot.
(@@: Maybe I'm being too superstitious about copying only specific
information over)
"""
# These attributes are copied from the object, or left as None
# if the object doesn't have these attributes:
object = None
source_url = None
line = None
column = None
expression = None
warnings = None
# This is the *return value* of supplement.getInfo():
info = None
class ExceptionFrame(Bunch):
"""
This represents one frame of the exception. Each frame is a
context in the call stack, typically represented by a line
number and module name in the traceback.
"""
# The name of the module; can be None, especially when the code
# isn't associated with a module.
modname = None
# The filename (@@: when no filename, is it None or '?'?)
filename = None
# Line number
lineno = None
# The value of __revision__ or __version__ -- but only if
# show_revision = True (by defaut it is false). (@@: Why not
# collect this?)
revision = None
# The name of the function with the error (@@: None or '?' when
# unknown?)
name = None
# A SupplementaryData object, if __traceback_supplement__ was found
# (and produced no errors)
supplement = None
# If accessing __traceback_supplement__ causes any error, the
# plain-text traceback is stored here
supplement_exception = None
# The str() of any __traceback_info__ value found
traceback_info = None
# The value of __traceback_hide__
traceback_hide = False
# The value of __traceback_decorator__
traceback_decorator = None
# The id() of the traceback scope, can be used to reference the
# scope for use elsewhere
tbid = None
def get_source_line(self, context=0):
"""
Return the source of the current line of this frame. You
probably want to .strip() it as well, as it is likely to have
leading whitespace.
If context is given, then that many lines on either side will
also be returned. E.g., context=1 will give 3 lines.
"""
if not self.filename or not self.lineno:
return None
lines = []
for lineno in range(self.lineno-context, self.lineno+context+1):
lines.append(linecache.getline(self.filename, lineno))
return ''.join(lines)
if hasattr(sys, 'tracebacklimit'):
limit = min(limit, sys.tracebacklimit)
col = ExceptionCollector()
def collect_exception(t, v, tb, limit=None):
"""
Collection an exception from ``sys.exc_info()``.
Use like::
try:
blah blah
except:
exc_data = collect_exception(*sys.exc_info())
"""
return col.collectException(t, v, tb, limit=limit)
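# A short usage sketch (the failing expression is arbitrary):
#
#   try:
#       {}['missing']
#   except:
#       exc_data = collect_exception(*sys.exc_info())
#       code = exc_data.identification_code
#       frames = exc_data.frames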
| bsd-3-clause |
rdnetto/entropy | lib/entropy/transceivers/uri_handlers/plugins/interfaces/file_plugin.py | 6 | 9636 | # -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@copyright: Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
@license: GPL-2
B{EntropyTransceiver File URI Handler module}.
"""
import os
import pwd
import grp
import shutil
import errno
import fcntl
from entropy.const import const_setup_perms, etpConst, const_debug_write
from entropy.transceivers.uri_handlers.skel import EntropyUriHandler
from entropy.tools import md5sum
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
_filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in _filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class EntropyFileUriHandler(EntropyUriHandler):
"""
EntropyUriHandler based FILE (local) transceiver plugin.
"""
PLUGIN_API_VERSION = 4
@staticmethod
def approve_uri(uri):
if uri.startswith("file://"):
return True
return False
@staticmethod
def get_uri_name(uri):
myuri = uri.split("/")[2:][0].split(":")[0]
myuri = myuri.split("@")[-1]
return myuri
@staticmethod
def hide_sensible_data(uri):
return uri
def __init__(self, uri):
EntropyUriHandler.__init__(self, uri)
self.__dir = os.path.expanduser(
os.path.expandvars(self._drop_file_protocol(uri)))
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _drop_file_protocol(self, uri_str):
return uri_str[len("file://"):]
def _setup_remote_path(self, remote_path):
return os.path.join(self.__dir, remote_path)
def download(self, remote_path, save_path):
remote_str = self._setup_remote_path(remote_path)
if not os.path.isfile(remote_str):
return False # remote path not available
tmp_save_path = save_path + EntropyUriHandler.TMP_TXC_FILE_EXT
shutil.copyfile(remote_str, tmp_save_path)
os.rename(tmp_save_path, save_path)
return True
    def download_many(self, remote_paths, save_dir):
        for remote_path in remote_paths:
            save_path = os.path.join(save_dir, os.path.basename(remote_path))
            rc = self.download(remote_path, save_path)
            if not rc:
                return rc
        # all downloads succeeded (or the list was empty)
        return True
def upload(self, load_path, remote_path):
remote_str = self._setup_remote_path(remote_path)
tmp_remote_str = remote_str + EntropyUriHandler.TMP_TXC_FILE_EXT
shutil.copyfile(load_path, tmp_remote_str)
os.rename(tmp_remote_str, remote_str)
return True
def lock(self, remote_path):
remote_str = self._setup_remote_path(remote_path)
remote_str_lock = os.path.join(
os.path.dirname(remote_str),
"." + os.path.basename(remote_str) + ".lock")
const_debug_write(__name__,
"lock(): remote_str: %s, lock: %s" % (
remote_str, remote_str_lock,))
# Use low level IO because Python open() and with stmt
# do unwanted things like creating the file on close() or
        # context exit. I didn't investigate, nor do I care, actually.
lock_fd = None
try:
lock_fd = os.open(remote_str_lock, os.O_RDWR | os.O_CREAT)
try:
fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as err:
if err.errno not in (errno.EACCES, errno.EAGAIN,):
# ouch, wtf?
raise
return False
# create file
if os.path.isfile(remote_str):
# locked, ouch
return False
# we run in mutual exclusion, so it's safe
# to do test-and-set here
with open(remote_str, "wb") as remote_f:
pass
# cleanup the lock file
os.remove(remote_str_lock)
# release the resource
fcntl.flock(lock_fd, fcntl.LOCK_UN)
return True
finally:
if lock_fd is not None:
os.close(lock_fd)
def upload_many(self, load_path_list, remote_dir):
for load_path in load_path_list:
remote_path = os.path.join(remote_dir, os.path.basename(load_path))
rc = self.upload(load_path, remote_path)
if not rc:
return rc
return True
def rename(self, remote_path_old, remote_path_new):
remote_ptr_old = self._setup_remote_path(remote_path_old)
remote_ptr_new = self._setup_remote_path(remote_path_new)
try:
os.rename(remote_ptr_old, remote_ptr_new)
except OSError:
tmp_remote_ptr_new = remote_ptr_new + \
EntropyUriHandler.TMP_TXC_FILE_EXT
shutil.move(remote_ptr_old, tmp_remote_ptr_new)
os.rename(tmp_remote_ptr_new, remote_ptr_new)
return True
def copy(self, remote_path_old, remote_path_new):
tmp_remote_path_new = remote_path_new + \
EntropyUriHandler.TMP_TXC_FILE_EXT
remote_ptr_old = self._setup_remote_path(remote_path_old)
remote_ptr_new = self._setup_remote_path(tmp_remote_path_new)
try:
shutil.copy2(remote_ptr_old, remote_ptr_new)
except (OSError, IOError):
self.delete(tmp_remote_path_new)
return False
# atomic rename
done = self.rename(tmp_remote_path_new, remote_path_new)
if not done:
self.delete(tmp_remote_path_new)
return done
def delete(self, remote_path):
remote_str = self._setup_remote_path(remote_path)
try:
os.remove(remote_str)
except OSError:
return False
return True
def delete_many(self, remote_paths):
for remote_path in remote_paths:
rc = self.delete(remote_path)
if not rc:
return rc
return True
def get_md5(self, remote_path):
remote_str = self._setup_remote_path(remote_path)
if not os.path.isfile(remote_str):
return None
return md5sum(remote_str)
def list_content(self, remote_path):
remote_str = self._setup_remote_path(remote_path)
if os.path.isdir(remote_str):
return os.listdir(remote_str)
return []
def list_content_metadata(self, remote_path):
content = self.list_content(remote_path)
remote_str = self._setup_remote_path(remote_path)
data = []
for item in content:
item_path = os.path.join(remote_str, item)
st = os.lstat(item_path)
try:
owner = pwd.getpwuid(st.st_uid).pw_name
except KeyError:
owner = "nobody"
try:
group = grp.getgrgid(st.st_gid).gr_name
except KeyError:
group = "nobody"
data.append((item, st.st_size, owner, group, filemode(st.st_mode)))
return data
def is_dir(self, remote_path):
remote_str = self._setup_remote_path(remote_path)
return os.path.isdir(remote_str)
def is_file(self, remote_path):
remote_str = self._setup_remote_path(remote_path)
return os.path.isfile(remote_str)
def is_path_available(self, remote_path):
remote_str = self._setup_remote_path(remote_path)
return os.path.lexists(remote_str)
def makedirs(self, remote_path):
remote_str = self._setup_remote_path(remote_path)
if not os.path.isdir(remote_str):
os.makedirs(remote_str, 0o755)
const_setup_perms(remote_str, etpConst['entropygid'], recursion = False)
return True
def keep_alive(self):
return
def close(self):
return
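    # Quick usage sketch (URI and paths are invented); the plugin maps
    # "file://" URIs onto the local filesystem:
    #
    #   handler = EntropyFileUriHandler('file:///var/tmp/repo')
    #   handler.makedirs('packages')
    #   handler.upload('/tmp/foo.tbz2', 'packages/foo.tbz2')
    #   entries = handler.list_content('packages')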
| gpl-2.0 |
ZacariasBendeck/youtube-dl | youtube_dl/extractor/karaoketv.py | 105 | 1241 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote_plus
from ..utils import (
js_to_json,
)
class KaraoketvIE(InfoExtractor):
_VALID_URL = r'http://karaoketv\.co\.il/\?container=songs&id=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://karaoketv.co.il/?container=songs&id=171568',
'info_dict': {
'id': '171568',
'ext': 'mp4',
'title': 'אל העולם שלך - רותם כהן - שרים קריוקי',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
page_video_url = self._og_search_video_url(webpage, video_id)
config_json = compat_urllib_parse_unquote_plus(self._search_regex(
r'config=(.*)', page_video_url, 'configuration'))
urls_info_json = self._download_json(
config_json, video_id, 'Downloading configuration',
transform_source=js_to_json)
url = urls_info_json['playlist'][0]['url']
return {
'id': video_id,
'title': self._og_search_title(webpage),
'url': url,
}
| unlicense |
remotesyssupport/cobbler-1 | cobbler/collection_files.py | 9 | 2177 | """
Files provide a container for file resources.
Copyright 2010, Kelsey Hightower
Kelsey Hightower <kelsey.hightower@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import item_file as file
import utils
import collection
from cexceptions import CX
from utils import _
#--------------------------------------------
class Files(collection.Collection):
def collection_type(self):
return "file"
def factory_produce(self,config,seed_data):
"""
Return a File forged from seed_data
"""
return file.File(config).from_datastruct(seed_data)
def remove(self,name,with_delete=True,with_sync=True,with_triggers=True,recursive=False,logger=None):
"""
Remove element named 'name' from the collection
"""
name = name.lower()
obj = self.find(name=name)
if obj is not None:
if with_delete:
if with_triggers:
utils.run_triggers(self.config.api, obj, "/var/lib/cobbler/triggers/delete/file/*", [], logger)
del self.listing[name]
self.config.serialize_delete(self, obj)
if with_delete:
if with_triggers:
utils.run_triggers(self.config.api, obj, "/var/lib/cobbler/triggers/delete/file/post/*", [], logger)
utils.run_triggers(self.config.api, obj, "/var/lib/cobbler/triggers/change/*", [], logger)
return True
raise CX(_("cannot delete an object that does not exist: %s") % name)
| gpl-2.0 |
eino-makitalo/odoo | addons/website_gengo/__init__.py | 316 | 1024 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
| agpl-3.0 |
azunite/chrome_build | third_party/coverage/misc.py | 49 | 4259 | """Miscellaneous stuff for Coverage."""
import errno
import inspect
import os
import sys
from coverage.backward import md5, sorted # pylint: disable=W0622
from coverage.backward import string_class, to_bytes
def nice_pair(pair):
"""Make a nice string representation of a pair of numbers.
If the numbers are equal, just return the number, otherwise return the pair
with a dash between them, indicating the range.
"""
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end)
def format_lines(statements, lines):
"""Nicely format a list of line numbers.
Format a list of line numbers for printing by coalescing groups of lines as
long as the lines represent consecutive statements. This will coalesce
even if there are gaps between statements.
For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
`lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
"""
pairs = []
i = 0
j = 0
start = None
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
            if start is None:
start = lines[j]
end = lines[j]
j += 1
elif start:
pairs.append((start, end))
start = None
i += 1
if start:
pairs.append((start, end))
ret = ', '.join(map(nice_pair, pairs))
return ret
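# For example, with the same values as the docstring above:
#   format_lines([1, 2, 3, 4, 5, 10, 11, 12, 13, 14], [1, 2, 5, 10, 11, 13, 14])
#   returns '1-2, 5-11, 13-14'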
def short_stack():
"""Return a string summarizing the call stack."""
stack = inspect.stack()[:0:-1]
return "\n".join(["%30s : %s @%d" % (t[3],t[1],t[2]) for t in stack])
def expensive(fn):
"""A decorator to cache the result of an expensive operation.
Only applies to methods with no arguments.
"""
attr = "_cache_" + fn.__name__
def _wrapped(self):
"""Inner fn that checks the cache."""
if not hasattr(self, attr):
setattr(self, attr, fn(self))
return getattr(self, attr)
return _wrapped
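# Sketch of the decorator in use (the class and computation are invented);
# note it only applies to zero-argument methods:
#   class Analysis(object):
#       @expensive
#       def arcs(self):
#           return self._compute_arcs()  # computed once, then cached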
def bool_or_none(b):
"""Return bool(b), but preserve None."""
if b is None:
return None
else:
return bool(b)
def join_regex(regexes):
"""Combine a list of regexes into one that matches any of them."""
if len(regexes) > 1:
return "|".join(["(%s)" % r for r in regexes])
elif regexes:
return regexes[0]
else:
return ""
def file_be_gone(path):
"""Remove a file, and don't get annoyed if it doesn't exist."""
try:
os.remove(path)
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOENT:
raise
class Hasher(object):
"""Hashes Python data into md5."""
def __init__(self):
self.md5 = md5()
def update(self, v):
"""Add `v` to the hash, recursively if needed."""
self.md5.update(to_bytes(str(type(v))))
if isinstance(v, string_class):
self.md5.update(to_bytes(v))
elif isinstance(v, (int, float)):
self.update(str(v))
elif isinstance(v, (tuple, list)):
for e in v:
self.update(e)
elif isinstance(v, dict):
keys = v.keys()
for k in sorted(keys):
self.update(k)
self.update(v[k])
else:
for k in dir(v):
if k.startswith('__'):
continue
a = getattr(v, k)
if inspect.isroutine(a):
continue
self.update(k)
self.update(a)
def digest(self):
"""Retrieve the digest of the hash."""
return self.md5.digest()
class CoverageException(Exception):
"""An exception specific to Coverage."""
pass
class NoSource(CoverageException):
"""We couldn't find the source for a module."""
pass
class NoCode(NoSource):
"""We couldn't find any code at all."""
pass
class NotPython(CoverageException):
"""A source file turned out not to be parsable Python."""
pass
class ExceptionDuringRun(CoverageException):
"""An exception happened while running customer code.
Construct it with three arguments, the values from `sys.exc_info`.
"""
pass
| bsd-3-clause |
PsychoTV/PsychoTeam.repository | plugin.video.specto/resources/lib/resolvers/googledocs.py | 23 | 2319 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,json
from resources.lib.libraries import client
def resolve(url):
try:
url = url.split('/preview', 1)[0]
url = url.replace('drive.google.com', 'docs.google.com')
result = client.request(url)
result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
u = json.loads(result)
u = [i.split('|')[-1] for i in u.split(',')]
u = sum([tag(i) for i in u], [])
url = []
try: url += [[i for i in u if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
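# For example, itag 22 (YouTube's 720p MP4) falls in the 'HD' bucket:
#   tag('http://example.com/video?itag=22')
#   ->  [{'quality': 'HD', 'url': 'http://example.com/video?itag=22'}]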
| gpl-2.0 |
saurabh6790/med_test_lib | webnotes/widgets/form/utils.py | 22 | 3528 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes, json
from webnotes import _
@webnotes.whitelist()
def remove_attach():
"""remove attachment"""
import webnotes.utils.file_manager
fid = webnotes.form_dict.get('fid')
webnotes.utils.file_manager.remove_file(fid)
@webnotes.whitelist()
def get_fields():
"""get fields"""
r = {}
args = {
'select':webnotes.form_dict.get('select')
,'from':webnotes.form_dict.get('from')
,'where':webnotes.form_dict.get('where')
}
ret = webnotes.conn.sql("select %(select)s from `%(from)s` where %(where)s limit 1" % args)
if ret:
fl, i = webnotes.form_dict.get('fields').split(','), 0
for f in fl:
r[f], i = ret[0][i], i+1
webnotes.response['message']=r
@webnotes.whitelist()
def validate_link():
"""validate link when updated by user"""
import webnotes
import webnotes.utils
value, options, fetch = webnotes.form_dict.get('value'), webnotes.form_dict.get('options'), webnotes.form_dict.get('fetch')
# no options, don't validate
if not options or options=='null' or options=='undefined':
webnotes.response['message'] = 'Ok'
return
if webnotes.conn.sql("select name from `tab%s` where name=%s" % (options, '%s'), value):
# get fetch values
if fetch:
webnotes.response['fetch_values'] = [webnotes.utils.parse_val(c) \
for c in webnotes.conn.sql("select %s from `tab%s` where name=%s" \
% (fetch, options, '%s'), value)[0]]
webnotes.response['message'] = 'Ok'
@webnotes.whitelist()
def add_comment(doclist):
"""allow any logged user to post a comment"""
doclist = json.loads(doclist)
doclist[0]["__islocal"] = 1
doclistobj = webnotes.bean(doclist)
doclistobj.ignore_permissions = True
doclistobj.save()
	# return the saved documents' fields (plain dicts) to the caller
	return [d.fields for d in doclistobj.doclist]
@webnotes.whitelist()
def get_next(doctype, name, prev):
import webnotes.widgets.reportview
prev = int(prev)
field = "`tab%s`.name" % doctype
res = webnotes.widgets.reportview.execute(doctype,
fields = [field],
filters = [[doctype, "name", "<" if prev else ">", name]],
order_by = field + " " + ("desc" if prev else "asc"),
limit_start=0, limit_page_length=1, as_list=True)
if not res:
webnotes.msgprint(_("No further records"))
return None
else:
return res[0][0]
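# Example (doctype and record name are invented): fetch the record after
# "SO-0001" in name order, or the previous one by passing prev=1:
#   next_name = get_next("Sales Order", "SO-0001", prev=0)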
@webnotes.whitelist()
def get_linked_docs(doctype, name, metadata_loaded=None):
if not metadata_loaded: metadata_loaded = []
meta = webnotes.get_doctype(doctype, True)
linkinfo = meta[0].get("__linked_with")
results = {}
for dt, link in linkinfo.items():
link["doctype"] = dt
linkmeta = webnotes.get_doctype(dt, True)
if not linkmeta[0].get("issingle"):
fields = [d.fieldname for d in linkmeta.get({"parent":dt, "in_list_view":1,
"fieldtype": ["not in", ["Image", "HTML", "Button", "Table"]]})] \
+ ["name", "modified", "docstatus"]
fields = ["`tab{dt}`.`{fn}`".format(dt=dt, fn=sf.strip()) for sf in fields if sf]
if link.get("child_doctype"):
ret = webnotes.get_list(doctype=dt, fields=fields,
filters=[[link.get('child_doctype'), link.get("fieldname"), '=', name]])
else:
ret = webnotes.get_list(doctype=dt, fields=fields,
filters=[[dt, link.get("fieldname"), '=', name]])
if ret:
results[dt] = ret
if not dt in metadata_loaded:
if not "docs" in webnotes.local.response:
webnotes.local.response.docs = []
webnotes.local.response.docs += linkmeta
return results | mit |
chromium2014/src | tools/perf/page_sets/intl_ko_th_vi.py | 1 | 1913 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class IntlKoThViPage(page_module.Page):
def __init__(self, url, page_set):
super(IntlKoThViPage, self).__init__(url=url, page_set=page_set)
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/intl_ko_th_vi.json'
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
class IntlKoThViPageSet(page_set_module.PageSet):
""" Popular pages in Korean, Thai and Vietnamese. """
def __init__(self):
super(IntlKoThViPageSet, self).__init__(
user_agent_type='desktop',
archive_data_file='data/intl_ko_th_vi.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
# Why: #7 site in Vietnam
'http://us.24h.com.vn/',
# Why: #6 site in Vietnam
'http://vnexpress.net/',
# Why: #18 site in Vietnam
'http://vietnamnet.vn/',
# Why: #5 site in Vietnam
# pylint: disable=C0301
'http://news.zing.vn/the-gioi/ba-dam-thep-margaret-thatcher-qua-doi/a312895.html#home_noibat1',
'http://kenh14.vn/home.chn',
# Why: #5 site in Korea
'http://www.naver.com/',
# Why: #9 site in Korea
'http://www.daum.net/',
# Why: #25 site in Korea
'http://www.donga.com/',
'http://www.chosun.com/',
'http://www.danawa.com/',
# Why: #10 site in Thailand
'http://pantip.com/',
'http://thaimisc.com/'
]
for url in urls_list:
self.AddPage(IntlKoThViPage(url, self))
| bsd-3-clause |
ianastewart/cwltc-admin | mysite/settings/staging.py | 1 | 3443 | from .base import *
DEBUG = False
LIVE_GO_CARDLESS = False
LIVE_MAIL = False
SITE_NAME = os.path.basename(__file__).title()
env_path = os.path.join(BASE_DIR, ".env")
environ.Env.read_env(env_path)
INSTALLED_APPS += ["raven.contrib.django.raven_compat"]
DATABASES = {"default": env.db_url("DATABASE_URL")}
ALLOWED_HOSTS = ["django.iskt.co.uk"]
SECURE_SSL_REDIRECT = False
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG
STATIC_ROOT = os.path.join(BASE_DIR, "static_files/")
SECRET_KEY = env.str("SECRET_KEY")
BEE_FREE_ID = env.str("BEE_FREE_ID")
BEE_FREE_SECRET = env.str("BEE_FREE_SECRET")
POS_COOKIE = env.str("POS_COOKIE")
if LIVE_MAIL:
print("Warning - Live mail")
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
ANYMAIL = env.dict("ANYMAIL")
else:
EMAIL_BACKEND = "django.core.mail.backends.dummy.EmailBackend"
if LIVE_GO_CARDLESS:
CARDLESS_ACCESS_TOKEN = env.str("CARDLESS_PRODUCTION_TOKEN")
CARDLESS_ENVIRONMENT = "live"
CARDLESS_WEBHOOK_SECRET = env.str("CARDLESS_WEBHOOK_SECRET")
print("WARNING - LIVE Go Cardless site")
else:
CARDLESS_ACCESS_TOKEN = env.str("CARDLESS_SANDBOX_TOKEN")
CARDLESS_ENVIRONMENT = "sandbox"
CARDLESS_WEBHOOK_SECRET = env.str("CARDLESS_WEBHOOK_SECRET")
# BROKER_URL = env.str('BROKER_URL')
RAVEN_CONFIG = {"dsn": env.str("RAVEN")}
# https://www.webforefront.com/django/setupdjangologging.html
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
},
"formatters": {
"simple": {"format": "[%(asctime)s] %(levelname)s %(message)s", "datefmt": "%Y-%m-%d %H:%M:%S"},
"verbose": {
"format": "[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
},
},
"handlers": {
"console": {
"level": "DEBUG",
"filters": ["require_debug_true"],
"class": "logging.StreamHandler",
"formatter": "simple",
},
"production_logfile": {
"level": "INFO",
"filters": ["require_debug_false"],
"class": "logging.handlers.RotatingFileHandler",
"filename": "./logs/django.log",
"maxBytes": 1024 * 1024 * 10, # 10MB
"backupCount": 5,
"formatter": "simple",
},
"sentry": {
"level": "ERROR", # To capture more than ERROR, change to WARNING, INFO, etc.
"filters": ["require_debug_false"],
"class": "raven.contrib.django.raven_compat.handlers.SentryHandler",
"tags": {"custom-tag": SITE_NAME},
},
},
"root": {"level": "DEBUG", "handlers": ["console"]},
"loggers": {
"members": {"handlers": ["production_logfile", "sentry"]},
"django": {"handlers": ["console", "sentry"], "propagate": True},
# stop sentry logging disallowed host
"django.security.DisallowedHost": {"handlers": ["console"], "propagate": False},
"django.request": { # debug logging of things that break requests
"handlers": ["production_logfile", "sentry"],
"level": "DEBUG",
"propagate": True,
},
"py.warnings": {"handlers": ["console"]},
},
}
| mit |
impowski/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/inputstream.py | 618 | 30855 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
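# Illustrative usage sketch (not part of the original file): BufferedStream
# makes an otherwise unseekable byte source rewindable by replaying its chunk
# list. Assuming Python 3:
#   >>> buf = BufferedStream(BytesIO(b"abcdef"))
#   >>> buf.read(3)
#   b'abc'
#   >>> buf.seek(0)
#   >>> buf.read(3)  # served from the buffered chunks, not the source
#   b'abc'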
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
if isinstance(source, http_client.HTTPResponse):
# Work around Python bug #20007: read(0) closes the connection.
# http://bugs.python.org/issue20007
isUnicode = False
elif hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
if encoding is not None:
raise TypeError("Cannot explicitly set an encoding with a unicode string")
return HTMLUnicodeInputStream(source)
else:
return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
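# Illustrative dispatch sketch (not part of the original file), assuming
# Python 3 where str is text_type:
#   >>> isinstance(HTMLInputStream("<p>hi</p>"), HTMLUnicodeInputStream)
#   True
#   >>> isinstance(HTMLInputStream(b"<p>hi</p>"), HTMLBinaryInputStream)
#   True
#   >>> HTMLInputStream("<p>", encoding="utf-8")
#   Traceback (most recent call last):
#   ...
#   TypeError: Cannot explicitly set an encoding with a unicode string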
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Craziness
if len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
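# Illustrative sketch of the normalization above (not part of the original
# file): CR LF pairs and lone CRs both become LF before the tokenizer sees
# them. Assuming Python 3:
#   >>> s = HTMLUnicodeInputStream("a\r\nb\rc")
#   >>> s.charsUntil("x")
#   'a\nb\nc'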
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
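# Illustrative sketch (not part of the original file): with opposite=True the
# same machinery consumes *only* the given characters.
#   >>> s = HTMLUnicodeInputStream("  <p")
#   >>> s.charsUntil(spaceCharacters, True)
#   '  '
#   >>> s.char()
#   '<'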
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
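# Illustrative sketch (not part of the original file): a single character can
# be pushed back and is re-served on the next read.
#   >>> s = HTMLUnicodeInputStream("ab")
#   >>> first = s.char()
#   >>> s.unget(first)
#   >>> s.char()
#   'a'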
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
self.charEncoding = (codecName(encoding), "certain")
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 512
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
# Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
# Call superclass
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except Exception:
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
# First look for a BOM
# This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
# If there is no BOM need to look for meta elements with encoding
# information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
# Guess with chardet, if available
if encoding is None and chardet:
confidence = "tentative"
try:
try:
from charade.universaldetector import UniversalDetector
except ImportError:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence = "tentative"
encoding = self.defaultEncoding
# Substitute for equivalent encodings:
encodingSub = {"iso-8859-1": "windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certain")
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
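# Illustrative sketch (not part of the original file): a UTF-8 BOM upgrades
# the detected encoding to "certain" and is skipped before parsing begins.
#   >>> stream = HTMLBinaryInputStream(codecs.BOM_UTF8 + b"<html>")
#   >>> stream.charEncoding
#   ('utf-8', 'certain')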
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
# if we have <meta not followed by a space so just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = codecName(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
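# Illustrative sketch (not part of the original file): the mini parser works
# directly on raw bytes, so a charset can be sniffed before the document is
# decoded.
#   >>> EncodingParser(b'<meta charset="utf-8">').getEncoding()
#   'utf-8'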
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if self.data.currentByte != b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
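# Illustrative sketch (not part of the original file), assuming Python 3:
# the value is sliced out of the lowered byte string, quoted or not.
#   >>> ContentAttrParser(EncodingBytes(b"text/html; charset=ISO-8859-1")).parse()
#   b'iso-8859-1'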
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, bytes):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding:
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
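# Illustrative sketch (not part of the original file): punctuation is stripped
# before the lookup, so spelling variants resolve to one canonical codec.
#   >>> codecName(b"UTF_8")
#   'utf-8'
#   >>> codecName("no-such-encoding") is None
#   True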
| mpl-2.0 |
yintaoxue/read-open-source-code | solr-4.7.2/src/org/apache/lucene/util/packed/gen_Packed64SingleBlock.py | 15 | 10530 | #! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SUPPORTED_BITS_PER_VALUE = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 21, 32]
HEADER="""// This file has been automatically generated, DO NOT EDIT
package org.apache.lucene.util.packed;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.util.RamUsageEstimator;
/**
* This class is similar to {@link Packed64} except that it trades space for
* speed by ensuring that a single block needs to be read/written in order to
* read/write a value.
*/
abstract class Packed64SingleBlock extends PackedInts.MutableImpl {
public static final int MAX_SUPPORTED_BITS_PER_VALUE = %d;
private static final int[] SUPPORTED_BITS_PER_VALUE = new int[] {%s};
public static boolean isSupported(int bitsPerValue) {
return Arrays.binarySearch(SUPPORTED_BITS_PER_VALUE, bitsPerValue) >= 0;
}
private static int requiredCapacity(int valueCount, int valuesPerBlock) {
return valueCount / valuesPerBlock
+ (valueCount %% valuesPerBlock == 0 ? 0 : 1);
}
final long[] blocks;
Packed64SingleBlock(int valueCount, int bitsPerValue) {
super(valueCount, bitsPerValue);
assert isSupported(bitsPerValue);
final int valuesPerBlock = 64 / bitsPerValue;
blocks = new long[requiredCapacity(valueCount, valuesPerBlock)];
}
@Override
public void clear() {
Arrays.fill(blocks, 0L);
}
@Override
public long ramBytesUsed() {
return RamUsageEstimator.alignObjectSize(
RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
+ 2 * RamUsageEstimator.NUM_BYTES_INT // valueCount,bitsPerValue
+ RamUsageEstimator.NUM_BYTES_OBJECT_REF) // blocks ref
+ RamUsageEstimator.sizeOf(blocks);
}
@Override
public int get(int index, long[] arr, int off, int len) {
assert len > 0 : "len must be > 0 (got " + len + ")";
assert index >= 0 && index < valueCount;
len = Math.min(len, valueCount - index);
assert off + len <= arr.length;
final int originalIndex = index;
// go to the next block boundary
final int valuesPerBlock = 64 / bitsPerValue;
final int offsetInBlock = index %% valuesPerBlock;
if (offsetInBlock != 0) {
for (int i = offsetInBlock; i < valuesPerBlock && len > 0; ++i) {
arr[off++] = get(index++);
--len;
}
if (len == 0) {
return index - originalIndex;
}
}
// bulk get
assert index %% valuesPerBlock == 0;
final PackedInts.Decoder decoder = BulkOperation.of(PackedInts.Format.PACKED_SINGLE_BLOCK, bitsPerValue);
assert decoder.longBlockCount() == 1;
assert decoder.longValueCount() == valuesPerBlock;
final int blockIndex = index / valuesPerBlock;
final int nblocks = (index + len) / valuesPerBlock - blockIndex;
decoder.decode(blocks, blockIndex, arr, off, nblocks);
final int diff = nblocks * valuesPerBlock;
index += diff; len -= diff;
if (index > originalIndex) {
// stay at the block boundary
return index - originalIndex;
} else {
// no progress so far => already at a block boundary but no full block to
// get
assert index == originalIndex;
return super.get(index, arr, off, len);
}
}
@Override
public int set(int index, long[] arr, int off, int len) {
assert len > 0 : "len must be > 0 (got " + len + ")";
assert index >= 0 && index < valueCount;
len = Math.min(len, valueCount - index);
assert off + len <= arr.length;
final int originalIndex = index;
// go to the next block boundary
final int valuesPerBlock = 64 / bitsPerValue;
final int offsetInBlock = index %% valuesPerBlock;
if (offsetInBlock != 0) {
for (int i = offsetInBlock; i < valuesPerBlock && len > 0; ++i) {
set(index++, arr[off++]);
--len;
}
if (len == 0) {
return index - originalIndex;
}
}
// bulk set
assert index %% valuesPerBlock == 0;
final BulkOperation op = BulkOperation.of(PackedInts.Format.PACKED_SINGLE_BLOCK, bitsPerValue);
assert op.longBlockCount() == 1;
assert op.longValueCount() == valuesPerBlock;
final int blockIndex = index / valuesPerBlock;
final int nblocks = (index + len) / valuesPerBlock - blockIndex;
op.encode(arr, off, blocks, blockIndex, nblocks);
final int diff = nblocks * valuesPerBlock;
index += diff; len -= diff;
if (index > originalIndex) {
// stay at the block boundary
return index - originalIndex;
} else {
// no progress so far => already at a block boundary but no full block to
// set
assert index == originalIndex;
return super.set(index, arr, off, len);
}
}
@Override
public void fill(int fromIndex, int toIndex, long val) {
assert fromIndex >= 0;
assert fromIndex <= toIndex;
assert PackedInts.bitsRequired(val) <= bitsPerValue;
final int valuesPerBlock = 64 / bitsPerValue;
if (toIndex - fromIndex <= valuesPerBlock << 1) {
// there needs to be at least one full block to set for the block
// approach to be worth trying
super.fill(fromIndex, toIndex, val);
return;
}
// set values naively until the next block start
int fromOffsetInBlock = fromIndex %% valuesPerBlock;
if (fromOffsetInBlock != 0) {
for (int i = fromOffsetInBlock; i < valuesPerBlock; ++i) {
set(fromIndex++, val);
}
assert fromIndex %% valuesPerBlock == 0;
}
// bulk set of the inner blocks
final int fromBlock = fromIndex / valuesPerBlock;
final int toBlock = toIndex / valuesPerBlock;
assert fromBlock * valuesPerBlock == fromIndex;
long blockValue = 0L;
for (int i = 0; i < valuesPerBlock; ++i) {
blockValue = blockValue | (val << (i * bitsPerValue));
}
Arrays.fill(blocks, fromBlock, toBlock, blockValue);
// fill the gap
for (int i = valuesPerBlock * toBlock; i < toIndex; ++i) {
set(i, val);
}
}
@Override
protected PackedInts.Format getFormat() {
return PackedInts.Format.PACKED_SINGLE_BLOCK;
}
@Override
public String toString() {
return getClass().getSimpleName() + "(bitsPerValue=" + bitsPerValue
+ ", size=" + size() + ", elements.length=" + blocks.length + ")";
}
public static Packed64SingleBlock create(DataInput in,
int valueCount, int bitsPerValue) throws IOException {
Packed64SingleBlock reader = create(valueCount, bitsPerValue);
for (int i = 0; i < reader.blocks.length; ++i) {
reader.blocks[i] = in.readLong();
}
return reader;
}
""" %(SUPPORTED_BITS_PER_VALUE[-1], ", ".join(map(str, SUPPORTED_BITS_PER_VALUE)))
FOOTER = "}"
if __name__ == '__main__':
f = open("Packed64SingleBlock.java", 'w')
f.write(HEADER)
f.write(" public static Packed64SingleBlock create(int valueCount, int bitsPerValue) {\n")
f.write(" switch (bitsPerValue) {\n")
for bpv in SUPPORTED_BITS_PER_VALUE:
f.write(" case %d:\n" %bpv)
f.write(" return new Packed64SingleBlock%d(valueCount);\n" %bpv)
f.write(" default:\n")
f.write(" throw new IllegalArgumentException(\"Unsupported number of bits per value: \" + %d);\n" %bpv)
f.write(" }\n")
f.write(" }\n\n")
for bpv in SUPPORTED_BITS_PER_VALUE:
log_2 = 0
while (1 << log_2) < bpv:
log_2 = log_2 + 1
if (1 << log_2) != bpv:
log_2 = None
f.write(" static class Packed64SingleBlock%d extends Packed64SingleBlock {\n\n" %bpv)
f.write(" Packed64SingleBlock%d(int valueCount) {\n" %bpv)
f.write(" super(valueCount, %d);\n" %bpv)
f.write(" }\n\n")
f.write(" @Override\n")
f.write(" public long get(int index) {\n")
if log_2 is not None:
f.write(" final int o = index >>> %d;\n" %(6 - log_2))
f.write(" final int b = index & %d;\n" %((1 << (6 - log_2)) - 1))
f.write(" final int shift = b << %d;\n" %log_2)
else:
f.write(" final int o = index / %d;\n" %(64 / bpv))
f.write(" final int b = index %% %d;\n" %(64 / bpv))
f.write(" final int shift = b * %d;\n" %bpv)
f.write(" return (blocks[o] >>> shift) & %dL;\n" %((1 << bpv) - 1))
f.write(" }\n\n")
f.write(" @Override\n")
f.write(" public void set(int index, long value) {\n")
if log_2 is not None:
f.write(" final int o = index >>> %d;\n" %(6 - log_2))
f.write(" final int b = index & %d;\n" %((1 << (6 - log_2)) - 1))
f.write(" final int shift = b << %d;\n" %log_2)
else:
f.write(" final int o = index / %d;\n" %(64 / bpv))
f.write(" final int b = index %% %d;\n" %(64 / bpv))
f.write(" final int shift = b * %d;\n" %bpv)
f.write(" blocks[o] = (blocks[o] & ~(%dL << shift)) | (value << shift);\n" % ((1 << bpv) - 1))
f.write(" }\n\n")
f.write(" }\n\n")
f.write(FOOTER)
f.close()
| apache-2.0 |
garbled1/ansible | lib/ansible/modules/cloud/amazon/elb_target_group_facts.py | 18 | 9764 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elb_target_group_facts
short_description: Gather facts about ELB target groups in AWS
description:
- Gather facts about ELB target groups in AWS
version_added: "2.4"
author: Rob White (@wimnat)
options:
load_balancer_arn:
description:
- The Amazon Resource Name (ARN) of the load balancer.
required: false
target_group_arns:
description:
- The Amazon Resource Names (ARN) of the target groups.
required: false
names:
description:
- The names of the target groups.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all target groups
- elb_target_group_facts:
# Gather facts about the target group attached to a particular ELB
- elb_target_group_facts:
load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
# Gather facts about a target groups named 'tg1' and 'tg2'
- elb_target_group_facts:
names:
- tg1
- tg2
'''
RETURN = '''
target_groups:
description: a list of target groups
returned: always
type: complex
contains:
deregistration_delay_timeout_seconds:
description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
returned: always
type: int
sample: 300
health_check_interval_seconds:
description: The approximate amount of time, in seconds, between health checks of an individual target.
returned: always
type: int
sample: 30
health_check_path:
description: The destination for the health check request.
returned: always
type: string
sample: /index.html
health_check_port:
description: The port to use to connect with the target.
returned: always
type: string
sample: traffic-port
health_check_protocol:
description: The protocol to use to connect with the target.
returned: always
type: string
sample: HTTP
health_check_timeout_seconds:
description: The amount of time, in seconds, during which no response means a failed health check.
returned: always
type: int
sample: 5
healthy_threshold_count:
description: The number of consecutive health checks successes required before considering an unhealthy target healthy.
returned: always
type: int
sample: 5
load_balancer_arns:
description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
returned: always
type: list
sample: []
matcher:
description: The HTTP codes to use when checking for a successful response from a target.
returned: always
type: dict
sample: {
"http_code": "200"
}
port:
description: The port on which the targets are listening.
returned: always
type: int
sample: 80
protocol:
description: The protocol to use for routing traffic to the targets.
returned: always
type: string
sample: HTTP
stickiness_enabled:
description: Indicates whether sticky sessions are enabled.
returned: always
type: bool
sample: true
stickiness_lb_cookie_duration_seconds:
description: The time period, in seconds, during which requests from a client should be routed to the same target.
returned: always
type: int
sample: 86400
stickiness_type:
description: The type of sticky sessions.
returned: always
type: string
sample: lb_cookie
tags:
description: The tags attached to the target group.
returned: always
type: dict
sample: "{
'Tag': 'Example'
}"
target_group_arn:
description: The Amazon Resource Name (ARN) of the target group.
returned: always
type: string
sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
target_group_name:
description: The name of the target group.
returned: always
type: string
sample: mytargetgroup
unhealthy_threshold_count:
description: The number of consecutive health check failures required before considering the target unhealthy.
returned: always
type: int
sample: 2
vpc_id:
description: The ID of the VPC for the targets.
returned: always
type: string
sample: vpc-0123456
'''
import traceback
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def get_target_group_attributes(connection, module, target_group_arn):
try:
target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# Replace '.' with '_' in attribute key names to make it more Ansibley
return dict((k.replace('.', '_'), v)
for (k, v) in target_group_attributes.items())
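# Illustrative transform (not part of the module): boto3 reports attribute
# keys such as "deregistration_delay.timeout_seconds"; the comprehension
# above rewrites them so they are usable as Ansible fact names, e.g.
#   {"deregistration_delay.timeout_seconds": "300"}
#   -> {"deregistration_delay_timeout_seconds": "300"}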
def get_target_group_tags(connection, module, target_group_arn):
try:
return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def list_target_groups(connection, module):
load_balancer_arn = module.params.get("load_balancer_arn")
target_group_arns = module.params.get("target_group_arns")
names = module.params.get("names")
try:
target_group_paginator = connection.get_paginator('describe_target_groups')
if not load_balancer_arn and not target_group_arns and not names:
target_groups = target_group_paginator.paginate().build_full_result()
if load_balancer_arn:
target_groups = target_group_paginator.paginate(LoadBalancerArn=load_balancer_arn).build_full_result()
if target_group_arns:
target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result()
if names:
target_groups = target_group_paginator.paginate(Names=names).build_full_result()
except ClientError as e:
if e.response['Error']['Code'] == 'TargetGroupNotFound':
module.exit_json(target_groups=[])
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except NoCredentialsError as e:
module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
# Get the attributes and tags for each target group
for target_group in target_groups['TargetGroups']:
target_group.update(get_target_group_attributes(connection, module, target_group['TargetGroupArn']))
# Turn the boto3 result in to ansible_friendly_snaked_names
snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
# Get tags for each target group
for snaked_target_group in snaked_target_groups:
snaked_target_group['tags'] = get_target_group_tags(connection, module, snaked_target_group['target_group_arn'])
module.exit_json(target_groups=snaked_target_groups)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
load_balancer_arn=dict(type='str'),
target_group_arns=dict(type='list'),
names=dict(type='list')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_target_groups(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
certik/hermes2d | python/examples/03.py | 4 | 1500 | #! /usr/bin/env python
# This example shows how to solve a first simple PDE:
# - load the mesh,
# - perform initial refinements
# - create a H1 space over the mesh
# - define weak formulation
# - initialize matrix solver
# - assemble and solve the matrix system
# - visualize the solution
#
# PDE: Poisson equation -Laplace u = CONST_F with homogeneous (zero)
# Dirichlet boundary conditions.
#
# You can change the constant right-hand side CONST_F, the
# initial polynomial degree P_INIT, and play with various initial
# mesh refinements at the beginning.
# Import modules
from hermes2d import Mesh, MeshView, H1Shapeset, PrecalcShapeset, H1Space, \
WeakForm, Solution, ScalarView, LinSystem, DummySolver
from hermes2d.forms import set_forms
from hermes2d.examples.c03 import set_bc
from hermes2d.examples import get_example_mesh
P_INIT = 5 # Uniform polynomial degree of mesh elements.
# Problem parameters.
CONST_F = 2.0
# Load the mesh file
mesh = Mesh()
mesh.load(get_example_mesh())
# Sample "manual" mesh refinement
mesh.refine_all_elements()
# Create an H1 space with default shapeset
space = H1Space(mesh, P_INIT)
set_bc(space)
# Initialize the weak formulation
wf = WeakForm(1)
set_forms(wf)
# Initialize the linear system
ls = LinSystem(wf)
ls.set_spaces(space)
# Assemble and solve the matrix problem.
sln = Solution()
ls.assemble()
ls.solve_system(sln)
# Visualize the solution
sln.plot()
# Visualize the mesh
mesh.plot(space=space)
| gpl-2.0 |
KaranToor/MA450 | google-cloud-sdk/platform/gsutil/gslib/tests/test_trace.py | 20 | 1679 | # -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for gsutil --trace-token option."""
from __future__ import absolute_import
from gslib.cs_api_map import ApiSelector
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
@SkipForS3('--trace-token is supported only on GCS JSON API.')
class TestTraceTokenOption(testcase.GsUtilIntegrationTestCase):
"""Integration tests for gsutil --trace-token option."""
def test_minus_tracetoken_cat(self):
"""Tests cat command with trace-token option."""
key_uri = self.CreateObject(contents='0123456789')
(_, stderr) = self.RunGsUtil(
['-D', '--trace-token=THISISATOKEN', 'cat', suri(key_uri)],
return_stdout=True, return_stderr=True)
if self.test_api == ApiSelector.JSON:
self.assertIn('You are running gsutil with trace output enabled.', stderr)
self.assertRegexpMatches(
stderr, r'.*GET.*b/%s/o/%s\?.*&trace=token%%3ATHISISATOKEN' %
(key_uri.bucket_name, key_uri.object_name))
| apache-2.0 |
shsingh/netmiko | tests/test_linux.py | 7 | 2368 | #!/usr/bin/env python
from __future__ import print_function
from netmiko import ConnectHandler
def main():
try:
hostname = raw_input("Enter remote host to test: ")
username = raw_input("Enter remote username: ")
except NameError:
hostname = input("Enter remote host to test: ")
username = input("Enter remote username: ")
linux_test = {
'username': username,
'use_keys': True,
'ip': hostname,
'device_type': 'ovs_linux',
'key_file': '/home/{}/.ssh/test_rsa'.format(username),
'verbose': False}
net_connect = ConnectHandler(**linux_test)
print()
print(net_connect.find_prompt())
# Test enable mode
print()
print("***** Testing enable mode *****")
net_connect.enable()
if net_connect.check_enable_mode():
print("Success: in enable mode")
else:
print("Fail...")
print(net_connect.find_prompt())
net_connect.exit_enable_mode()
print("Out of enable mode")
print(net_connect.find_prompt())
# Test config mode
print()
print("***** Testing config mode *****")
net_connect.config_mode()
if net_connect.check_config_mode():
print("Success: in config mode")
else:
print("Fail...")
print(net_connect.find_prompt())
net_connect.exit_config_mode()
print("Out of config mode")
print(net_connect.find_prompt())
# Test config mode (when already at root prompt)
print()
print("***** Testing config mode when already root *****")
net_connect.enable()
if net_connect.check_enable_mode():
print("Success: in enable mode")
else:
print("Fail...")
print(net_connect.find_prompt())
print("Test config_mode while already at root prompt")
net_connect.config_mode()
if net_connect.check_config_mode():
print("Success: still at root prompt")
else:
print("Fail...")
net_connect.exit_config_mode()
# Should do nothing
net_connect.exit_enable_mode()
print("Out of config/enable mode")
print(net_connect.find_prompt())
# Send config commands
print()
print("***** Testing send_config_set *****")
print(net_connect.find_prompt())
output = net_connect.send_config_set(['ls -al'])
print(output)
print()
if __name__ == "__main__":
main()
| mit |
duyetdev/openerp-6.1.1 | openerp/addons/account_voucher/__openerp__.py | 9 | 2927 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "eInvoicing & Payments",
"version" : "1.0",
"author" : 'OpenERP SA',
'complexity': "normal",
"description": """
Account Voucher module includes all the basic requirements of Voucher Entries for Bank, Cash, Sales, Purchase, Expense, Contra, etc.
====================================================================================================================================
* Voucher Entry
* Voucher Receipt
* Cheque Register
""",
"category": 'Accounting & Finance',
"sequence": 4,
"website" : "http://tinyerp.com",
"images" : ["images/customer_payment.jpeg","images/journal_voucher.jpeg","images/sales_receipt.jpeg","images/supplier_voucher.jpeg"],
"depends" : ["account"],
"init_xml" : [],
"demo_xml" : [],
"update_xml" : [
"security/ir.model.access.csv",
"account_voucher_sequence.xml",
"account_voucher_workflow.xml",
"account_voucher_report.xml",
"wizard/account_voucher_unreconcile_view.xml",
"wizard/account_statement_from_invoice_view.xml",
"account_voucher_view.xml",
"voucher_payment_receipt_view.xml",
"voucher_sales_purchase_view.xml",
"account_voucher_wizard.xml",
"account_voucher_pay_invoice.xml",
"report/account_voucher_sales_receipt_view.xml",
"security/account_voucher_security.xml"
],
"test" : [
"test/account_voucher.yml",
"test/sales_receipt.yml",
"test/sales_payment.yml",
"test/account_voucher_report.yml",
"test/case1_usd_usd.yml",
"test/case2_usd_eur_debtor_in_eur.yml",
"test/case2_usd_eur_debtor_in_usd.yml",
"test/case3_eur_eur.yml",
"test/case4_cad_chf.yml",
],
'certificate': '0037580727101',
"auto_install": False,
"application": True,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
volatilityfoundation/volatility | volatility/plugins/dlldump.py | 12 | 6282 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# Additional Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
from volatility import renderers
import volatility.plugins.procdump as procdump
from volatility.renderers.basic import Address
import volatility.win32.tasks as tasks
import volatility.debug as debug
import volatility.utils as utils
import volatility.cache as cache
class DLLDump(procdump.ProcDump):
"""Dump DLLs from a process address space"""
def __init__(self, config, *args, **kwargs):
procdump.ProcDump.__init__(self, config, *args, **kwargs)
config.remove_option("OFFSET")
config.add_option('REGEX', short_option = 'r',
help = 'Dump dlls matching REGEX',
action = 'store', type = 'string')
config.add_option('IGNORE-CASE', short_option = 'i',
help = 'Ignore case in pattern match',
action = 'store_true', default = False)
config.add_option('OFFSET', short_option = 'o', default = None,
help = 'Dump DLLs for Process with physical address OFFSET',
action = 'store', type = 'int')
config.add_option('BASE', short_option = 'b', default = None,
help = 'Dump DLLS at the specified BASE offset in the process address space',
action = 'store', type = 'int')
@cache.CacheDecorator(lambda self: "tests/dlldump/regex={0}/ignore_case={1}/offset={2}/base={3}".format(self._config.REGEX, self._config.IGNORE_CASE, self._config.OFFSET, self._config.BASE))
def calculate(self):
addr_space = utils.load_as(self._config)
if self._config.DUMP_DIR == None:
debug.error("Please specify a dump directory (--dump-dir)")
if not os.path.isdir(self._config.DUMP_DIR):
debug.error(self._config.DUMP_DIR + " is not a directory")
if self._config.OFFSET != None:
data = [self.virtual_process_from_physical_offset(addr_space, self._config.OFFSET)]
else:
data = self.filter_tasks(tasks.pslist(addr_space))
if self._config.REGEX:
try:
if self._config.IGNORE_CASE:
mod_re = re.compile(self._config.REGEX, re.I)
else:
mod_re = re.compile(self._config.REGEX)
except re.error, e:
debug.error('Error parsing regular expression: %s' % e)
for proc in data:
ps_ad = proc.get_process_address_space()
if ps_ad == None:
continue
mods = dict((mod.DllBase.v(), mod) for mod in proc.get_load_modules())
if self._config.BASE:
if mods.has_key(self._config.BASE):
mod_name = mods[self._config.BASE].BaseDllName
else:
mod_name = "UNKNOWN"
yield proc, ps_ad, int(self._config.BASE), mod_name
else:
for mod in mods.values():
if self._config.REGEX:
if not mod_re.search(str(mod.FullDllName or '')) and not mod_re.search(str(mod.BaseDllName or '')):
continue
yield proc, ps_ad, mod.DllBase.v(), mod.BaseDllName
def generator(self, data):
for proc, ps_ad, mod_base, mod_name in data:
if not ps_ad.is_valid_address(mod_base):
result = "Error: DllBase is unavailable (possibly due to paging)"
else:
process_offset = ps_ad.vtop(proc.obj_offset)
dump_file = "module.{0}.{1:x}.{2:x}.dll".format(proc.UniqueProcessId, process_offset, mod_base)
result = self.dump_pe(ps_ad, mod_base, dump_file)
yield (0,
[Address(proc.obj_offset),
str(proc.ImageFileName),
Address(mod_base),
str(mod_name or ''),
str(result)])
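# Illustrative note (not part of the original plugin): dump files are named
# module.<pid>.<EPROCESS physical offset, hex>.<DLL base, hex>.dll, e.g. a
# hypothetical module.1234.1a2b3c.7c900000.dll, so each dumped file can be
# traced back to its owning process and base address.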
def unified_output(self, data):
return renderers.TreeGrid(
[("Process(V)", Address),
("Name", str),
("Module Base", Address),
("Module Name", str),
("Result", str)], self.generator(data))
def render_text(self, outfd, data):
if self._config.DUMP_DIR == None:
debug.error("Please specify a dump directory (--dump-dir)")
if not os.path.isdir(self._config.DUMP_DIR):
debug.error(self._config.DUMP_DIR + " is not a directory")
self.table_header(outfd,
[("Process(V)", "[addrpad]"),
("Name", "20"),
("Module Base", "[addrpad]"),
("Module Name", "20"),
("Result", "")])
for proc, ps_ad, mod_base, mod_name in data:
if not ps_ad.is_valid_address(mod_base):
result = "Error: DllBase is paged"
else:
process_offset = ps_ad.vtop(proc.obj_offset)
dump_file = "module.{0}.{1:x}.{2:x}.dll".format(proc.UniqueProcessId, process_offset, mod_base)
result = self.dump_pe(ps_ad, mod_base, dump_file)
self.table_row(outfd,
proc.obj_offset,
proc.ImageFileName,
mod_base, str(mod_name or ''), result)
| gpl-2.0 |
tafaRU/odoo | addons/portal_claim/portal_claim.py | 315 | 1871 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv
class crm_claim(osv.osv):
_inherit = "crm.claim"
def _get_default_partner_id(self, cr, uid, context=None):
""" Gives default partner_id """
if context is None:
context = {}
if context.get('portal'):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
# Special case for portal users, as they are not allowed to call name_get on res.partner
# We save this call for the web client by returning it in default get
return self.pool['res.partner'].name_get(cr, SUPERUSER_ID, [user.partner_id.id], context=context)[0]
return False
_defaults = {
'partner_id': lambda s, cr, uid, c: s._get_default_partner_id(cr, uid, c),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yongshengwang/hue | desktop/core/ext-py/Django-1.6.10/django/db/backends/creation.py | 105 | 20484 | import hashlib
import sys
import time
import warnings
from django.conf import settings
from django.db.utils import load_backend
from django.utils.encoding import force_bytes
from django.utils.six.moves import input
from .util import truncate_name
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
def __init__(self, connection):
self.connection = connection
def _digest(self, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
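# For example, _digest('app_book', 'app_author') always produces the same
# 8-character hex fragment, which keeps generated constraint and index names
# both stable and short enough for backend name-length limits.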
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
opts = model._meta
if not opts.managed or opts.proxy or opts.swapped:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
null = f.null
if (f.empty_strings_allowed and not f.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if not null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
tablespace_sql = self.connection.ops.tablespace_sql(
tablespace, inline=True)
if tablespace_sql:
field_output.append(tablespace_sql)
if f.rel and f.db_constraint:
ref_output, pending = self.sql_for_inline_foreign_key_references(
model, f, known_models, style)
if pending:
pending_references.setdefault(f.rel.to, []).append(
(model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
", ".join(
[style.SQL_FIELD(qn(opts.get_field(f).column))
for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(
' %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
full_statement.append(')')
if opts.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(
opts.db_tablespace)
if tablespace_sql:
full_statement.append(tablespace_sql)
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary
# keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references
def sql_for_inline_foreign_key_references(self, model, field, known_models, style):
"""
Return the SQL snippet defining the foreign key reference for a field.
"""
qn = self.connection.ops.quote_name
rel_to = field.rel.to
if rel_to in known_models or rel_to == model:
output = [style.SQL_KEYWORD('REFERENCES') + ' ' +
style.SQL_TABLE(qn(rel_to._meta.db_table)) + ' (' +
style.SQL_FIELD(qn(rel_to._meta.get_field(
field.rel.field_name).column)) + ')' +
self.connection.ops.deferrable_sql()
]
pending = False
else:
# We haven't yet created the table to which this field
# is related, so save it for later.
output = []
pending = True
return output, pending
def sql_for_pending_references(self, model, style, pending_references):
"""
Returns any ALTER TABLE statements to add constraints after the fact.
"""
opts = model._meta
if not opts.managed or opts.swapped:
return []
qn = self.connection.ops.quote_name
final_output = []
if model in pending_references:
for rel_class, f in pending_references[model]:
rel_opts = rel_class._meta
r_table = rel_opts.db_table
r_col = f.column
table = opts.db_table
col = opts.get_field(f.rel.field_name).column
# For MySQL, r_name must be unique in the first 64 characters.
# So we are careful with character usage here.
r_name = '%s_refs_%s_%s' % (
r_col, col, self._digest(r_table, table))
final_output.append(style.SQL_KEYWORD('ALTER TABLE') +
' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
(qn(r_table), qn(truncate_name(
r_name, self.connection.ops.max_name_length())),
qn(r_col), qn(table), qn(col),
self.connection.ops.deferrable_sql()))
del pending_references[model]
return final_output
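# A sketch of the statement this produces (hypothetical names and digest,
# assuming a backend with PostgreSQL-style deferrable constraints):
#   ALTER TABLE "app_book" ADD CONSTRAINT "author_id_refs_id_12ab34cd"
#   FOREIGN KEY ("author_id") REFERENCES "app_author" ("id")
#   DEFERRABLE INITIALLY DEFERRED;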
def sql_indexes_for_model(self, model, style):
"""
Returns the CREATE INDEX SQL statements for a single model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_indexes_for_field(model, f, style))
for fs in model._meta.index_together:
fields = [model._meta.get_field_by_name(f)[0] for f in fs]
output.extend(self.sql_indexes_for_fields(model, fields, style))
return output
def sql_indexes_for_field(self, model, f, style):
"""
Return the CREATE INDEX SQL statements for a single model field.
"""
if f.db_index and not f.unique:
return self.sql_indexes_for_fields(model, [f], style)
else:
return []
def sql_indexes_for_fields(self, model, fields, style):
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
field_names = []
qn = self.connection.ops.quote_name
for f in fields:
field_names.append(style.SQL_FIELD(qn(f.column)))
index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
return [
style.SQL_KEYWORD("CREATE INDEX") + " " +
style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
style.SQL_KEYWORD("ON") + " " +
style.SQL_TABLE(qn(model._meta.db_table)) + " " +
"(%s)" % style.SQL_FIELD(", ".join(field_names)) +
"%s;" % tablespace_sql,
]
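# Illustrative output (hypothetical names and digest): indexing a "title"
# column on app_book yields something like
#   CREATE INDEX "app_book_9f54c123" ON "app_book" ("title");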
def sql_destroy_model(self, model, references_to_delete, style):
"""
Return the DROP TABLE and constraint-dropping statements for a single
model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
# Drop the table now
qn = self.connection.ops.quote_name
output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
style.SQL_TABLE(qn(model._meta.db_table)))]
if model in references_to_delete:
output.extend(self.sql_remove_table_constraints(
model, references_to_delete, style))
if model._meta.has_auto_field:
ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
if ds:
output.append(ds)
return output
def sql_remove_table_constraints(self, model, references_to_delete, style):
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
qn = self.connection.ops.quote_name
for rel_class, f in references_to_delete[model]:
table = rel_class._meta.db_table
col = f.column
r_table = model._meta.db_table
r_col = model._meta.get_field(f.rel.field_name).column
r_name = '%s_refs_%s_%s' % (
col, r_col, self._digest(table, r_table))
output.append('%s %s %s %s;' % \
(style.SQL_KEYWORD('ALTER TABLE'),
style.SQL_TABLE(qn(table)),
style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
style.SQL_FIELD(qn(truncate_name(
r_name, self.connection.ops.max_name_length())))))
del references_to_delete[model]
return output
def sql_destroy_indexes_for_model(self, model, style):
"""
Returns the DROP INDEX SQL statements for a single model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_destroy_indexes_for_field(model, f, style))
for fs in model._meta.index_together:
fields = [model._meta.get_field_by_name(f)[0] for f in fs]
output.extend(self.sql_destroy_indexes_for_fields(model, fields, style))
return output
def sql_destroy_indexes_for_field(self, model, f, style):
"""
Return the DROP INDEX SQL statements for a single model field.
"""
if f.db_index and not f.unique:
return self.sql_destroy_indexes_for_fields(model, [f], style)
else:
return []
def sql_destroy_indexes_for_fields(self, model, fields, style):
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
field_names = []
qn = self.connection.ops.quote_name
for f in fields:
field_names.append(style.SQL_FIELD(qn(f.column)))
index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
return [
style.SQL_KEYWORD("DROP INDEX") + " " +
style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
";",
]
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Creating test database for alias '%s'%s..." % (
self.connection.alias, test_db_repr))
self._create_test_db(verbosity, autoclobber)
self.connection.close()
settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
self.connection.settings_dict["NAME"] = test_database_name
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
call_command('createcachetable', cache._table,
database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
self.connection.cursor()
return test_database_name
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
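# Example: with NAME set to 'mydb' and no TEST_NAME override, the test
# database is named 'test_mydb'.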
def _create_test_db(self, verbosity, autoclobber):
"""
Internal implementation - creates the test db tables.
"""
suffix = self.sql_table_creation_suffix()
test_database_name = self._get_test_db_name()
qn = self.connection.ops.quote_name
# Create the test database and connect to it.
cursor = self.connection.cursor()
try:
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
except Exception as e:
sys.stderr.write(
"Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input(
"Type 'yes' if you would like to try deleting the test "
"database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test database '%s'..."
% self.connection.alias)
cursor.execute(
"DROP DATABASE %s" % qn(test_database_name))
cursor.execute(
"CREATE DATABASE %s %s" % (qn(test_database_name),
suffix))
except Exception as e:
sys.stderr.write(
"Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
return test_database_name
def destroy_test_db(self, old_database_name, verbosity=1):
"""
Destroy a test database. The drop is performed over a temporary
connection to the original (non-test) database, since a database
cannot be dropped while it is in use.
"""
self.connection.close()
test_database_name = self.connection.settings_dict['NAME']
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Destroying test database for alias '%s'%s..." % (
self.connection.alias, test_db_repr))
# Temporarily use a new connection and a copy of the settings dict.
# This prevents the production database from being exposed to potential
# child threads while (or after) the test database is destroyed.
# Refs #10868 and #17786.
settings_dict = self.connection.settings_dict.copy()
settings_dict['NAME'] = old_database_name
backend = load_backend(settings_dict['ENGINE'])
new_connection = backend.DatabaseWrapper(
settings_dict,
alias='__destroy_test_db__',
allow_thread_sharing=False)
new_connection.creation._destroy_test_db(test_database_name, verbosity)
def _destroy_test_db(self, test_database_name, verbosity):
"""
Internal implementation - remove the test db tables.
"""
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
cursor = self.connection.cursor()
# Wait to avoid "database is being accessed by other users" errors.
time.sleep(1)
cursor.execute("DROP DATABASE %s"
% self.connection.ops.quote_name(test_database_name))
self.connection.close()
def set_autocommit(self):
"""
Make sure a connection is in autocommit mode. Deprecated: no longer
used by Django itself, but kept for compatibility with user code
that might still call it.
"""
warnings.warn(
"set_autocommit was moved from BaseDatabaseCreation to "
"BaseDatabaseWrapper.", PendingDeprecationWarning, stacklevel=2)
return self.connection.set_autocommit(True)
def sql_table_creation_suffix(self):
"""
SQL to append to the end of the test table creation statements.
"""
return ''
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database,
according to the particularities of the RDBMS.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME']
)
| apache-2.0 |
JamesMGreene/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py | 126 | 8904 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base class with common routines between the Apache, Lighttpd, and websocket servers."""
import errno
import logging
import socket
import sys
import tempfile
import time
_log = logging.getLogger(__name__)
class ServerError(Exception):
pass
class HttpServerBase(object):
"""A skeleton class for starting and stopping servers used by the layout tests."""
def __init__(self, port_obj, number_of_servers=None):
self._executive = port_obj._executive
self._filesystem = port_obj._filesystem
self._name = '<virtual>'
self._mappings = {}
self._pid = None
self._pid_file = None
self._port_obj = port_obj
self._number_of_servers = number_of_servers
# We need a non-checkout-dependent place to put lock files, etc. We
# don't use the Python default on the Mac because it defaults to a
# randomly-generated directory under /var/folders and no one would ever
# look there.
tmpdir = tempfile.gettempdir()
if port_obj.host.platform.is_mac():
tmpdir = '/tmp'
self._runtime_path = self._filesystem.join(tmpdir, "WebKit")
self._filesystem.maybe_make_directory(self._runtime_path)
def start(self):
"""Starts the server. It is an error to start an already started server.
This method also stops any stale servers started by a previous instance."""
assert not self._pid, '%s server is already running' % self._name
# Stop any stale servers left over from previous instances.
if self._filesystem.exists(self._pid_file):
try:
self._pid = int(self._filesystem.read_text_file(self._pid_file))
self._stop_running_server()
except (ValueError, UnicodeDecodeError):
# These could be raised if the pid file is corrupt.
self._remove_pid_file()
self._pid = None
self._remove_stale_logs()
self._prepare_config()
self._check_that_all_ports_are_available()
self._pid = self._spawn_process()
if self._wait_for_action(self._is_server_running_on_all_ports):
_log.debug("%s successfully started (pid = %d)" % (self._name, self._pid))
else:
self._stop_running_server()
raise ServerError('Failed to start %s server' % self._name)
def stop(self):
"""Stops the server. Stopping a server that isn't started is harmless."""
actual_pid = None
try:
if self._filesystem.exists(self._pid_file):
try:
actual_pid = int(self._filesystem.read_text_file(self._pid_file))
except (ValueError, UnicodeDecodeError):
# These could be raised if the pid file is corrupt.
pass
if not self._pid:
self._pid = actual_pid
if not self._pid:
return
if not actual_pid:
_log.warning('Failed to stop %s: pid file is missing' % self._name)
return
if self._pid != actual_pid:
_log.warning('Failed to stop %s: pid file contains %d, not %d' %
(self._name, actual_pid, self._pid))
# Try to kill the existing pid, anyway, in case it got orphaned.
self._executive.kill_process(self._pid)
self._pid = None
return
_log.debug("Attempting to shut down %s server at pid %d" % (self._name, self._pid))
self._stop_running_server()
_log.debug("%s server at pid %d stopped" % (self._name, self._pid))
self._pid = None
finally:
# Make sure we delete the pid file no matter what happens.
self._remove_pid_file()
def _prepare_config(self):
"""This routine can be overridden by subclasses to do any sort
of initialization required prior to starting the server that may fail."""
pass
def _remove_stale_logs(self):
"""This routine can be overridden by subclasses to try and remove logs
left over from a prior run. This routine should log warnings if the
files cannot be deleted, but should not fail unless failure to
delete the logs will actually cause start() to fail."""
pass
def _spawn_process(self):
"""This routine must be implemented by subclasses to actually start the server.
This routine returns the pid of the started process, and also ensures that that
pid has been written to self._pid_file."""
raise NotImplementedError()
def _stop_running_server(self):
"""This routine must be implemented by subclasses to actually stop the running server listed in self._pid_file."""
raise NotImplementedError()
# Utility routines.
def _remove_pid_file(self):
if self._filesystem.exists(self._pid_file):
self._filesystem.remove(self._pid_file)
def _remove_log_files(self, folder, starts_with):
files = self._filesystem.listdir(folder)
for file in files:
if file.startswith(starts_with):
full_path = self._filesystem.join(folder, file)
self._filesystem.remove(full_path)
def _wait_for_action(self, action, wait_secs=20.0, sleep_secs=1.0):
"""Repeat the action for wait_sec or until it succeeds, sleeping for sleep_secs
in between each attempt. Returns whether it succeeded."""
start_time = time.time()
while time.time() - start_time < wait_secs:
if action():
return True
_log.debug("Waiting for action: %s" % action)
time.sleep(sleep_secs)
return False
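# Typical use (see start() above): _wait_for_action(self._is_server_running_on_all_ports)
# polls roughly once per second for up to 20 seconds before giving up.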
def _is_server_running_on_all_ports(self):
"""Returns whether the server is running on all the desired ports."""
if not self._executive.check_running_pid(self._pid):
_log.debug("Server isn't running at all")
raise ServerError("Server exited")
for mapping in self._mappings:
s = socket.socket()
port = mapping['port']
try:
s.connect(('localhost', port))
_log.debug("Server running on %d" % port)
except IOError as e:
if e.errno not in (errno.ECONNREFUSED, errno.ECONNRESET):
raise
_log.debug("Server NOT running on %d: %s" % (port, e))
return False
finally:
s.close()
return True
def _check_that_all_ports_are_available(self):
for mapping in self._mappings:
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = mapping['port']
try:
s.bind(('localhost', port))
except IOError as e:
if e.errno in (errno.EALREADY, errno.EADDRINUSE):
raise ServerError('Port %d is already in use.' % port)
elif sys.platform == 'win32' and e.errno in (errno.WSAEACCES,): # pylint: disable=E1101
raise ServerError('Port %d is already in use.' % port)
else:
raise
finally:
s.close()
| bsd-3-clause |
tblancher/autokey | src/lib/qtui/settingsdialog.py | 48 | 7928 | # -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from PyKDE4.kdeui import *
from PyKDE4.kio import KFileDialog
from PyKDE4.kdecore import i18n, KAutostart
from PyQt4.QtGui import *
from PyQt4.QtCore import SIGNAL, Qt
from autokey.configmanager import *
from autokey import iomediator, interface, model
from dialogs import GlobalHotkeyDialog
import generalsettings, specialhotkeysettings, enginesettings
class GeneralSettings(QWidget, generalsettings.Ui_Form):
def __init__(self, parent):
QWidget.__init__(self, parent)
generalsettings.Ui_Form.__init__(self)
self.setupUi(self)
self.promptToSaveCheckbox.setChecked(ConfigManager.SETTINGS[PROMPT_TO_SAVE])
self.showTrayCheckbox.setChecked(ConfigManager.SETTINGS[SHOW_TRAY_ICON])
#self.allowKbNavCheckbox.setChecked(ConfigManager.SETTINGS[MENU_TAKES_FOCUS])
self.allowKbNavCheckbox.setVisible(False)
self.sortByUsageCheckbox.setChecked(ConfigManager.SETTINGS[SORT_BY_USAGE_COUNT])
self.enableUndoCheckbox.setChecked(ConfigManager.SETTINGS[UNDO_USING_BACKSPACE])
def save(self):
ConfigManager.SETTINGS[PROMPT_TO_SAVE] = self.promptToSaveCheckbox.isChecked()
ConfigManager.SETTINGS[SHOW_TRAY_ICON] = self.showTrayCheckbox.isChecked()
#ConfigManager.SETTINGS[MENU_TAKES_FOCUS] = self.allowKbNavCheckbox.isChecked()
ConfigManager.SETTINGS[SORT_BY_USAGE_COUNT] = self.sortByUsageCheckbox.isChecked()
ConfigManager.SETTINGS[UNDO_USING_BACKSPACE] = self.enableUndoCheckbox.isChecked()
class SpecialHotkeySettings(QWidget, specialhotkeysettings.Ui_Form):
KEY_MAP = GlobalHotkeyDialog.KEY_MAP
REVERSE_KEY_MAP = GlobalHotkeyDialog.REVERSE_KEY_MAP
def __init__(self, parent, configManager):
QWidget.__init__(self, parent)
specialhotkeysettings.Ui_Form.__init__(self)
self.setupUi(self)
self.configManager = configManager
self.showConfigDlg = GlobalHotkeyDialog(parent)
self.toggleMonitorDlg = GlobalHotkeyDialog(parent)
self.useConfigHotkey = self.__loadHotkey(configManager.configHotkey, self.configKeyLabel,
self.showConfigDlg, self.clearConfigButton)
self.useServiceHotkey = self.__loadHotkey(configManager.toggleServiceHotkey, self.monitorKeyLabel,
self.toggleMonitorDlg, self.clearMonitorButton)
def __loadHotkey(self, item, label, dialog, clearButton):
dialog.load(item)
if item.enabled:
key = str(item.hotKey.encode("utf-8"))
label.setText(item.get_hotkey_string(key, item.modifiers))
clearButton.setEnabled(True)
return True
else:
label.setText(i18n("(None configured)"))
clearButton.setEnabled(False)
return False
def save(self):
configHotkey = self.configManager.configHotkey
toggleHotkey = self.configManager.toggleServiceHotkey
if configHotkey.enabled:
self.configManager.app.hotkey_removed(configHotkey)
configHotkey.enabled = self.useConfigHotkey
if self.useConfigHotkey:
self.showConfigDlg.save(configHotkey)
self.configManager.app.hotkey_created(configHotkey)
if toggleHotkey.enabled:
self.configManager.app.hotkey_removed(toggleHotkey)
toggleHotkey.enabled = self.useServiceHotkey
if self.useServiceHotkey:
self.toggleMonitorDlg.save(toggleHotkey)
self.configManager.app.hotkey_created(toggleHotkey)
# ---- Signal handlers
def on_setConfigButton_pressed(self):
self.showConfigDlg.exec_()
if self.showConfigDlg.result() == QDialog.Accepted:
self.useConfigHotkey = True
key = self.showConfigDlg.key
modifiers = self.showConfigDlg.build_modifiers()
self.configKeyLabel.setText(self.showConfigDlg.targetItem.get_hotkey_string(key, modifiers))
self.clearConfigButton.setEnabled(True)
def on_clearConfigButton_pressed(self):
self.useConfigHotkey = False
self.clearConfigButton.setEnabled(False)
self.configKeyLabel.setText(i18n("(None configured)"))
self.showConfigDlg.reset()
def on_setMonitorButton_pressed(self):
self.toggleMonitorDlg.exec_()
if self.toggleMonitorDlg.result() == QDialog.Accepted:
self.useServiceHotkey = True
key = self.toggleMonitorDlg.key
modifiers = self.toggleMonitorDlg.build_modifiers()
self.monitorKeyLabel.setText(self.toggleMonitorDlg.targetItem.get_hotkey_string(key, modifiers))
self.clearMonitorButton.setEnabled(True)
def on_clearMonitorButton_pressed(self):
self.useServiceHotkey = False
self.clearMonitorButton.setEnabled(False)
self.monitorKeyLabel.setText(i18n("(None configured)"))
self.toggleMonitorDlg.reset()
class EngineSettings(QWidget, enginesettings.Ui_Form):
def __init__(self, parent, configManager):
QWidget.__init__(self, parent)
enginesettings.Ui_Form.__init__(self)
self.setupUi(self)
self.configManager = configManager
if configManager.userCodeDir is not None:
self.folderLabel.setText(configManager.userCodeDir)
if configManager.userCodeDir in sys.path:
sys.path.remove(configManager.userCodeDir)
self.path = configManager.userCodeDir
def save(self):
if self.path is not None:
self.configManager.userCodeDir = self.path
sys.path.append(self.path)
def on_browseButton_pressed(self):
path = KFileDialog.getExistingDirectory(self.parentWidget(), i18n("Choose Directory"))
if path != '':
self.path = path
self.folderLabel.setText(self.path)
class SettingsDialog(KPageDialog):
def __init__(self, parent):
KPageDialog.__init__(self, parent)
self.app = parent.topLevelWidget().app # Used by GlobalHotkeyDialog
self.genPage = self.addPage(GeneralSettings(self), i18n("General"))
self.genPage.setIcon(KIcon("preferences-other"))
self.hkPage = self.addPage(SpecialHotkeySettings(self, parent.app.configManager), i18n("Special Hotkeys"))
self.hkPage.setIcon(KIcon("preferences-desktop-keyboard"))
self.ePage = self.addPage(EngineSettings(self, parent.app.configManager), i18n("Script Engine"))
self.ePage.setIcon(KIcon("text-x-script"))
self.setCaption(i18n("Settings"))
def slotButtonClicked(self, button):
if button == KDialog.Ok:
self.genPage.widget().save()
self.hkPage.widget().save()
self.ePage.widget().save()
self.app.configManager.config_altered(True)
self.app.update_notifier_visibility()
KDialog.slotButtonClicked(self, button)
| gpl-3.0 |
cchurch/ansible-modules-core | cloud/azure/azure_rm_virtualmachineimage_facts.py | 46 | 7563 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: azure_rm_virtualmachineimage_facts
version_added: "2.1"
short_description: Get virtual machine image facts.
description:
- Get facts for virtual machine images.
options:
name:
description:
- Only show results for a specific virtual machine image.
default: null
required: false
location:
description:
- Azure location value (ie. westus, eastus, eastus2, northcentralus, etc.). Supplying only a
location value will yield a list of available publishers for the location.
required: true
publisher:
description:
- Name of an image publisher. List image offerings associated with a particular publisher.
default: null
required: false
offer:
description:
- Name of an image offering. Combine with sku to see a list of available image versions.
default: null
required: false
sku:
description:
- Image offering SKU. Combine with offer to see a list of available versions.
default: null
required: false
version:
description:
- Specific version number of an image.
default: null
required: false
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for a specific image
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
offer: CentOS
sku: '7.1'
version: '7.1.20160308'
- name: List available versions
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
offer: CentOS
sku: '7.1'
- name: List available offers
azure_rm_virtualmachineimage_facts:
location: eastus
publisher: OpenLogic
- name: List available publishers
azure_rm_virtualmachineimage_facts:
location: eastus
'''
RETURN = '''
azure_vmimages:
description: List of image dicts.
returned: always
type: list
sample: []
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except:
# This is handled in azure_rm_common
pass
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models.compute_management_client_enums']
class AzureRMVirtualMachineImageFacts(AzureRMModuleBase):
def __init__(self, **kwargs):
self.module_arg_spec = dict(
location=dict(type='str', required=True),
publisher=dict(type='str'),
offer=dict(type='str'),
sku=dict(type='str'),
version=dict(type='str')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_vmimages=[])
)
self.location = None
self.publisher = None
self.offer = None
self.sku = None
self.version = None
super(AzureRMVirtualMachineImageFacts, self).__init__(self.module_arg_spec)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.location and self.publisher and self.offer and self.sku and self.version:
self.results['ansible_facts']['azure_vmimages'] = self.get_item()
elif self.location and self.publisher and self.offer and self.sku:
self.results['ansible_facts']['azure_vmimages'] = self.list_images()
elif self.location and self.publisher:
self.results['ansible_facts']['azure_vmimages'] = self.list_offers()
elif self.location:
self.results['ansible_facts']['azure_vmimages'] = self.list_publishers()
return self.results
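# The cascade above narrows from most to least specific: a full
# publisher/offer/sku/version identifies one image; dropping version lists
# available versions; publisher alone lists offers; location alone lists
# publishers.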
def get_item(self):
item = None
result = []
try:
item = self.compute_client.virtual_machine_images.get(self.location,
self.publisher,
self.offer,
self.sku,
self.version)
except CloudError:
pass
if item:
result = [self.serialize_obj(item, 'VirtualMachineImage', enum_modules=AZURE_ENUM_MODULES)]
return result
def list_images(self):
response = None
results = []
try:
response = self.compute_client.virtual_machine_images.list(self.location,
self.publisher,
self.offer,
self.sku,)
except CloudError:
pass
except Exception as exc:
self.fail("Failed to list images: {0}".format(str(exc)))
if response:
for item in response:
results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
enum_modules=AZURE_ENUM_MODULES))
return results
def list_offers(self):
response = None
results = []
try:
response = self.compute_client.virtual_machine_images.list_offers(self.location,
self.publisher)
except CloudError:
pass
except Exception as exc:
self.fail("Failed to list offers: {0}".format(str(exc)))
if response:
for item in response:
results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
enum_modules=AZURE_ENUM_MODULES))
return results
def list_publishers(self):
response = None
results = []
try:
response = self.compute_client.virtual_machine_images.list_publishers(self.location)
except CloudError:
pass
except Exception as exc:
self.fail("Failed to list publishers: {0}".format(str(exc)))
if response:
for item in response:
results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
enum_modules=AZURE_ENUM_MODULES))
return results
def main():
AzureRMVirtualMachineImageFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
jakevdp/altair | altair/utils/deprecation.py | 1 | 1447 | import warnings
# import functools
class AltairDeprecationWarning(UserWarning):
pass
def _deprecated(obj, name=None, message=None):
"""Return a version of a class or function that raises a deprecation warning.
Parameters
----------
obj : class or function
The object to create a deprecated version of.
name : string (optional)
The name of the deprecated object
message : string (optional)
The deprecation message
Returns
-------
deprecated_obj :
The deprecated version of obj
Examples
--------
>>> class Foo(object): pass
>>> OldFoo = _deprecated(Foo, "OldFoo")
>>> f = OldFoo() # doctest: +SKIP
AltairDeprecationWarning: alt.OldFoo is deprecated. Use alt.Foo instead.
"""
if message is None:
message = ("alt.{} is deprecated. Use alt.{} instead."
"".format(name, obj.__name__))
if isinstance(obj, type):
return type(name, (obj,),
{'__doc__': obj.__doc__,
'__init__': _deprecated(obj.__init__, "__init__", message)})
elif callable(obj):
# @functools.wraps(obj) # TODO: use this in Py3 only
def new_obj(*args, **kwargs):
warnings.warn(message, AltairDeprecationWarning)
return obj(*args, **kwargs)
return new_obj
else:
raise ValueError("Cannot deprecate object of type {}".format(type(obj)))
| bsd-3-clause |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.7.52-1/roles/lib_openshift/src/class/oc_adm_policy_group.py | 33 | 7719 | # pylint: skip-file
# flake8: noqa
class PolicyGroupException(Exception):
''' PolicyGroup exception'''
pass
class PolicyGroupConfig(OpenShiftCLIConfig):
''' PolicyGroupConfig is a DTO for group-related policy. '''
def __init__(self, namespace, kubeconfig, policy_options):
super(PolicyGroupConfig, self).__init__(policy_options['name']['value'],
namespace, kubeconfig, policy_options)
self.kind = self.get_kind()
self.namespace = namespace
def get_kind(self):
''' return the kind we are working with '''
if self.config_options['resource_kind']['value'] == 'role':
return 'rolebinding'
elif self.config_options['resource_kind']['value'] == 'cluster-role':
return 'clusterrolebinding'
elif self.config_options['resource_kind']['value'] == 'scc':
return 'scc'
return None
# pylint: disable=too-many-return-statements
class PolicyGroup(OpenShiftCLI):
''' Class to handle attaching policies to groups '''
def __init__(self,
config,
verbose=False):
''' Constructor for PolicyGroup '''
super(PolicyGroup, self).__init__(config.namespace, config.kubeconfig, verbose)
self.config = config
self.verbose = verbose
self._rolebinding = None
self._scc = None
self._cluster_role_bindings = None
self._role_bindings = None
@property
def rolebindings(self):
if self._role_bindings is None:
results = self._get('rolebindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve rolebindings')
self._role_bindings = results['results'][0]['items']
return self._role_bindings
@property
def clusterrolebindings(self):
if self._cluster_role_bindings is None:
results = self._get('clusterrolebindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve clusterrolebindings')
self._cluster_role_bindings = results['results'][0]['items']
return self._cluster_role_bindings
@property
def role_binding(self):
''' role_binding getter '''
return self._rolebinding
@role_binding.setter
def role_binding(self, binding):
''' role_binding setter '''
self._rolebinding = binding
@property
def security_context_constraint(self):
''' security_context_constraint getter '''
return self._scc
@security_context_constraint.setter
def security_context_constraint(self, scc):
''' security_context_constraint setter '''
self._scc = scc
def get(self):
'''fetch the desired kind'''
resource_name = self.config.config_options['name']['value']
if resource_name == 'cluster-reader':
resource_name += 's'
# oc adm policy add-... creates policy bindings with the name
# "[resource_name]-binding", however some bindings in the system
# simply use "[resource_name]". So try both.
results = self._get(self.config.kind, resource_name)
if results['returncode'] == 0:
return results
# Now try -binding naming convention
return self._get(self.config.kind, resource_name + "-binding")
def exists_role_binding(self):
''' return whether role_binding exists '''
bindings = None
if self.config.config_options['resource_kind']['value'] == 'cluster-role':
bindings = self.clusterrolebindings
else:
bindings = self.rolebindings
if bindings is None:
return False
for binding in bindings:
if binding['roleRef']['name'] == self.config.config_options['name']['value'] and \
binding['groupNames'] is not None and \
self.config.config_options['group']['value'] in binding['groupNames']:
self.role_binding = binding
return True
return False
def exists_scc(self):
''' return whether scc exists '''
results = self.get()
if results['returncode'] == 0:
self.security_context_constraint = SecurityContextConstraints(results['results'][0])
if self.security_context_constraint.find_group(self.config.config_options['group']['value']) != None:
return True
return False
return results
def exists(self):
'''does the object exist?'''
if self.config.config_options['resource_kind']['value'] == 'cluster-role':
return self.exists_role_binding()
elif self.config.config_options['resource_kind']['value'] == 'role':
return self.exists_role_binding()
elif self.config.config_options['resource_kind']['value'] == 'scc':
return self.exists_scc()
return False
def perform(self):
'''perform action on resource'''
cmd = ['policy',
self.config.config_options['action']['value'],
self.config.config_options['name']['value'],
self.config.config_options['group']['value']]
return self.openshift_cmd(cmd, oadm=True)
@staticmethod
def run_ansible(params, check_mode):
'''run the idempotent ansible code'''
state = params['state']
action = None
if state == 'present':
action = 'add-' + params['resource_kind'] + '-to-group'
else:
action = 'remove-' + params['resource_kind'] + '-from-group'
nconfig = PolicyGroupConfig(params['namespace'],
params['kubeconfig'],
{'action': {'value': action, 'include': False},
'group': {'value': params['group'], 'include': False},
'resource_kind': {'value': params['resource_kind'], 'include': False},
'name': {'value': params['resource_name'], 'include': False},
})
policygroup = PolicyGroup(nconfig, params['debug'])
# Run the oc adm policy group related command
########
# Delete
########
if state == 'absent':
if not policygroup.exists():
return {'changed': False, 'state': 'absent'}
if check_mode:
return {'changed': False, 'msg': 'CHECK_MODE: would have performed a delete.'}
api_rval = policygroup.perform()
if api_rval['returncode'] != 0:
return {'msg': api_rval}
return {'changed': True, 'results' : api_rval, state:'absent'}
if state == 'present':
########
# Create
########
results = policygroup.exists()
if isinstance(results, dict) and 'returncode' in results and results['returncode'] != 0:
return {'msg': results}
if not results:
if check_mode:
return {'changed': False, 'msg': 'CHECK_MODE: would have performed a create.'}
api_rval = policygroup.perform()
if api_rval['returncode'] != 0:
return {'msg': api_rval}
return {'changed': True, 'results': api_rval, state: 'present'}
return {'changed': False, state: 'present'}
return {'failed': True, 'changed': False, 'results': 'Unknown state passed. %s' % state, state: 'unknown'}
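# Illustrative invocation (hypothetical values): with state='present',
# resource_kind='scc', resource_name='privileged' and group='developers',
# run_ansible builds the action 'add-scc-to-group' and ultimately runs
#   oc adm policy add-scc-to-group privileged developers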
| apache-2.0 |
andymckay/django | django/core/management/commands/test.py | 111 | 2965 | import sys
import os
from optparse import make_option, OptionParser
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.utils import get_runner
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noinput',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--failfast',
action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first '
'failed test.'),
make_option('--testrunner',
action='store', dest='testrunner',
help='Tells Django to use specified test runner class instead of '
'the one specified by the TEST_RUNNER setting.'),
make_option('--liveserver',
action='store', dest='liveserver', default=None,
help='Overrides the default address where the live server (used '
'with LiveServerTestCase) is expected to run from. The '
'default value is localhost:8081.'),
)
help = ('Runs the test suite for the specified applications, or the '
'entire site if no apps are specified.')
args = '[appname ...]'
requires_model_validation = False
def __init__(self):
self.test_runner = None
super(Command, self).__init__()
def run_from_argv(self, argv):
"""
Pre-parse the command line to extract the value of the --testrunner
option. This allows a test runner to define additional command line
arguments.
"""
option = '--testrunner='
for arg in argv[2:]:
if arg.startswith(option):
self.test_runner = arg[len(option):]
break
super(Command, self).run_from_argv(argv)
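# For example, `manage.py test --testrunner=myapp.runner.FastRunner`
# (hypothetical dotted path) is picked up here before option parsing, so the
# runner's own option_list can be merged into the parser in create_parser().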
def create_parser(self, prog_name, subcommand):
test_runner_class = get_runner(settings, self.test_runner)
options = self.option_list + getattr(
test_runner_class, 'option_list', ())
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=options)
def handle(self, *test_labels, **options):
from django.conf import settings
from django.test.utils import get_runner
TestRunner = get_runner(settings, options.get('testrunner'))
options['verbosity'] = int(options.get('verbosity'))
if options.get('liveserver') is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options['liveserver']
del options['liveserver']
test_runner = TestRunner(**options)
failures = test_runner.run_tests(test_labels)
if failures:
sys.exit(bool(failures))
| bsd-3-clause |
rubyinhell/brython | www/src/Lib/encodings/mac_farsi.py | 37 | 15477 | """ Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-farsi',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE, left-right
'!' # 0x21 -> EXCLAMATION MARK, left-right
'"' # 0x22 -> QUOTATION MARK, left-right
'#' # 0x23 -> NUMBER SIGN, left-right
'$' # 0x24 -> DOLLAR SIGN, left-right
'%' # 0x25 -> PERCENT SIGN, left-right
'&' # 0x26 -> AMPERSAND, left-right
"'" # 0x27 -> APOSTROPHE, left-right
'(' # 0x28 -> LEFT PARENTHESIS, left-right
')' # 0x29 -> RIGHT PARENTHESIS, left-right
'*' # 0x2A -> ASTERISK, left-right
'+' # 0x2B -> PLUS SIGN, left-right
',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
'-' # 0x2D -> HYPHEN-MINUS, left-right
'.' # 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
'/' # 0x2F -> SOLIDUS, left-right
'0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO
'1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE
'2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO
'3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE
'4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE
'6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC DIGIT NINE
':' # 0x3A -> COLON, left-right
';' # 0x3B -> SEMICOLON, left-right
'<' # 0x3C -> LESS-THAN SIGN, left-right
'=' # 0x3D -> EQUALS SIGN, left-right
'>' # 0x3E -> GREATER-THAN SIGN, left-right
'?' # 0x3F -> QUESTION MARK, left-right
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET, left-right
'\\' # 0x5C -> REVERSE SOLIDUS, left-right
']' # 0x5D -> RIGHT SQUARE BRACKET, left-right
'^' # 0x5E -> CIRCUMFLEX ACCENT, left-right
'_' # 0x5F -> LOW LINE, left-right
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET, left-right
'|' # 0x7C -> VERTICAL LINE, left-right
'}' # 0x7D -> RIGHT CURLY BRACKET, left-right
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xa0' # 0x81 -> NO-BREAK SPACE, right-left
'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA
'\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left
'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0x9B -> DIVISION SIGN, right-left
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
' ' # 0xA0 -> SPACE, right-left
'!' # 0xA1 -> EXCLAMATION MARK, right-left
'"' # 0xA2 -> QUOTATION MARK, right-left
'#' # 0xA3 -> NUMBER SIGN, right-left
'$' # 0xA4 -> DOLLAR SIGN, right-left
'\u066a' # 0xA5 -> ARABIC PERCENT SIGN
'&' # 0xA6 -> AMPERSAND, right-left
"'" # 0xA7 -> APOSTROPHE, right-left
'(' # 0xA8 -> LEFT PARENTHESIS, right-left
')' # 0xA9 -> RIGHT PARENTHESIS, right-left
'*' # 0xAA -> ASTERISK, right-left
'+' # 0xAB -> PLUS SIGN, right-left
'\u060c' # 0xAC -> ARABIC COMMA
'-' # 0xAD -> HYPHEN-MINUS, right-left
'.' # 0xAE -> FULL STOP, right-left
'/' # 0xAF -> SOLIDUS, right-left
'\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override)
'\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override)
'\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override)
'\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override)
'\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override)
'\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override)
'\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override)
'\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override)
'\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override)
'\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override)
':' # 0xBA -> COLON, right-left
'\u061b' # 0xBB -> ARABIC SEMICOLON
'<' # 0xBC -> LESS-THAN SIGN, right-left
'=' # 0xBD -> EQUALS SIGN, right-left
'>' # 0xBE -> GREATER-THAN SIGN, right-left
'\u061f' # 0xBF -> ARABIC QUESTION MARK
'\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0xC7 -> ARABIC LETTER ALEF
'\u0628' # 0xC8 -> ARABIC LETTER BEH
'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0xCA -> ARABIC LETTER TEH
'\u062b' # 0xCB -> ARABIC LETTER THEH
'\u062c' # 0xCC -> ARABIC LETTER JEEM
'\u062d' # 0xCD -> ARABIC LETTER HAH
'\u062e' # 0xCE -> ARABIC LETTER KHAH
'\u062f' # 0xCF -> ARABIC LETTER DAL
'\u0630' # 0xD0 -> ARABIC LETTER THAL
'\u0631' # 0xD1 -> ARABIC LETTER REH
'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
'\u0633' # 0xD3 -> ARABIC LETTER SEEN
'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
'\u0635' # 0xD5 -> ARABIC LETTER SAD
'\u0636' # 0xD6 -> ARABIC LETTER DAD
'\u0637' # 0xD7 -> ARABIC LETTER TAH
'\u0638' # 0xD8 -> ARABIC LETTER ZAH
'\u0639' # 0xD9 -> ARABIC LETTER AIN
'\u063a' # 0xDA -> ARABIC LETTER GHAIN
'[' # 0xDB -> LEFT SQUARE BRACKET, right-left
'\\' # 0xDC -> REVERSE SOLIDUS, right-left
']' # 0xDD -> RIGHT SQUARE BRACKET, right-left
'^' # 0xDE -> CIRCUMFLEX ACCENT, right-left
'_' # 0xDF -> LOW LINE, right-left
'\u0640' # 0xE0 -> ARABIC TATWEEL
'\u0641' # 0xE1 -> ARABIC LETTER FEH
'\u0642' # 0xE2 -> ARABIC LETTER QAF
'\u0643' # 0xE3 -> ARABIC LETTER KAF
'\u0644' # 0xE4 -> ARABIC LETTER LAM
'\u0645' # 0xE5 -> ARABIC LETTER MEEM
'\u0646' # 0xE6 -> ARABIC LETTER NOON
'\u0647' # 0xE7 -> ARABIC LETTER HEH
'\u0648' # 0xE8 -> ARABIC LETTER WAW
'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0xEA -> ARABIC LETTER YEH
'\u064b' # 0xEB -> ARABIC FATHATAN
'\u064c' # 0xEC -> ARABIC DAMMATAN
'\u064d' # 0xED -> ARABIC KASRATAN
'\u064e' # 0xEE -> ARABIC FATHA
'\u064f' # 0xEF -> ARABIC DAMMA
'\u0650' # 0xF0 -> ARABIC KASRA
'\u0651' # 0xF1 -> ARABIC SHADDA
'\u0652' # 0xF2 -> ARABIC SUKUN
'\u067e' # 0xF3 -> ARABIC LETTER PEH
'\u0679' # 0xF4 -> ARABIC LETTER TTEH
'\u0686' # 0xF5 -> ARABIC LETTER TCHEH
'\u06d5' # 0xF6 -> ARABIC LETTER AE
'\u06a4' # 0xF7 -> ARABIC LETTER VEH
'\u06af' # 0xF8 -> ARABIC LETTER GAF
'\u0688' # 0xF9 -> ARABIC LETTER DDAL
'\u0691' # 0xFA -> ARABIC LETTER RREH
'{' # 0xFB -> LEFT CURLY BRACKET, right-left
'|' # 0xFC -> VERTICAL LINE, right-left
'}' # 0xFD -> RIGHT CURLY BRACKET, right-left
'\u0698' # 0xFE -> ARABIC LETTER JEH
'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
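# --- Hedged usage sketch (added for illustration; not part of the codec) ---
# A charmap codec decodes each byte through `decoding_table` (a 256-character
# string indexed by byte value) and encodes through the inverted map built by
# codecs.charmap_build().  For example, byte 0xC7 above is ARABIC LETTER ALEF:
#
#     import codecs
#     text, consumed = codecs.charmap_decode(b'\xc7', 'strict', decoding_table)
#     assert text == '\u0627' and consumed == 1
#     raw, consumed = codecs.charmap_encode(text, 'strict', encoding_table)
#     assert raw == b'\xc7'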
| bsd-3-clause |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/sympy/physics/vector/functions.py | 74 | 23381 | from __future__ import print_function, division
from sympy import (sympify, diff, sin, cos, Matrix, Symbol, integrate,
trigsimp, Function, symbols)
from sympy.core.basic import S
from sympy.core.compatibility import reduce
from .vector import Vector, _check_vector
from .frame import CoordinateSym, _check_frame
from .dyadic import Dyadic
from .printing import vprint, vsprint, vpprint, vlatex, init_vprinting
from sympy.utilities.iterables import iterable
__all__ = ['cross', 'dot', 'express', 'time_derivative', 'outer',
'kinematic_equations', 'get_motion_params', 'partial_velocity',
'dynamicsymbols', 'vprint', 'vsprint', 'vpprint', 'vlatex',
'init_vprinting']
def cross(vec1, vec2):
"""Cross product convenience wrapper for Vector.cross(): \n"""
if not isinstance(vec1, (Vector, Dyadic)):
raise TypeError('Cross product is between two vectors')
return vec1 ^ vec2
cross.__doc__ += Vector.cross.__doc__
def dot(vec1, vec2):
"""Dot product convenience wrapper for Vector.dot(): \n"""
if not isinstance(vec1, (Vector, Dyadic)):
raise TypeError('Dot product is between two vectors')
return vec1 & vec2
dot.__doc__ += Vector.dot.__doc__
def express(expr, frame, frame2=None, variables=False):
"""
Global function for 'express' functionality.
Re-expresses a Vector, scalar(sympyfiable) or Dyadic in given frame.
Refer to the local methods of Vector and Dyadic for details.
If 'variables' is True, then the coordinate variables (CoordinateSym
instances) of other frames present in the vector/scalar field or
dyadic expression are also substituted in terms of the base scalars of
this frame.
Parameters
==========
expr : Vector/Dyadic/scalar(sympyfiable)
The expression to re-express in ReferenceFrame 'frame'
frame: ReferenceFrame
The reference frame to express expr in
frame2 : ReferenceFrame
The other frame required for re-expression(only for Dyadic expr)
variables : boolean
Specifies whether to substitute the coordinate variables present
in expr, in terms of those of frame
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> q = dynamicsymbols('q')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> d = outer(N.x, N.x)
>>> from sympy.physics.vector import express
>>> express(d, B, N)
cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x)
>>> express(B.x, N)
cos(q)*N.x + sin(q)*N.y
>>> express(N[0], B, variables=True)
B_x*cos(q(t)) - B_y*sin(q(t))
"""
_check_frame(frame)
if expr == 0:
return expr
if isinstance(expr, Vector):
#Given expr is a Vector
if variables:
#If variables attribute is True, substitute
#the coordinate variables in the Vector
frame_list = [x[-1] for x in expr.args]
subs_dict = {}
for f in frame_list:
subs_dict.update(f.variable_map(frame))
expr = expr.subs(subs_dict)
#Re-express in this frame
outvec = Vector([])
for i, v in enumerate(expr.args):
if v[1] != frame:
temp = frame.dcm(v[1]) * v[0]
if Vector.simp:
temp = temp.applyfunc(lambda x:
trigsimp(x, method='fu'))
outvec += Vector([(temp, frame)])
else:
outvec += Vector([v])
return outvec
if isinstance(expr, Dyadic):
if frame2 is None:
frame2 = frame
_check_frame(frame2)
ol = Dyadic(0)
for i, v in enumerate(expr.args):
ol += express(v[0], frame, variables=variables) * \
(express(v[1], frame, variables=variables) |
express(v[2], frame2, variables=variables))
return ol
else:
if variables:
#Given expr is a scalar field
frame_set = set([])
expr = sympify(expr)
            #Substitute all the coordinate variables
for x in expr.free_symbols:
                if isinstance(x, CoordinateSym) and x.frame != frame:
frame_set.add(x.frame)
subs_dict = {}
for f in frame_set:
subs_dict.update(f.variable_map(frame))
return expr.subs(subs_dict)
return expr
def time_derivative(expr, frame, order=1):
"""
Calculate the time derivative of a vector/scalar field function
or dyadic expression in given frame.
References
==========
http://en.wikipedia.org/wiki/Rotating_reference_frame#Time_derivatives_in_the_two_frames
Parameters
==========
expr : Vector/Dyadic/sympifyable
The expression whose time derivative is to be calculated
frame : ReferenceFrame
The reference frame to calculate the time derivative in
order : integer
The order of the derivative to be calculated
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> from sympy import Symbol
>>> q1 = Symbol('q1')
>>> u1 = dynamicsymbols('u1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> v = u1 * N.x
>>> A.set_ang_vel(N, 10*A.x)
>>> from sympy.physics.vector import time_derivative
>>> time_derivative(v, N)
u1'*N.x
>>> time_derivative(u1*A[0], N)
N_x*Derivative(u1(t), t)
>>> B = N.orientnew('B', 'Axis', [u1, N.z])
>>> from sympy.physics.vector import outer
>>> d = outer(N.x, N.x)
>>> time_derivative(d, B)
- u1'*(N.y|N.x) - u1'*(N.x|N.y)
"""
t = dynamicsymbols._t
_check_frame(frame)
if order == 0:
return expr
if order % 1 != 0 or order < 0:
raise ValueError("Unsupported value of order entered")
if isinstance(expr, Vector):
outvec = Vector(0)
for i, v in enumerate(expr.args):
if v[1] == frame:
outvec += Vector([(express(v[0], frame,
variables=True).diff(t), frame)])
else:
outvec += time_derivative(Vector([v]), v[1]) + \
(v[1].ang_vel_in(frame) ^ Vector([v]))
return time_derivative(outvec, frame, order - 1)
if isinstance(expr, Dyadic):
ol = Dyadic(0)
for i, v in enumerate(expr.args):
ol += (v[0].diff(t) * (v[1] | v[2]))
ol += (v[0] * (time_derivative(v[1], frame) | v[2]))
ol += (v[0] * (v[1] | time_derivative(v[2], frame)))
return time_derivative(ol, frame, order - 1)
else:
return diff(express(expr, frame, variables=True), t, order)
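# Hedged illustration (added; not in the sympy source): the recursion above
# implements the transport theorem  d v/dt|_N = d v/dt|_B + w_BN ^ v  for a
# vector expressed in a rotating frame B.  A minimal check, assuming the
# usual ReferenceFrame API:
#
#     from sympy.physics.vector import ReferenceFrame, dynamicsymbols
#     q = dynamicsymbols('q')
#     N = ReferenceFrame('N')
#     B = N.orientnew('B', 'Axis', [q, N.z])
#     # B.x is constant in B, so its N-frame derivative is w_BN ^ B.x:
#     assert time_derivative(B.x, N) == (B.ang_vel_in(N) ^ B.x)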
def outer(vec1, vec2):
"""Outer product convenience wrapper for Vector.outer():\n"""
if not isinstance(vec1, Vector):
raise TypeError('Outer product is between two Vectors')
return vec1 | vec2
outer.__doc__ += Vector.outer.__doc__
def kinematic_equations(speeds, coords, rot_type, rot_order=''):
"""Gives equations relating the qdot's to u's for a rotation type.
Supply rotation type and order as in orient. Speeds are assumed to be
    body-fixed; if we are defining the orientation of B in A using rot_type,
the angular velocity of B in A is assumed to be in the form: speed[0]*B.x +
speed[1]*B.y + speed[2]*B.z
Parameters
==========
speeds : list of length 3
The body fixed angular velocity measure numbers.
coords : list of length 3 or 4
The coordinates used to define the orientation of the two frames.
rot_type : str
The type of rotation used to create the equations. Body, Space, or
Quaternion only
rot_order : str
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import kinematic_equations, vprint
>>> u1, u2, u3 = dynamicsymbols('u1 u2 u3')
>>> q1, q2, q3 = dynamicsymbols('q1 q2 q3')
>>> vprint(kinematic_equations([u1,u2,u3], [q1,q2,q3], 'body', '313'),
... order=None)
[-(u1*sin(q3) + u2*cos(q3))/sin(q2) + q1', -u1*cos(q3) + u2*sin(q3) + q2', (u1*sin(q3) + u2*cos(q3))*cos(q2)/sin(q2) - u3 + q3']
"""
# Code below is checking and sanitizing input
approved_orders = ('123', '231', '312', '132', '213', '321', '121', '131',
'212', '232', '313', '323', '1', '2', '3', '')
rot_order = str(rot_order).upper() # Now we need to make sure XYZ = 123
rot_type = rot_type.upper()
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
if not isinstance(speeds, (list, tuple)):
raise TypeError('Need to supply speeds in a list')
if len(speeds) != 3:
raise TypeError('Need to supply 3 body-fixed speeds')
if not isinstance(coords, (list, tuple)):
raise TypeError('Need to supply coordinates in a list')
if rot_type.lower() in ['body', 'space']:
if rot_order not in approved_orders:
raise ValueError('Not an acceptable rotation order')
if len(coords) != 3:
raise ValueError('Need 3 coordinates for body or space')
# Actual hard-coded kinematic differential equations
q1, q2, q3 = coords
q1d, q2d, q3d = [diff(i, dynamicsymbols._t) for i in coords]
w1, w2, w3 = speeds
s1, s2, s3 = [sin(q1), sin(q2), sin(q3)]
c1, c2, c3 = [cos(q1), cos(q2), cos(q3)]
if rot_type.lower() == 'body':
if rot_order == '123':
return [q1d - (w1 * c3 - w2 * s3) / c2, q2d - w1 * s3 - w2 *
c3, q3d - (-w1 * c3 + w2 * s3) * s2 / c2 - w3]
if rot_order == '231':
return [q1d - (w2 * c3 - w3 * s3) / c2, q2d - w2 * s3 - w3 *
c3, q3d - w1 - (- w2 * c3 + w3 * s3) * s2 / c2]
if rot_order == '312':
return [q1d - (-w1 * s3 + w3 * c3) / c2, q2d - w1 * c3 - w3 *
s3, q3d - (w1 * s3 - w3 * c3) * s2 / c2 - w2]
if rot_order == '132':
return [q1d - (w1 * c3 + w3 * s3) / c2, q2d + w1 * s3 - w3 *
c3, q3d - (w1 * c3 + w3 * s3) * s2 / c2 - w2]
if rot_order == '213':
return [q1d - (w1 * s3 + w2 * c3) / c2, q2d - w1 * c3 + w2 *
s3, q3d - (w1 * s3 + w2 * c3) * s2 / c2 - w3]
if rot_order == '321':
return [q1d - (w2 * s3 + w3 * c3) / c2, q2d - w2 * c3 + w3 *
s3, q3d - w1 - (w2 * s3 + w3 * c3) * s2 / c2]
if rot_order == '121':
return [q1d - (w2 * s3 + w3 * c3) / s2, q2d - w2 * c3 + w3 *
s3, q3d - w1 + (w2 * s3 + w3 * c3) * c2 / s2]
if rot_order == '131':
return [q1d - (-w2 * c3 + w3 * s3) / s2, q2d - w2 * s3 - w3 *
c3, q3d - w1 - (w2 * c3 - w3 * s3) * c2 / s2]
if rot_order == '212':
return [q1d - (w1 * s3 - w3 * c3) / s2, q2d - w1 * c3 - w3 *
s3, q3d - (-w1 * s3 + w3 * c3) * c2 / s2 - w2]
if rot_order == '232':
return [q1d - (w1 * c3 + w3 * s3) / s2, q2d + w1 * s3 - w3 *
c3, q3d + (w1 * c3 + w3 * s3) * c2 / s2 - w2]
if rot_order == '313':
return [q1d - (w1 * s3 + w2 * c3) / s2, q2d - w1 * c3 + w2 *
s3, q3d + (w1 * s3 + w2 * c3) * c2 / s2 - w3]
if rot_order == '323':
return [q1d - (-w1 * c3 + w2 * s3) / s2, q2d - w1 * s3 - w2 *
c3, q3d - (w1 * c3 - w2 * s3) * c2 / s2 - w3]
if rot_type.lower() == 'space':
if rot_order == '123':
return [q1d - w1 - (w2 * s1 + w3 * c1) * s2 / c2, q2d - w2 *
c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / c2]
if rot_order == '231':
return [q1d - (w1 * c1 + w3 * s1) * s2 / c2 - w2, q2d + w1 *
s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / c2]
if rot_order == '312':
return [q1d - (w1 * s1 + w2 * c1) * s2 / c2 - w3, q2d - w1 *
c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / c2]
if rot_order == '132':
return [q1d - w1 - (-w2 * c1 + w3 * s1) * s2 / c2, q2d - w2 *
s1 - w3 * c1, q3d - (w2 * c1 - w3 * s1) / c2]
if rot_order == '213':
return [q1d - (w1 * s1 - w3 * c1) * s2 / c2 - w2, q2d - w1 *
c1 - w3 * s1, q3d - (-w1 * s1 + w3 * c1) / c2]
if rot_order == '321':
return [q1d - (-w1 * c1 + w2 * s1) * s2 / c2 - w3, q2d - w1 *
s1 - w2 * c1, q3d - (w1 * c1 - w2 * s1) / c2]
if rot_order == '121':
return [q1d - w1 + (w2 * s1 + w3 * c1) * c2 / s2, q2d - w2 *
c1 + w3 * s1, q3d - (w2 * s1 + w3 * c1) / s2]
if rot_order == '131':
return [q1d - w1 - (w2 * c1 - w3 * s1) * c2 / s2, q2d - w2 *
s1 - w3 * c1, q3d - (-w2 * c1 + w3 * s1) / s2]
if rot_order == '212':
return [q1d - (-w1 * s1 + w3 * c1) * c2 / s2 - w2, q2d - w1 *
c1 - w3 * s1, q3d - (w1 * s1 - w3 * c1) / s2]
if rot_order == '232':
return [q1d + (w1 * c1 + w3 * s1) * c2 / s2 - w2, q2d + w1 *
s1 - w3 * c1, q3d - (w1 * c1 + w3 * s1) / s2]
if rot_order == '313':
return [q1d + (w1 * s1 + w2 * c1) * c2 / s2 - w3, q2d - w1 *
c1 + w2 * s1, q3d - (w1 * s1 + w2 * c1) / s2]
if rot_order == '323':
return [q1d - (w1 * c1 - w2 * s1) * c2 / s2 - w3, q2d - w1 *
s1 - w2 * c1, q3d - (-w1 * c1 + w2 * s1) / s2]
elif rot_type.lower() == 'quaternion':
if rot_order != '':
raise ValueError('Cannot have rotation order for quaternion')
if len(coords) != 4:
raise ValueError('Need 4 coordinates for quaternion')
# Actual hard-coded kinematic differential equations
e0, e1, e2, e3 = coords
w = Matrix(speeds + [0])
E = Matrix([[e0, -e3, e2, e1], [e3, e0, -e1, e2], [-e2, e1, e0, e3],
[-e1, -e2, -e3, e0]])
edots = Matrix([diff(i, dynamicsymbols._t) for i in [e1, e2, e3, e0]])
return list(edots.T - 0.5 * w.T * E.T)
else:
raise ValueError('Not an approved rotation type for this function')
def get_motion_params(frame, **kwargs):
"""
Returns the three motion parameters - (acceleration, velocity, and
position) as vectorial functions of time in the given frame.
If a higher order differential function is provided, the lower order
functions are used as boundary conditions. For example, given the
acceleration, the velocity and position parameters are taken as
boundary conditions.
The values of time at which the boundary conditions are specified
are taken from timevalue1(for position boundary condition) and
timevalue2(for velocity boundary condition).
If any of the boundary conditions are not provided, they are taken
to be zero by default (zero vectors, in case of vectorial inputs). If
the boundary conditions are also functions of time, they are converted
to constants by substituting the time values in the dynamicsymbols._t
time Symbol.
This function can also be used for calculating rotational motion
parameters. Have a look at the Parameters and Examples for more clarity.
Parameters
==========
frame : ReferenceFrame
The frame to express the motion parameters in
acceleration : Vector
Acceleration of the object/frame as a function of time
    velocity : Vector
        Velocity as a function of time, or the boundary condition for
        velocity at time = timevalue2
    position : Vector
        Position as a function of time, or the boundary condition for
        position at time = timevalue1
timevalue1 : sympyfiable
Value of time for position boundary condition
timevalue2 : sympyfiable
Value of time for velocity boundary condition
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, get_motion_params, dynamicsymbols
>>> from sympy import symbols
>>> R = ReferenceFrame('R')
>>> v1, v2, v3 = dynamicsymbols('v1 v2 v3')
>>> v = v1*R.x + v2*R.y + v3*R.z
>>> get_motion_params(R, position = v)
(v1''*R.x + v2''*R.y + v3''*R.z, v1'*R.x + v2'*R.y + v3'*R.z, v1*R.x + v2*R.y + v3*R.z)
>>> a, b, c = symbols('a b c')
>>> v = a*R.x + b*R.y + c*R.z
>>> get_motion_params(R, velocity = v)
(0, a*R.x + b*R.y + c*R.z, a*t*R.x + b*t*R.y + c*t*R.z)
>>> parameters = get_motion_params(R, acceleration = v)
>>> parameters[1]
a*t*R.x + b*t*R.y + c*t*R.z
>>> parameters[2]
a*t**2/2*R.x + b*t**2/2*R.y + c*t**2/2*R.z
"""
##Helper functions
def _process_vector_differential(vectdiff, condition, \
variable, ordinate, frame):
"""
Helper function for get_motion methods. Finds derivative of vectdiff wrt
variable, and its integral using the specified boundary condition at
value of variable = ordinate.
        Returns the tuple (derivative, function, integral) computed from vectdiff
"""
#Make sure boundary condition is independent of 'variable'
if condition != 0:
condition = express(condition, frame, variables=True)
#Special case of vectdiff == 0
if vectdiff == Vector(0):
return (0, 0, condition)
#Express vectdiff completely in condition's frame to give vectdiff1
vectdiff1 = express(vectdiff, frame)
#Find derivative of vectdiff
vectdiff2 = time_derivative(vectdiff, frame)
#Integrate and use boundary condition
vectdiff0 = Vector(0)
lims = (variable, ordinate, variable)
for dim in frame:
function1 = vectdiff1.dot(dim)
abscissa = dim.dot(condition).subs({variable : ordinate})
# Indefinite integral of 'function1' wrt 'variable', using
# the given initial condition (ordinate, abscissa).
vectdiff0 += (integrate(function1, lims) + abscissa) * dim
#Return tuple
return (vectdiff2, vectdiff, vectdiff0)
##Function body
_check_frame(frame)
#Decide mode of operation based on user's input
if 'acceleration' in kwargs:
mode = 2
elif 'velocity' in kwargs:
mode = 1
else:
mode = 0
#All the possible parameters in kwargs
#Not all are required for every case
#If not specified, set to default values(may or may not be used in
#calculations)
conditions = ['acceleration', 'velocity', 'position',
'timevalue', 'timevalue1', 'timevalue2']
for i, x in enumerate(conditions):
if x not in kwargs:
if i < 3:
kwargs[x] = Vector(0)
else:
kwargs[x] = S(0)
elif i < 3:
_check_vector(kwargs[x])
else:
kwargs[x] = sympify(kwargs[x])
if mode == 2:
vel = _process_vector_differential(kwargs['acceleration'],
kwargs['velocity'],
dynamicsymbols._t,
kwargs['timevalue2'], frame)[2]
pos = _process_vector_differential(vel, kwargs['position'],
dynamicsymbols._t,
kwargs['timevalue1'], frame)[2]
return (kwargs['acceleration'], vel, pos)
elif mode == 1:
return _process_vector_differential(kwargs['velocity'],
kwargs['position'],
dynamicsymbols._t,
kwargs['timevalue1'], frame)
else:
vel = time_derivative(kwargs['position'], frame)
acc = time_derivative(vel, frame)
return (acc, vel, kwargs['position'])
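# Hedged note (added for illustration; not in the sympy source): the mode
# chosen above mirrors the highest-order input supplied --
#
#     get_motion_params(R, acceleration=a)  # integrate twice: a -> v -> r
#     get_motion_params(R, velocity=v)      # integrate once, differentiate once
#     get_motion_params(R, position=r)      # differentiate twice: r -> v -> a
#
# with the 'velocity' and 'position' kwargs doubling as boundary conditions
# in the integrating modes (at timevalue2 and timevalue1 respectively).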
def partial_velocity(vel_list, u_list, frame):
"""Returns a list of partial velocities.
For a list of velocity or angular velocity vectors the partial derivatives
with respect to the supplied generalized speeds are computed, in the
specified ReferenceFrame.
The output is a list of lists. The outer list has a number of elements
equal to the number of supplied velocity vectors. The inner lists are, for
each velocity vector, the partial derivatives of that velocity vector with
respect to the generalized speeds supplied.
Parameters
==========
vel_list : list
        List of velocities of Points and angular velocities of ReferenceFrames
u_list : list
List of independent generalized speeds.
frame : ReferenceFrame
The ReferenceFrame the partial derivatives are going to be taken in.
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy.physics.vector import partial_velocity
>>> u = dynamicsymbols('u')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, u * N.x)
>>> vel_list = [P.vel(N)]
>>> u_list = [u]
>>> partial_velocity(vel_list, u_list, N)
[[N.x]]
"""
if not iterable(vel_list):
raise TypeError('Provide velocities in an iterable')
if not iterable(u_list):
raise TypeError('Provide speeds in an iterable')
list_of_pvlists = []
for i in vel_list:
pvlist = []
for j in u_list:
vel = i.diff(j, frame)
pvlist += [vel]
list_of_pvlists += [pvlist]
return list_of_pvlists
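# Hedged note (added; standard Kane's-method fact, not stated in the sympy
# source): when a velocity is linear in the generalized speeds,
#     v = sum_r (dv/du_r) * u_r + v_t ,
# the inner lists returned above are exactly the coefficient vectors dv/du_r.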
def dynamicsymbols(names, level=0):
"""Uses symbols and Function for functions of time.
Creates a SymPy UndefinedFunction, which is then initialized as a function
of a variable, the default being Symbol('t').
Parameters
==========
names : str
Names of the dynamic symbols you want to create; works the same way as
inputs to symbols
level : int
        Number of times the returned function is differentiated with
        respect to t (once, twice, etc.)
Examples
========
>>> from sympy.physics.vector import dynamicsymbols
>>> from sympy import diff, Symbol
>>> q1 = dynamicsymbols('q1')
>>> q1
q1(t)
>>> diff(q1, Symbol('t'))
Derivative(q1(t), t)
"""
esses = symbols(names, cls=Function)
t = dynamicsymbols._t
if iterable(esses):
esses = [reduce(diff, [t] * level, e(t)) for e in esses]
return esses
else:
return reduce(diff, [t] * level, esses(t))
dynamicsymbols._t = Symbol('t')
dynamicsymbols._str = '\''
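# Hedged example (added): `level` pre-differentiates the result, so the two
# calls below agree, assuming the default time symbol dynamicsymbols._t:
#
#     q = dynamicsymbols('q')
#     qd = dynamicsymbols('q', level=1)
#     assert qd == q.diff(dynamicsymbols._t)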
| mit |
TraurigeNarr/ThirdParties | googletest-master/googletest/test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
          variable; None if the variable should be unset.
      flag_value: value of the --gtest_throw_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-2.0 |
elpaso/QGIS | python/plugins/processing/algs/grass7/ext/i_gensigset.py | 16 | 1699 | # -*- coding: utf-8 -*-
"""
***************************************************************************
i_gensigset.py
--------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
import os
from .i import regroupRasters, exportSigFile
def processCommand(alg, parameters, context, feedback):
# We need to extract the basename of the signature file
signatureFile = alg.parameterAsString(parameters, 'signaturefile', context)
shortSigFile = os.path.basename(signatureFile)
parameters['signaturefile'] = shortSigFile
# Regroup rasters
group, subgroup = regroupRasters(alg, parameters, context, 'input', 'group', 'subgroup')
alg.processCommand(parameters, context, feedback)
# Re-add signature files
parameters['signaturefile'] = signatureFile
# Export signature file
exportSigFile(alg, group, subgroup, signatureFile, 'sigset')
| gpl-2.0 |
gameduell/duell | bin/mac/python2.7.9/lib/python2.7/multiprocessing/queues.py | 103 | 12318 | #
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
import sys
import os
import threading
import collections
import time
import atexit
import weakref
from Queue import Empty, Full
import _multiprocessing
from multiprocessing import Pipe
from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
from multiprocessing.util import debug, info, Finalize, register_after_fork
from multiprocessing.forking import assert_spawning
#
# Queue type using a pipe, buffer and thread
#
class Queue(object):
def __init__(self, maxsize=0):
if maxsize <= 0:
maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._sem = BoundedSemaphore(maxsize)
self._after_fork()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
def __getstate__(self):
assert_spawning(self)
return (self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
(self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send = self._writer.send
self._recv = self._reader.recv
self._poll = self._reader.poll
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
finally:
self._notempty.release()
def get(self, block=True, timeout=None):
if block and timeout is None:
self._rlock.acquire()
try:
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
else:
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if block:
timeout = deadline - time.time()
if timeout < 0 or not self._poll(timeout):
raise Empty
elif not self._poll():
raise Empty
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
def qsize(self):
# Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
self._reader.close()
if self._close:
self._close()
def join_thread(self):
debug('Queue.join_thread()')
assert self._closed
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send,
self._wlock, self._writer.close),
name='QueueFeederThread'
)
self._thread.daemon = True
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
# On process exit we will wait for data to be flushed to pipe.
if not self._joincancelled:
self._jointhread = Finalize(
self._thread, Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5
)
# Send sentinel to the thread queue object when garbage collected
self._close = Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
notempty.acquire()
try:
buffer.append(_sentinel)
notempty.notify()
finally:
notempty.release()
@staticmethod
def _feed(buffer, notempty, send, writelock, close):
debug('starting thread to feed data to pipe')
from .util import is_exiting
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
if wacquire is None:
send(obj)
else:
wacquire()
try:
send(obj)
finally:
wrelease()
except IndexError:
pass
except Exception, e:
# Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
# We ignore errors which happen after the process has
# started to cleanup.
try:
if is_exiting():
info('error in queue thread: %s', e)
else:
import traceback
traceback.print_exc()
except Exception:
pass
_sentinel = object()
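# Hedged usage sketch (added; not part of the stdlib source): put() only
# appends to the in-process buffer and wakes the feeder thread, which later
# pickles the object into the pipe -- so put() may return before the item is
# visible to get() in another process.
#
#     q = Queue(maxsize=4)
#     q.put({'job': 1})
#     print q.get()                     # -> {'job': 1}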
#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#
class JoinableQueue(Queue):
def __init__(self, maxsize=0):
Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0)
self._cond = Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
self._cond.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._unfinished_tasks.release()
self._notempty.notify()
finally:
self._cond.release()
self._notempty.release()
def task_done(self):
self._cond.acquire()
try:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
finally:
self._cond.release()
def join(self):
self._cond.acquire()
try:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
finally:
self._cond.release()
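# Hedged usage sketch (added): the intended JoinableQueue protocol pairs every
# get() with exactly one task_done(); join() then blocks until the unfinished
# task count reaches zero.  `process` and `items` below are hypothetical.
#
#     def worker(q):
#         while True:
#             item = q.get()
#             try:
#                 process(item)
#             finally:
#                 q.task_done()
#
#     q = JoinableQueue()
#     for item in items:
#         q.put(item)
#     q.join()        # returns once every item has been task_done()'d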
#
# Simplified Queue type -- really just a locked pipe
#
class SimpleQueue(object):
def __init__(self):
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._make_methods()
def empty(self):
return not self._reader.poll()
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
self._make_methods()
def _make_methods(self):
recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self.put = self._writer.send
else:
send = self._writer.send
wacquire, wrelease = self._wlock.acquire, self._wlock.release
def put(obj):
wacquire()
try:
return send(obj)
finally:
wrelease()
self.put = put
| bsd-2-clause |
jamielennox/python-kiteclient | kiteclient/tests/v1/test_esek.py | 1 | 3094 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_esek
----------------------------------
Tests for `esek` module.
"""
from kiteclient.openstack.common.crypto import utils as cryptoutils
from kiteclient.tests import base
from kiteclient.tests.v1 import utils
from kiteclient.v1 import esek
from kiteclient.v1 import key
import base64
import six
class TestEsek(base.TestCase):
def setUp(self):
super(base.TestCase, self).setUp()
key_ses = utils.DummyKeyResponse(gen=20)
skey_data = "gTqLlW7x2oyNi3k+9YXTpQ=="
self.srckey = key.Key('testkey', skey_data, session=key_ses)
dkey_data = "uoUUn/+ZL+hNUwJ0cxTScg=="
self.dstkey = key.Key('destkey', dkey_data, session=key_ses)
self.skey = "uZnhYaRtzA7QdnDN1hVSWw=="
self.ekey = "fAlG9eGL44ew6q8uTMMKJw=="
self.esek_data = (
"LZ6WWNvCot49sEhnwn0Is/xGWYGQF72rCw8emEKHGmZpDcSQ4K0c5Ld0+fmR"
"T8PjzozEzWK97gNJQHZWSAh1JhmvMO+bjkUNlEdepOjTXrIW6QxdNvMY+Bkd"
"dDwrkKga4wZnoGgeMgK+B7cdGsQ8yAPE3vDjbpmIOvHjHXniCUs=")
def _encrypt(self, data):
crypto = cryptoutils.SymmetricCrypto(enctype='AES',
hashtype='SHA256')
enc = crypto.encrypt(base64.b64decode(self.ekey),
six.b(data), b64encode=True)
sig = crypto.sign(base64.b64decode(self.skey),
six.b(data), b64encode=True)
return enc, sig
def test_integrity(self):
esek_obj = esek.Esek(self.srckey.key_name,
self.dstkey,
self.esek_data)
b64_sig_key = base64.b64encode(esek_obj.sig_key)
b64_enc_key = base64.b64encode(esek_obj.enc_key)
self.assertEqual(six.b(self.skey), b64_sig_key)
self.assertEqual(six.b(self.ekey), b64_enc_key)
def test_decryption(self):
esek_obj = esek.Esek(self.srckey.key_name,
self.dstkey,
self.esek_data)
message = "MESSAGE"
enc, sig = self._encrypt(message)
new_message = esek_obj.decrypt(enc, sig)
self.assertEqual(six.b(message), new_message)
def test_bad_signature_throws(self):
esek_obj = esek.Esek(self.srckey.key_name,
self.dstkey,
self.esek_data)
message = "MESSAGE"
enc, _ = self._encrypt(message)
sig = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
        self.assertRaises(ValueError, esek_obj.decrypt, enc, sig)
 | apache-2.0 |
zaqwes8811/serial-cs | extern/gmock-1.6.0/gtest/test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
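# Hedged illustration (added): given program output such as
#
#   ----
#   FooTest.A
#   FooTest.B
#   ----
#   FooTest.B
#   FooTest.A
#
# the loop above yields [['FooTest.A', 'FooTest.B'], ['FooTest.B', 'FooTest.A']].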
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
boddulavineela/mase | python101/code/homophone.py | 14 | 1749 | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from pronounce import read_dictionary
def make_word_dict():
"""Read the words in words.txt and return a dictionary
that contains the words as keys"""
d = dict()
fin = open('words.txt')
for line in fin:
word = line.strip().lower()
d[word] = word
return d
def homophones(a, b, phonetic):
"""Checks if words two can be pronounced the same way.
If either word is not in the pronouncing dictionary, return False
a, b: strings
phonetic: map from words to pronunciation codes
"""
if a not in phonetic or b not in phonetic:
return False
return phonetic[a] == phonetic[b]
def check_word(word, word_dict, phonetic):
"""Checks to see if the word has the following property:
removing the first letter yields a word with the same
pronunciation, and removing the second letter yields a word
with the same pronunciation.
word: string
word_dict: dictionary with words as keys
phonetic: map from words to pronunciation codes
"""
word1 = word[1:]
if word1 not in word_dict:
return False
if not homophones(word, word1, phonetic):
return False
word2 = word[0] + word[2:]
if word2 not in word_dict:
return False
if not homophones(word, word2, phonetic):
return False
return True
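# Hedged illustration (added): a qualifying word w satisfies
#     pron(w) == pron(w[1:]) == pron(w[0] + w[2:])
# The classic solution is 'scent': dropping the first letter gives 'cent' and
# dropping the second gives 'sent', all pronounced the same way (assuming all
# three appear in words.txt and the CMU pronouncing dictionary).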
if __name__ == '__main__':
phonetic = read_dictionary()
word_dict = make_word_dict()
for word in word_dict:
if check_word(word, word_dict, phonetic):
print word, word[1:], word[0] + word[2:]
| unlicense |
krasin/omim | 3party/Alohalytics/tests/googletest/test/gtest_shuffle_test.py | 3023 | 12549 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
wkennington/rethinkdb | external/v8_3.30.33.16/build/gyp/test/generator-output/gyptest-subdir2-deep.py | 216 | 1034 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target from a .gyp file a few subdirectories
deep when the --generator-output= option is used to put the build
configuration files in a separate directory tree.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])
test.writable(test.workpath('src'), False)
test.writable(test.workpath('src/subdir2/deeper/build'), True)
test.run_gyp('deeper.gyp',
'-Dset_symroot=1',
'--generator-output=' + test.workpath('gypfiles'),
chdir='src/subdir2/deeper')
test.build('deeper.gyp', test.ALL, chdir='gypfiles')
chdir = 'gypfiles'
if test.format == 'xcode':
chdir = 'src/subdir2/deeper'
test.run_built_executable('deeper',
chdir=chdir,
stdout="Hello from deeper.c\n")
test.pass_test()
| agpl-3.0 |
BitcoinUnlimited/BitcoinUnlimited | contrib/seeds/generate-seeds.py | 71 | 4378 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
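# A hedged sketch of the mapping performed below (values illustrative): the
# IPv4 seed '1.2.3.4:8333' is embedded after the 12-byte pchIPv4 prefix and
# emitted by process_nodes() as
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8333}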
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
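# Illustrative parse_spec() results (hypothetical inputs), assuming a
# default port of 8333:
#   parse_spec('1.2.3.4', 8333)            -> (16-byte IPv4-mapped address, 8333)
#   parse_spec('[2001:db8::1]:9000', 8333) -> (16-byte IPv6 address, 9000)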
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8333)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 18333)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
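# Typical invocation (illustrative paths), writing the header to stdout:
#   python generate-seeds.py contrib/seeds > src/chainparamsseeds.h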
| mit |
mschurenko/ansible-modules-core | system/user.py | 9 | 71259 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: user
author: Stephen Fromm
version_added: "0.2"
short_description: Manage user accounts
requirements: [ useradd, userdel, usermod ]
description:
- Manage user accounts and user attributes.
options:
name:
required: true
aliases: [ "user" ]
description:
- Name of the user to create, remove or modify.
comment:
required: false
description:
- Optionally sets the description (aka I(GECOS)) of user account.
uid:
required: false
description:
- Optionally sets the I(UID) of the user.
non_unique:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Optionally, when used with the -u option, this allows the user ID to
be changed to a non-unique value.
version_added: "1.1"
group:
required: false
description:
- Optionally sets the user's primary group (takes a group name).
groups:
required: false
description:
- Puts the user in this comma-delimited list of groups. When set to
the empty string ('groups='), the user is removed from all groups
except the primary group.
append:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If C(yes), will only add groups, not set them to just the list
in I(groups).
shell:
required: false
description:
- Optionally set the user's shell.
home:
required: false
description:
- Optionally set the user's home directory.
password:
required: false
description:
- Optionally set the user's password to this crypted value. See
the user example in the github examples directory for what this looks
like in a playbook. See U(http://docs.ansible.com/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module)
for details on various ways to generate these password values.
Note that on Darwin systems this value has to be cleartext.
Beware of security issues.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the account should exist or not, taking action if the state is different from what is stated.
createhome:
required: false
default: "yes"
choices: [ "yes", "no" ]
description:
- Unless set to C(no), a home directory will be made for the user
when the account is created or if the home directory does not
exist.
move_home:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If set to C(yes) when used with C(home=), attempt to move the
user's home directory to the specified directory if it isn't there
already.
system:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When creating an account, setting this to C(yes) makes the user a
system account. This setting cannot be changed on existing users.
force:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When used with C(state=absent), behavior is as with
C(userdel --force).
login_class:
required: false
description:
- Optionally sets the user's login class for FreeBSD, OpenBSD and NetBSD systems.
remove:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When used with C(state=absent), behavior is as with
C(userdel --remove).
generate_ssh_key:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.9"
description:
- Whether to generate a SSH key for the user in question.
This will B(not) overwrite an existing SSH key.
ssh_key_bits:
required: false
default: 2048
version_added: "0.9"
description:
- Optionally specify number of bits in SSH key to create.
ssh_key_type:
required: false
default: rsa
version_added: "0.9"
description:
- Optionally specify the type of SSH key to generate.
Available SSH key types will depend on implementation
present on target host.
ssh_key_file:
required: false
default: .ssh/id_rsa
version_added: "0.9"
description:
- Optionally specify the SSH key filename. If this is a relative
filename then it will be relative to the user's home directory.
ssh_key_comment:
required: false
default: ansible-generated on $HOSTNAME
version_added: "0.9"
description:
- Optionally define the comment for the SSH key.
ssh_key_passphrase:
required: false
version_added: "0.9"
description:
- Set a passphrase for the SSH key. If no
passphrase is provided, the SSH key will default to
having no passphrase.
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "1.3"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
expires:
version_added: "1.9"
required: false
default: "None"
description:
- An expiry time for the user, in epoch seconds; it will be ignored on
platforms that do not support it. Currently supported on Linux and FreeBSD.
'''
EXAMPLES = '''
# Add the user 'johnd' with a specific uid and a primary group of 'admin'
- user: name=johnd comment="John Doe" uid=1040 group=admin
# Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
- user: name=james shell=/bin/bash groups=admins,developers append=yes
# Remove the user 'johnd'
- user: name=johnd state=absent remove=yes
# Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa
# added a consultant whose account you want to expire
- user: name=james18 shell=/bin/zsh groups=developers expires=1422403387
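# Hedged example (crypted_password is a hypothetical variable): only set the
# password when the account is first created, leaving later changes alone
- user: name=deploy password={{ crypted_password }} update_password=on_create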
'''
import os
import pwd
import grp
import shutil  # used by create_homedir() and the Darwin remove_user()
import syslog
import platform
import socket
import time
try:
import spwd
HAVE_SPWD=True
except:
HAVE_SPWD=False
class User(object):
"""
This is a generic User manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- create_user()
- remove_user()
- modify_user()
- ssh_key_gen()
- ssh_key_fingerprint()
- user_exists()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
SHADOWFILE = '/etc/shadow'
DATE_FORMAT = '%Y-%m-%d'
def __new__(cls, *args, **kwargs):
return load_platform_subclass(User, args, kwargs)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.uid = module.params['uid']
self.non_unique = module.params['non_unique']
self.group = module.params['group']
self.groups = module.params['groups']
self.comment = module.params['comment']
self.home = module.params['home']
self.shell = module.params['shell']
self.password = module.params['password']
self.force = module.params['force']
self.remove = module.params['remove']
self.createhome = module.params['createhome']
self.move_home = module.params['move_home']
self.system = module.params['system']
self.login_class = module.params['login_class']
self.append = module.params['append']
self.sshkeygen = module.params['generate_ssh_key']
self.ssh_bits = module.params['ssh_key_bits']
self.ssh_type = module.params['ssh_key_type']
self.ssh_comment = module.params['ssh_key_comment']
self.ssh_passphrase = module.params['ssh_key_passphrase']
self.update_password = module.params['update_password']
self.expires = None
if module.params['expires']:
try:
self.expires = time.gmtime(module.params['expires'])
except Exception, e:
module.fail_json(msg="Invalid expires time %s: %s" % (self.expires, str(e)))
if module.params['ssh_key_file'] is not None:
self.ssh_file = module.params['ssh_key_file']
else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
# select whether we dump additional debug info through syslog
self.syslogging = False
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.force:
cmd.append('-f')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
elif self.group_exists(self.name):
# use the -N option (no user group) if a group already
# exists with the same name as the user to prevent
# errors from useradd trying to create a group when
# USERGROUPS_ENAB is set in /etc/login.defs.
if os.path.exists('/etc/redhat-release'):
dist = platform.dist()
major_release = int(dist[1].split('.')[0])
if major_release <= 5:
cmd.append('-n')
else:
cmd.append('-N')
else:
cmd.append('-N')
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('--expiredate')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.createhome:
cmd.append('-m')
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def _check_usermod_append(self):
# check if this version of usermod can append groups
usermod_path = self.module.get_bin_path('usermod', True)
# for some reason, usermod --help cannot be used by non root
# on RH/Fedora, due to lack of execute bit for others
if not os.access(usermod_path, os.X_OK):
return False
cmd = [usermod_path]
cmd.append('--help')
rc, data1, data2 = self.execute_command(cmd)
helpout = data1 + data2
# check if --append exists
lines = helpout.split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
return False
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
if has_append:
cmd.append('-a')
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if self.append and not has_append:
cmd.append('-A')
cmd.append(','.join(group_diff))
else:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('--expiredate')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
elif self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
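# A hedged walk-through of the group-diff logic above (hypothetical values):
#   current_groups = ['wheel', 'docker']; groups = set(['wheel', 'admin'])
#   set(current_groups).symmetric_difference(groups) -> set(['docker', 'admin'])
# With append=yes and a usermod that supports -a, only additions matter, so
# something like 'usermod -a -G wheel,admin jsmith' is built; without append,
# '-G' replaces the user's supplementary groups with the target list outright.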
def group_exists(self,group):
try:
# Try group as a gid first
grp.getgrgid(int(group))
return True
except (ValueError, KeyError):
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def group_info(self, group):
if not self.group_exists(group):
return False
try:
# Try group as a gid first
return list(grp.getgrgid(int(group)))
except (ValueError, KeyError):
return list(grp.getgrnam(group))
def get_groups_set(self, remove_existing=True):
if self.groups is None:
return None
info = self.user_info()
groups = set(filter(None, self.groups.split(',')))
for g in set(groups):
if not self.group_exists(g):
self.module.fail_json(msg="Group %s does not exist" % (g))
if info and remove_existing and self.group_info(g)[2] == info[3]:
groups.remove(g)
return groups
def user_group_membership(self):
groups = []
info = self.get_pwd_info()
for group in grp.getgrall():
if self.name in group.gr_mem and not info[3] == group.gr_gid:
groups.append(group[0])
return groups
def user_exists(self):
try:
if pwd.getpwnam(self.name):
return True
except KeyError:
return False
def get_pwd_info(self):
if not self.user_exists():
return False
return list(pwd.getpwnam(self.name))
def user_info(self):
if not self.user_exists():
return False
info = self.get_pwd_info()
if len(info[1]) == 1 or len(info[1]) == 0:
info[1] = self.user_password()
return info
def user_password(self):
passwd = ''
if HAVE_SPWD:
try:
passwd = spwd.getspnam(self.name)[1]
except KeyError:
return passwd
if not self.user_exists():
return passwd
elif self.SHADOWFILE:
# Read shadow file for user's encrypted password string
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
for line in open(self.SHADOWFILE).readlines():
if line.startswith('%s:' % self.name):
passwd = line.split(':')[1]
return passwd
def get_ssh_key_path(self):
info = self.user_info()
if os.path.isabs(self.ssh_file):
ssh_key_file = self.ssh_file
else:
ssh_key_file = os.path.join(info[5], self.ssh_file)
return ssh_key_file
def ssh_key_gen(self):
info = self.user_info()
if not os.path.exists(info[5]):
return (1, '', 'User %s home directory does not exist' % self.name)
ssh_key_file = self.get_ssh_key_path()
ssh_dir = os.path.dirname(ssh_key_file)
if not os.path.exists(ssh_dir):
try:
os.mkdir(ssh_dir, 0700)
os.chown(ssh_dir, info[2], info[3])
except OSError, e:
return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e)))
if os.path.exists(ssh_key_file):
return (None, 'Key already exists', '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-t')
cmd.append(self.ssh_type)
cmd.append('-b')
cmd.append(self.ssh_bits)
cmd.append('-C')
cmd.append(self.ssh_comment)
cmd.append('-f')
cmd.append(ssh_key_file)
cmd.append('-N')
if self.ssh_passphrase is not None:
cmd.append(self.ssh_passphrase)
else:
cmd.append('')
(rc, out, err) = self.execute_command(cmd)
if rc == 0:
# If the keys were successfully created, we should be able
# to tweak ownership.
os.chown(ssh_key_file, info[2], info[3])
os.chown('%s.pub' % ssh_key_file, info[2], info[3])
return (rc, out, err)
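# For reference, the command assembled above resolves to something like
# (illustrative paths and defaults):
#   ssh-keygen -t rsa -b 2048 -C 'ansible-generated on host' \
#       -f /home/jsmith/.ssh/id_rsa -N ''
# followed by chown'ing the resulting key pair to the user.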
def ssh_key_fingerprint(self):
ssh_key_file = self.get_ssh_key_path()
if not os.path.exists(ssh_key_file):
return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
cmd = [ self.module.get_bin_path('ssh-keygen', True) ]
cmd.append('-l')
cmd.append('-f')
cmd.append(ssh_key_file)
return self.execute_command(cmd)
def get_ssh_public_key(self):
ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
try:
f = open(ssh_public_key_file)
ssh_public_key = f.read().strip()
f.close()
except IOError:
return None
return ssh_public_key
def create_user(self):
# by default we use the create_user_useradd method
return self.create_user_useradd()
def remove_user(self):
# by default we use the remove_user_userdel method
return self.remove_user_userdel()
def modify_user(self):
# by default we use the modify_user_usermod method
return self.modify_user_usermod()
def create_homedir(self, path):
if not os.path.exists(path):
# use /etc/skel if possible
if os.path.exists('/etc/skel'):
try:
shutil.copytree('/etc/skel', path, symlinks=True)
except OSError, e:
self.module.exit_json(failed=True, msg="%s" % e)
else:
try:
os.makedirs(path)
except OSError, e:
self.module.exit_json(failed=True, msg="%s" % e)
def chown_homedir(self, uid, gid, path):
try:
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
except OSError, e:
self.module.exit_json(failed=True, msg="%s" % e)
# ===========================================
class FreeBsdUser(User):
"""
This is a FreeBSD User manipulation class - it uses the pw command
to manipulate the user database, followed by the chpass command
to change the password.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'FreeBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def remove_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'userdel',
'-n',
self.name
]
if self.remove:
cmd.append('-r')
return self.execute_command(cmd)
def create_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'useradd',
'-n',
self.name,
]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.createhome:
cmd.append('-m')
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.expires:
days = (time.mktime(self.expires) - time.time()) / 86400
cmd.append('-e')
cmd.append(str(int(days)))
# system cannot be handled currently - should we error if it's requested?
# create the user
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password in a second command
if self.password is not None:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
return (rc, out, err)
def modify_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'usermod',
'-n',
self.name
]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
for line in open(self.SHADOWFILE).readlines():
if line.startswith('%s:' % self.name):
user_login_class = line.split(':')[4]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.expires:
days = (time.mktime(self.expires) - time.time()) / 86400
cmd.append('-e')
cmd.append(str(int(days)))
# modify the user if cmd will do anything
if cmd_len != len(cmd):
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password in a second command
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
return (rc, out, err)
# ===========================================
class OpenBSDUser(User):
"""
This is a OpenBSD User manipulation class.
Main differences are that OpenBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'OpenBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.createhome:
cmd.append('-m')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups_option = '-G'
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_option = '-S'
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append(groups_option)
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
(rc, out, err) = self.execute_command(userinfo_cmd)
for line in out.splitlines():
tokens = line.split()
if tokens[0] == 'class' and len(tokens) == 2:
user_login_class = tokens[1]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
elif self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class NetBSDUser(User):
"""
This is a NetBSD User manipulation class.
Main differences are that NetBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'NetBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.createhome:
cmd.append('-m')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups = set(current_groups).union(groups)
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
elif self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class SunOS(User):
"""
This is a SunOS User manipulation class - The main difference between
this class and the generic user class is that Solaris-type distros
don't support the concept of a "system" account and we need to
edit the /etc/shadow file manually to set a password. (Ugh)
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'SunOS'
distribution = None
SHADOWFILE = '/etc/shadow'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.createhome:
cmd.append('-m')
cmd.append(self.name)
if self.module.check_mode:
return (0, '', '')
else:
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password by editing the /etc/shadow file
if self.password is not None:
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() / 86400))
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
except Exception, err:
self.module.fail_json(msg="failed to update users password: %s" % str(err))
return (rc, out, err)
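# The shadow-editing loop above rewrites the user's entry in place; a
# shadow line looks like (illustrative hash and dates):
#   jsmith:$5$salt$hash:16425:0:99999:7:::
# field 1 is the crypted password and field 2 the last-change date in
# days since the epoch, hence str(int(time.time() / 86400)).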
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups.update(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.module.check_mode:
return (0, '', '')
else:
# modify the user if cmd will do anything
if cmd_len != len(cmd):
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password by editing the /etc/shadow file
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() / 86400))
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
rc = 0
except Exception, err:
self.module.fail_json(msg="failed to update users password: %s" % str(err))
return (rc, out, err)
# ===========================================
class DarwinUser(User):
"""
This is a Darwin Mac OS X User manipulation class.
Main differences are that Darwin:-
- Handles accounts in a database managed by dscl(1)
- Has no useradd/groupadd
- Does not create home directories
- User password must be cleartext
- UID must be given
- System users must be under 500
This overrides the following methods from the generic class:-
- user_exists()
- create_user()
- remove_user()
- modify_user()
"""
platform = 'Darwin'
distribution = None
SHADOWFILE = None
dscl_directory = '.'
fields = [
('comment', 'RealName'),
('home', 'NFSHomeDirectory'),
('shell', 'UserShell'),
('uid', 'UniqueID'),
('group', 'PrimaryGroupID'),
]
def _get_dscl(self):
return [ self.module.get_bin_path('dscl', True), self.dscl_directory ]
def _list_user_groups(self):
cmd = self._get_dscl()
cmd += [ '-search', '/Groups', 'GroupMembership', self.name ]
(rc, out, err) = self.execute_command(cmd)
groups = []
for line in out.splitlines():
if line.startswith(' ') or line.startswith(')'):
continue
groups.append(line.split()[0])
return groups
def _get_user_property(self, property):
'''Return user PROPERTY as given by dscl(1) read, or None if not found.'''
cmd = self._get_dscl()
cmd += [ '-read', '/Users/%s' % self.name, property ]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
return None
# from dscl(1)
# if property contains embedded spaces, the list will instead be
# displayed one entry per line, starting on the line after the key.
lines = out.splitlines()
#sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
if len(lines) == 1:
return lines[0].split(': ')[1]
else:
if len(lines) > 2:
return '\n'.join([ lines[1].strip() ] + lines[2:])
else:
if len(lines) == 2:
return lines[1].strip()
else:
return None
def _get_next_uid(self):
'''Return the next available uid'''
cmd = self._get_dscl()
cmd += ['-list', '/Users', 'UniqueID']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg="Unable to get the next available uid",
rc=rc,
out=out,
err=err
)
max_uid = 0
for line in out.splitlines():
if max_uid < int(line.split()[1]):
max_uid = int(line.split()[1])
return max_uid + 1
def _change_user_password(self):
'''Change password for SELF.NAME against SELF.PASSWORD.
Please note that the password must be cleartext.
'''
# some documentation on how is stored passwords on OSX:
# http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
# http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
# http://pastebin.com/RYqxi7Ca
# on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
# https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
# https://gist.github.com/nueh/8252572
cmd = self._get_dscl()
if self.password:
cmd += [ '-passwd', '/Users/%s' % self.name, self.password]
else:
cmd += [ '-create', '/Users/%s' % self.name, 'Password', '*']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Error when changing password',
err=err, out=out, rc=rc)
return (rc, out, err)
def _make_group_numerical(self):
'''Convert SELF.GROUP to its string numerical (gid) value suitable for dscl.'''
if self.group is None:
self.group = 'nogroup'
try:
self.group = grp.getgrnam(self.group).gr_gid
except KeyError:
self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
# We need to pass a string to dscl
self.group = str(self.group)
def __modify_group(self, group, action):
'''Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove'; anything else is treated as 'remove'. '''
if action == 'add':
option = '-a'
else:
option = '-d'
cmd = [ 'dseditgroup', '-o', 'edit', option, self.name,
'-t', 'user', group ]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
% (action, self.name, group),
err=err, out=out, rc=rc)
return (rc, out, err)
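# The command built above resolves to, e.g. (hypothetical names):
#   dseditgroup -o edit -a jsmith -t user staff
# to add a membership, or '-d' in place of '-a' to remove it.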
def _modify_group(self):
'''Synchronize SELF.NAME's group membership with SELF.GROUPS,
adding and removing memberships as needed. Returns (rc, out, err, changed).'''
rc = 0
out = ''
err = ''
changed = False
current = set(self._list_user_groups())
if self.groups is not None:
target = set(self.groups.split(','))
else:
target = set([])
for remove in current - target:
(_rc, _out, _err) = self.__modify_group(remove, 'remove')
rc += _rc
out += _out
err += _err
changed = True
for add in target - current:
(_rc, _out, _err) = self.__modify_group(add, 'add')
rc += _rc
out += _out
err += _err
changed = True
return (rc, out, err, changed)
def _update_system_user(self):
'''Hide or show the user on the login window according to SELF.SYSTEM.
Returns 0 if a change has been made, None otherwise.'''
plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
# http://support.apple.com/kb/HT5017?viewlocale=en_US
cmd = [ 'defaults', 'read', plist_file, 'HiddenUsersList' ]
(rc, out, err) = self.execute_command(cmd)
# returned value is
# (
# "_userA",
# "_UserB",
# userc
# )
hidden_users = []
for x in out.splitlines()[1:-1]:
try:
x = x.split('"')[1]
except IndexError:
x = x.strip()
hidden_users.append(x)
if self.system:
if not self.name in hidden_users:
cmd = [ 'defaults', 'write', plist_file,
'HiddenUsersList', '-array-add', self.name ]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot add user "%s" to hidden user list.'
% self.name, err=err, out=out, rc=rc)
return 0
else:
if self.name in hidden_users:
hidden_users.remove(self.name)
cmd = [ 'defaults', 'write', plist_file,
'HiddenUsersList', '-array' ] + hidden_users
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot remove user "%s" from hidden user list.'
% self.name, err=err, out=out, rc=rc)
return 0
def user_exists(self):
'''Check if SELF.NAME is a known user on the system.'''
cmd = self._get_dscl()
cmd += [ '-list', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
return rc == 0
def remove_user(self):
'''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
info = self.user_info()
cmd = self._get_dscl()
cmd += [ '-delete', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot delete user "%s".'
% self.name, err=err, out=out, rc=rc)
if self.force:
if os.path.exists(info[5]):
shutil.rmtree(info[5])
out += "Removed %s" % info[5]
return (rc, out, err)
def create_user(self, command_name='dscl'):
cmd = self._get_dscl()
cmd += [ '-create', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot create user "%s".'
% self.name, err=err, out=out, rc=rc)
self._make_group_numerical()
if self.uid is None:
self.uid = str(self._get_next_uid())
# Homedir is not created by default
if self.createhome:
if self.home is None:
self.home = '/Users/%s' % self.name
if not os.path.exists(self.home):
os.makedirs(self.home)
self.chown_homedir(int(self.uid), int(self.group), self.home)
for field in self.fields:
if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += [ '-create', '/Users/%s' % self.name,
field[1], self.__dict__[field[0]]]
(rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot add property "%s" to user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
out += _out
err += _err
if rc != 0:
return (rc, _out, _err)
(rc, _out, _err) = self._change_user_password()
out += _out
err += _err
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
return (rc, out, err)
def modify_user(self):
changed = None
out = ''
err = ''
self._make_group_numerical()
for field in self.fields:
if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]:
current = self._get_user_property(field[1])
if current is None or current != self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += [ '-create', '/Users/%s' % self.name,
field[1], self.__dict__[field[0]]]
(rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot update property "%s" for user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
changed = rc
out += _out
err += _err
if self.update_password == 'always' and self.password is not None:
(rc, _out, _err) = self._change_user_password()
out += _out
err += _err
changed = rc
(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err
if _changed is True:
changed = rc
rc = self._update_system_user()
if rc == 0:
changed = rc
return (changed, out, err)
# ===========================================
class AIX(User):
"""
This is a AIX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'AIX'
distribution = None
SHADOWFILE = '/etc/security/passwd'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.createhome:
cmd.append('-m')
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.password is not None:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
# skip if no changes to be made
if len(cmd) == 1:
(rc, out, err) = (None, '', '')
elif self.module.check_mode:
return (True, '', '')
else:
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
(rc2, out2, err2) = self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))
else:
(rc2, out2, err2) = (None, '', '')
if rc is not None:
return (rc, out+out2, err+err2)
else:
return (rc2, out+out2, err+err2)
# ===========================================
class HPUX(User):
"""
This is a HP-UX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'HP-UX'
distribution = None
SHADOWFILE = '/etc/shadow'
def create_user(self):
cmd = ['/usr/sam/lbin/useradd.sam']
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.createhome:
cmd.append('-m')
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user(self):
cmd = ['/usr/sam/lbin/userdel.sam']
if self.force:
cmd.append('-F')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = ['/usr/sam/lbin/usermod.sam']
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
if has_append:
cmd.append('-a')
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
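# when append was requested but this usermod lacks an append flag, fall
# back to -A with just the missing groups; otherwise replace the whole
# membership list with -G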
if self.append and not has_append:
cmd.append('-A')
cmd.append(','.join(group_diff))
else:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
elif self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
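# Illustrative playbook task using the options declared in main() below
# (all values are hypothetical):
#
#   - user: name=johnd uid=1040 group=admin groups=www,dev
#           shell=/bin/bash append=yes generate_ssh_key=yes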
def main():
ssh_defaults = {
'bits': '2048',
'type': 'rsa',
'passphrase': None,
'comment': 'ansible-generated on %s' % socket.gethostname()
}
module = AnsibleModule(
argument_spec = dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, aliases=['user'], type='str'),
uid=dict(default=None, type='str'),
non_unique=dict(default='no', type='bool'),
group=dict(default=None, type='str'),
groups=dict(default=None, type='str'),
comment=dict(default=None, type='str'),
home=dict(default=None, type='str'),
shell=dict(default=None, type='str'),
password=dict(default=None, type='str'),
login_class=dict(default=None, type='str'),
# following options are specific to userdel
force=dict(default='no', type='bool'),
remove=dict(default='no', type='bool'),
# following options are specific to useradd
createhome=dict(default='yes', type='bool'),
system=dict(default='no', type='bool'),
# following options are specific to usermod
move_home=dict(default='no', type='bool'),
append=dict(default='no', type='bool'),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(default=ssh_defaults['bits'], type='str'),
ssh_key_type=dict(default=ssh_defaults['type'], type='str'),
ssh_key_file=dict(default=None, type='str'),
ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'),
ssh_key_passphrase=dict(default=None, type='str'),
update_password=dict(default='always', choices=['always', 'on_create'], type='str'),
expires=dict(default=None, type='float'),
),
supports_check_mode=True
)
user = User(module)
if user.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - platform %s' % user.platform)
if user.distribution:
syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.create_user()
result['system'] = user.system
result['createhome'] = user.createhome
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
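# rc stays None when no command was executed, i.e. nothing needed changing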
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists():
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.createhome:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
# deal with ssh key
if user.sshkeygen:
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
kd0aij/matrixpilot_old | Tools/MAVLink/MAVProxy/modules/antenna.py | 1 | 2346 | #!/usr/bin/env python
'''
antenna pointing module
Andrew Tridgell
June 2012
'''
import sys, os, time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'cuav', 'lib'))
import cuav_util
mpstate = None
class module_state(object):
def __init__(self):
self.gcs_location = None
self.last_bearing = 0
self.last_announce = 0
def name():
'''return module name'''
return "antenna"
def description():
'''return module description'''
return "antenna pointing module"
def cmd_antenna(args):
'''set gcs location'''
state = mpstate.antenna_state
usage = "antenna lat lon"
if len(args) != 2:
if state.gcs_location is None:
print("GCS location not set")
else:
print("GCS location %s" % str(state.gcs_location))
return
state.gcs_location = (float(args[0]), float(args[1]))
def init(_mpstate):
'''initialise module'''
global mpstate
mpstate = _mpstate
mpstate.antenna_state = module_state()
mpstate.command_map['antenna'] = (cmd_antenna, "antenna link control")
def unload():
'''unload module'''
pass
def mavlink_packet(m):
'''handle an incoming mavlink packet'''
state = mpstate.antenna_state
if state.gcs_location is None and mpstate.status.wploader.count() > 0:
home = mpstate.status.wploader.wp(0)
mpstate.antenna_state.gcs_location = (home.x, home.y)
print("Antenna home set")
if state.gcs_location is None:
return
if m.get_type() == 'GPS_RAW' and state.gcs_location is not None:
(gcs_lat, gcs_lon) = state.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat, m.lon)
elif m.get_type() == 'GPS_RAW_INT' and state.gcs_location is not None:
(gcs_lat, gcs_lon) = state.gcs_location
bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat/1.0e7, m.lon/1.0e7)
else:
return
mpstate.console.set_status('Antenna', 'Antenna %.0f' % bearing, row=0)
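# throttle speech output: only re-announce when the bearing has moved
# more than 5 degrees and at least 15 seconds have passed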
if abs(bearing - state.last_bearing) > 5 and (time.time() - state.last_announce) > 15:
state.last_bearing = bearing
state.last_announce = time.time()
mpstate.functions.say("Antenna %u" % int(bearing+0.5))
| gpl-3.0 |
shastah/spacewalk | client/rhel/rhn-client-tools/src/up2date_client/rhnregGui.py | 5 | 56683 | #
# Copyright (c) 1999--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
# You must obey the GNU General Public License in all respects
# for all of the code used other than OpenSSL. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you
# do not wish to do so, delete this exception statement from your
# version. If you delete this exception statement from all source
# files in the program, then also delete it here.
# this is a module containing classes for the registration related windows in
# gui.py. The code is split up so we can reuse it in the firstboot modules
"""
Explanation of the RHN registration gui and how it is used from both
rhn_register and firstboot (from alikins):
Most of the "work" happens in rhnregGui.py. That's where the
logic for the screens is.
gui.py has Gui which is the big monster class (using druid) that makes up the
main gui wizard for up2date/rhn_register. Gui implements showing the pages for
up2date/rhn_register. For up2date/rhnreg, it has methods that load the classes
from rhnregGui (by multiple inheritance...), but it's not too bad, it's all
mixin stuff, nothing wacky, no overridden methods or anything.
firstboot/* does more or less the same thing, but with a different style of
wrapper just to present the firstboot style api's. (Each "page" in firstboot is
a module with a class that inherits FirstBootGuiWindow.)
"""
import sys
import os
import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
# Python 3 translations don't have a ugettext method
if not hasattr(t, 'ugettext'):
t.ugettext = t.gettext
_ = t.ugettext
from up2date_client import rhnreg
from up2date_client.rhnreg import ActivationResult
from up2date_client import up2dateErrors
from up2date_client import hardware
from up2date_client import messageWindow
from up2date_client import progress
from up2date_client import pkgUtils
from up2date_client import up2dateAuth
from up2date_client import up2dateUtils
from up2date_client import config
import OpenSSL
from up2date_client import up2dateLog
from rhn import rpclib
from rhn.connections import idn_puny_to_unicode
from up2date_client import rhnreg_constants
from up2date_client.pmPlugin import PM_PLUGIN_NAME, PM_PLUGIN_CONF
from up2date_client.gtk_compat import gtk, gobject
try: # python2
import urlparse
except ImportError: # python3
import urllib.parse as urlparse
cfg = config.initUp2dateConfig()
log = up2dateLog.initLog()
gladefile = "/usr/share/rhn/up2date_client/rh_register.glade"
# we need to carry these values between screen, so stash at module scope
username = None
password = None
productInfo = None
hw_activation_code = None
serverType = None
chosen_channel = None
# _hasBaseChannelAndUpdates gets set by the code in create profile which
# registers the system, and is used by hasBaseChannelAndUpdates()
_hasBaseChannelAndUpdates = False
_autoActivatedNumbers = False # used by autoActivateNumbersOnce()
class ReviewLog:
def __init__(self):
self._text = gtk.TextBuffer()
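# weight 700 corresponds to pango.WEIGHT_BOLD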
self._boldTag = self._text.create_tag(weight=700)
def prependBoldText(self, text):
"""Adds a blob of bolded text to the beggining specified section. Adds a newline
after the text.
"""
self.prependText(text)
# Make it bold
startOfText = self._text.get_start_iter()
endOfText = self._text.get_start_iter()
endOfText.forward_chars(len(text) + 1)
self._text.apply_tag(self._boldTag, startOfText, endOfText)
def addBoldText(self, text):
"""Adds a blob of bolded text to the specified section. Adds a newline
after the text.
"""
self.addText(text)
# Make it bold
startOfText = self._text.get_end_iter()
startOfText.backward_chars(len(text) + 1)
end = self._text.get_end_iter()
self._text.apply_tag(self._boldTag, startOfText, end)
def prependText(self, text):
""" Insert a blob of text at the beggining of section. Adds a newline
after the text.
"""
start = self._text.get_start_iter()
self._text.insert(start, text + '\n')
def addText(self, text):
"""Adds a blob of text to the specified section. Adds a newline after
the text.
"""
end = self._text.get_end_iter()
self._text.insert(end, text + '\n')
def addBulletedText(self, text):
self.addText(u'\u2022' + ' ' + text)
def getTextBuffer(self):
return self._text
def usedUniversalActivationKey(self, keyName):
self.addBoldText(_("Notice"))
keys = ', '.join(keyName)
self.addText(rhnreg_constants.ACTIVATION_KEY % (keys))
self.addText('') # adds newline
def pm_plugin_warning(self):
""" Add to review screen warning that plugin is not installed """
# prepending -> reverse order
self.prependText('') # adds newline
self.prependText(rhnreg_constants.PM_PLUGIN_WARNING)
self.prependBoldText(_("Warning"))
def pm_plugin_conf_changed(self):
""" Add to review screen warning that plugin config file has been changed """
# prepending -> reverse order
self.prependText('') # adds newline
self.prependText(rhnreg_constants.PM_PLUGIN_CONF_CHANGED)
self.prependBoldText(_("Notice"))
def pm_plugin_conf_error(self):
""" Add to review screen warning that plugin config file can not be open """
# prepending -> reverse order
self.prependText('') # adds newline
self.prependText(rhnreg_constants.PM_PLUGIN_CONF_ERROR)
self.prependBoldText(_("Warning"))
def channels(self, subscribedChannels, failedChannels):
self.addBoldText(rhnreg_constants.CHANNELS_TITLE)
if len(subscribedChannels) > 0:
self.addText(rhnreg_constants.OK_CHANNELS)
for channel in subscribedChannels:
self.addBulletedText(channel)
self.addText(rhnreg_constants.CHANNELS_SAT_WARNING)
else:
self.addText(rhnreg_constants.NO_BASE_CHANNEL)
if len(failedChannels) > 0:
self.addText(rhnreg_constants.FAILED_CHANNELS)
for channel in failedChannels:
self.addBulletedText(channel)
self.addText('') # adds newline
def systemSlots(self, slots, failedSlots):
self.addBoldText(rhnreg_constants.SLOTS_TITLE)
self.addText(rhnreg_constants.OK_SLOTS)
if len(slots) > 0:
for slot in slots:
self.addBulletedText(slot)
else:
self.addText(rhnreg_constants.NO_SYS_ENTITLEMENT)
if len(failedSlots) > 0:
self.addText(rhnreg_constants.FAILED_SLOTS)
for slot in failedSlots:
self.addBulletedText(slot)
self.addText('') # adds newline
reviewLog = ReviewLog()
class StartPage:
"""There is a section of this page which asks if the user wants to register,
which will only be shown in firstboot. This is specified by the arg to the
constructor.
"""
def __init__(self, firstboot=False):
self.startXml = gtk.glade.XML(gladefile, "startWindowVbox",
domain="rhn-client-tools")
self.startXml.signal_autoconnect({
"onWhyRegisterButtonClicked" : self.startPageWhyRegisterButton,
"onMoreInfoButtonClicked" : self.startPageMoreInfoButton,
})
self.registerNowButton = self.startXml.get_widget("registerNowButton")
if not firstboot:
startWindowVbox = self.startXml.get_widget("startWindowVbox")
chooseToRegisterVbox = self.startXml.get_widget('chooseToRegisterVbox')
startWindowVbox.remove(chooseToRegisterVbox)
def startPageVbox(self):
return self.startXml.get_widget("startWindowVbox")
def startPageWhyRegisterButton(self, button):
WhyRegisterDialog()
def startPageMoreInfoButton(self, button):
MoreInfoDialog()
def startPageRegisterNow(self):
"""Returns True if the user has selected to register now. False if
they've selected to register later.
"""
return self.registerNowButton.get_active()
class ChooseServerPage:
def __init__(self):
self.chooseServerXml = gtk.glade.XML(gladefile,
"chooseServerWindowVbox",
domain="rhn-client-tools")
self.chooseServerXml.signal_autoconnect ({
"onAdvancedNetworkConfigurationButtonClicked" : self.showNetworkConfigDialog
})
self.customServerEntry = self.chooseServerXml.get_widget('satelliteServerEntry')
self.customServerBox = self.chooseServerXml.get_widget('customServerTable')
def chooseServerPagePrepare(self):
self.server = config.getServerlURL()[0]
log.log_debug("server is %s" % self.server)
self.customServerEntry.set_text(self.server)
def chooseServerPageVbox(self):
return self.chooseServerXml.get_widget("chooseServerWindowVbox")
def showNetworkConfigDialog(self, button):
NetworkConfigDialog()
def chooseServerPageApply(self):
"""Returns True if an error happened so we shouldn't advance to the next
screen, but it was already dealt with. False if everything is peachy.
Can raise an SSLCertificateVerifyFailedError.
"""
status = callAndFilterExceptions(
self._chooseServerPageApply,
[up2dateErrors.SSLCertificateVerifyFailedError, up2dateErrors.SSLCertificateFileNotFound],
_("There was an error while applying your choice.")
)
if status is False:
return False
else:
return True
def _chooseServerPageApply(self):
"""Returns True if an error happened so we shouldn't advance to the next
screen, but it was already dealt with. False if everything is peachy.
Can probably raise all sorts of exceptions, but I wish it only raised
SSLCertificateVerifyFailedError.
"""
global serverType
up2dateConfig = config.initUp2dateConfig()
customServer = self.customServerEntry.get_text()
try:
customServer = rhnreg.makeNiceServerUrl(customServer)
except up2dateErrors.InvalidProtocolError:
errorWindow(_('You specified an invalid protocol. Only '
'https and http are allowed.'))
return True
# If they changed the value, write it back to the config file.
if customServer != self.server:
config.setServerURL(customServer)
self.server = customServer
if not cfg['sslCACert']:
up2dateConfig.set('sslCACert',
'/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT')
serverType = rhnreg.getServerType()
NEED_SERVER_MESSAGE = _("You will not be able to successfully register "
"this system without contacting a Spacewalk server.")
# Try to contact the server to see if we have a good cert
try:
setBusyCursor()
# get the caps info before we show the activation page which needs the
# caps. _but_ we need to do this after we configure the network...
rhnreg.getCaps()
setArrowCursor()
except up2dateErrors.SSLCertificateVerifyFailedError:
setArrowCursor()
raise
except up2dateErrors.SSLCertificateFileNotFound:
setArrowCursor()
raise
except up2dateErrors.CommunicationError:
setArrowCursor()
log.log_exception(*sys.exc_info())
protocol, host, path, parameters, query, fragmentIdentifier = urlparse.urlparse(config.getServerlURL()[0])
dialog = messageWindow.BulletedOkDialog(_("Cannot contact selected server"))
dialog.add_text(_("We could not contact the Satellite or Proxy "
"at '%s.'") % host)
dialog.add_bullet(_("Double-check the location - is '%s' "
"correct? If not, you can correct it and "
"try again.") % host)
dialog.add_bullet(_("Make sure the network connection on this "
"system is operational."))
dialog.add_text(NEED_SERVER_MESSAGE)
dialog.run()
return True
except up2dateErrors.RhnServerException:
setArrowCursor()
log.log_exception(*sys.exc_info())
dialog = messageWindow.BulletedOkDialog()
dialog.add_text(_("There was an error communicating with Spacewalk server."))
dialog.add_bullet(_("The server may be in outage mode. You may have to try "
"connecting later."))
dialog.add_bullet(_("You may be running a client that is incompatible with "
"the server."))
dialog.add_text(NEED_SERVER_MESSAGE)
dialog.run()
return True
return False
class LoginPage:
def __init__(self):
# Derived classes must implement a function called goToPageAfterLogin
# which the create account dialog will use.
assert hasattr(self, "goToPageAfterLogin"), \
"LoginPage must be derived from, by a class that implements goToPageAfterLogin."
self.loginXml = gtk.glade.XML(gladefile,
"initialLoginWindowVbox", domain="rhn-client-tools")
self.loginXml.signal_autoconnect ({
"onLoginUserEntryActivate" : self.loginPageAccountInfoActivate,
"onLoginPasswordEntryActivate" : self.loginPageAccountInfoActivate,
})
instructionsLabel = self.loginXml.get_widget('instructionsLabel')
self.loginPageHostedLabelText = instructionsLabel.get_label()
def loginPagePrepare(self):
"""Changes the screen slightly for satellite.
"""
assert serverType in ['satellite']
instructionsLabel = self.loginXml.get_widget('instructionsLabel')
tipIconSatellite = self.loginXml.get_widget('tipIconSatellite')
server = config.getServerlURL()[0]
protocol, host, path, parameters, query, fragmentIdentifier = urlparse.urlparse(server)
satelliteText = _("Please enter your account information for the <b>%s</b> Spacewalk server:") % ("\n" + host)
instructionsLabel.set_label(satelliteText)
#forgotInfoSatellite.show()
#tipIconSatellite.show()
def loginPageVbox(self):
return self.loginXml.get_widget("initialLoginWindowVbox")
def loginPageAccountInfoActivate(self, entry):
"""Handles activation (hitting enter) in the username and password fields.
If a password was entered or the focus is already in the password field,
tries to advance the screen if possible. If focus is elsewhere and
nothing is in the password field, puts the focus in there.
"""
passwordEntry = self.loginXml.get_widget("loginPasswordEntry")
if entry == passwordEntry or len(passwordEntry.get_text()) > 0:
# Automatically advance on enter if possible
if hasattr(self, "onLoginPageNext"):
self.onLoginPageNext(None, None)
else:
passwordEntry.grab_focus()
def loginPageVerify(self):
"""Returns True if there's an error with the user input, False
otherwise.
"""
self.loginPw = self.loginXml.get_widget("loginPasswordEntry")
self.loginUname = self.loginXml.get_widget("loginUserEntry")
global username, password
username = self.loginUname.get_text()
password = self.loginPw.get_text()
# validate / check user name
if self.loginUname.get_text() == "":
# we assume someone else creates this method...
setArrowCursor()
errorWindow(_("You must enter a login."))
self.loginUname.grab_focus()
return True
if self.loginPw.get_text() == "":
setArrowCursor()
errorWindow(_("You must enter a password."))
self.loginPw.grab_focus()
return True
return False
def loginPageApply(self):
"""Returns True if an error happened (the user will have gotten an error
message) or False if everything was ok.
"""
status = callAndFilterExceptions(
self._loginPageApply,
[],
_("There was an error while logging in.")
)
if status is False:
return False
else:
return True
def _loginPageApply(self):
"""Returns False if everything's ok, True if there was a problem."""
try:
setBusyCursor()
self.alreadyRegistered = 1
self.alreadyRegistered = rhnreg.reserveUser(self.loginUname.get_text(),
self.loginPw.get_text())
except up2dateErrors.ValidationError:
e = sys.exc_info()[1]
setArrowCursor()
self.alreadyRegistered = 0
log.log_me("An exception was raised causing login to fail. This is "
"usually correct. Exception information:")
log.log_exception(*sys.exc_info())
errorWindow(e.errmsg)
return True
except up2dateErrors.CommunicationError:
e = sys.exc_info()[1]
setArrowCursor()
print(e.errmsg)
self.fatalError(_("There was an error communicating with the registration server. The message was:\n") + e.errmsg)
return True # fatalError in firstboot will return to here
setArrowCursor()
return False
class ReviewSubscriptionPage:
def __init__(self):
self.reviewSubscriptionXml = gtk.glade.XML(gladefile,
"reviewSubscriptionWindowVbox",
domain="rhn-client-tools")
self.reviewTextView = \
self.reviewSubscriptionXml.get_widget("reviewTextView")
def reviewSubscriptionPagePrepare(self):
self.reviewTextView.set_buffer(reviewLog.getTextBuffer())
def reviewSubscriptionPageVbox(self):
return self.reviewSubscriptionXml.get_widget("reviewSubscriptionWindowVbox")
class ConfirmAllUpdatesDialog:
def __init__(self):
self.xml = gtk.glade.XML(gladefile, "confirmAllUpdatesDialog",
domain="rhn-client-tools")
self.dialog = self.xml.get_widget("confirmAllUpdatesDialog")
self.rc = self.dialog.run()
if self.rc != 1:
self.rc = 0
self.dialog.destroy()
class ChooseChannelPage:
def __init__(self):
self.chooseChannelXml = gtk.glade.XML(gladefile,
"chooseChannelWindowVbox",
domain = "rhn-client-tools")
self.chooseChannelList = self.chooseChannelXml.get_widget("chooseChannelList")
self.chooseChannelList.appears_as_list = True
self.limited_updates_button = self.chooseChannelXml.get_widget("limited_updates_button")
self.all_updates_button = self.chooseChannelXml.get_widget("all_updates_button")
self.chose_all_updates = False
self.chose_default_channel = True
def chooseChannelPageVbox(self):
return self.chooseChannelXml.get_widget("chooseChannelWindowVbox")
def channel_changed_cb(self, combobox):
self.limited_updates_button.set_active(True)
def chooseChannelPagePrepare(self):
global username, password
# The self.eus_channels was populated in chooseChannelShouldBeShown
self.channels = self.eus_channels['channels']
self.receiving_updates = self.eus_channels['receiving_updates']
list_entry = gtk.ListStore(gobject.TYPE_STRING)
self.chooseChannelList.set_model(list_entry)
cell = gtk.CellRendererText()
self.chooseChannelList.pack_start(cell, False)
self.chooseChannelList.connect('changed', self.channel_changed_cb)
if hasattr(self.chooseChannelList, 'remove_text'):
self.chooseChannelList.remove_text(0)
else:
self.chooseChannelList.remove(0)
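# channels already receiving updates get a trailing ' *' marker below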
for label, name in self.channels.items():
if label in self.receiving_updates:
self.channels[label] = name + ' *'
channel_values = list(self.channels.values())
channel_values.sort()
for name in channel_values:
self.chooseChannelList.append_text(name)
self.chooseChannelList.set_active(0)
self.all_updates_button.set_active(True)
setArrowCursor()
def chooseChannelPageApply(self):
if self.limited_updates_button.get_active():
global chosen_channel
self.chose_all_updates = False
# Save the label of the chosen channel
for key, value in self.channels.items():
if value == self.chooseChannelList.get_active_text():
chosen_channel = key
if chosen_channel != self.eus_channels['default_channel']:
self.chose_default_channel = False
else:
self.chose_default_channel = True
return True
else:
self.chose_all_updates = True
def chooseChannelShouldBeShown(self):
'''
Returns True if the choose channel window should be shown, else
returns False.
'''
# does the server support eus?
if rhnreg.server_supports_eus():
global username, password
self.eus_channels = rhnreg.getAvailableChannels(username, password)
if len(self.eus_channels['channels']) > 0:
return True
else:
return False
class CreateProfilePage:
def __init__(self):
self.createProfileXml = gtk.glade.XML(gladefile,
"createProfileWindowVbox",
domain="rhn-client-tools")
self.createProfileXml.signal_autoconnect({
"onViewHardwareButtonClicked" : self.createProfilePageShowHardwareDialog,
"onViewPackageListButtonClicked" : self.createProfilePageShowPackageDialog
})
self.initProfile = None # TODO Is this still needed?
self.activationNoPackages = None # used by fb
self.noChannels = None # used by fb
self.serviceNotEnabled = None # used by fb
def createProfilePagePrepare(self):
callAndFilterExceptions(
self._createProfilePagePrepare,
[],
_("There was an error while assembling information for the profile.")
)
def _createProfilePagePrepare(self):
# There was a comment by these calls that said "part of fix for #144704"
# I don't understand how the code fixed that bug. It might be that
# they had originally been run at screen initialization which would
# break stuff and it was changed to only run them when the user got
# to this screen.
self.getHardware()
self.populateProfile()
def createProfilePageVbox(self):
return self.createProfileXml.get_widget("createProfileWindowVbox")
# we can't do this on module load because there might be a valid interface
# but zero connectivity
def getHardware(self):
try:
self.hardware = hardware.Hardware()
except:
print(_("Error running hardware profile"))
def populateProfile(self):
try:
if not self.initProfile:
profileName = None
hostname = None
ipaddr = None
ip6addr = None
if self.hardware:
for hw in self.hardware:
if 'class' in hw:
if hw['class'] == 'NETINFO':
hostname = hw.get('hostname')
ipaddr = hw.get('ipaddr')
ip6addr = hw.get('ip6addr')
# the check against "unknown" is a bit lame, but it's
# the minimal change to fix #144704
if hostname and (hostname != "unknown"):
profileName = hostname
elif ipaddr:
profileName = ipaddr
elif ip6addr:
profileName = ip6addr
if profileName:
self.createProfileXml.get_widget("systemNameEntry").set_text(profileName)
else:
profileName = "unknown"
self.initProfile = True
except:
unexpectedError(_("There was an error while populating the profile."), sys.exc_info())
setArrowCursor()
def createProfilePageShowHardwareDialog(self, button):
HardwareDialog()
def createProfilePageShowPackageDialog(self, button):
PackageDialog()
def createProfilePageVerify(self):
"""Returns True if an error happened (the user will have gotten an error
message) or False if everything was ok.
"""
systemNameEntry = self.createProfileXml.get_widget("systemNameEntry")
sendHardwareButton = self.createProfileXml.get_widget("sendHardwareButton")
sendPackageListButton = self.createProfileXml.get_widget("sendPackageListButton")
self.sendHardware = sendHardwareButton.get_active()
self.sendPackages = sendPackageListButton.get_active()
if systemNameEntry.get_text() == "":
errorWindow(_("You must choose a name for this profile."))
systemNameEntry.grab_focus()
return True
if not self.sendPackages:
self.activationNoPackages = 1
return False
def createProfilePageApply(self):
"""Returns True if an error happened (the user will have gotten an error
message) or False if everything was ok.
"""
status = callAndFilterExceptions(
self._createProfilePageApply,
[],
_("There was an error while creating the profile.")
)
if status is False:
return False
else:
return True
def _createProfilePageApply(self):
"""Returns False if everything's ok or True if something's wrong."""
setBusyCursor()
pwin = progress.Progress()
pwin.setLabel(_("Registering system and sending profile information. Please wait."))
self.systemId = None
global username, password, hw_activation_code, \
_hasBaseChannelAndUpdates, chosen_channel
other = {}
if hw_activation_code:
other['registration_number'] = hw_activation_code
if chosen_channel is not None:
other['channel'] = chosen_channel
(virt_uuid, virt_type) = rhnreg.get_virt_info()
if not virt_uuid is None:
other['virt_uuid'] = virt_uuid
other['virt_type'] = virt_type
profileName = self.createProfileXml.get_widget("systemNameEntry").get_text()
pwin.setProgress(1, 6)
pwin.setStatusLabel(_("Registering System"))
try:
reg_info = rhnreg.registerSystem2(username, password, profileName, other=other)
log.log_me("Registered system.")
self.systemId = reg_info.getSystemId()
_hasBaseChannelAndUpdates = reg_info.hasBaseAndUpdates()
if reg_info.getUniversalActivationKey():
reviewLog.usedUniversalActivationKey(
reg_info.getUniversalActivationKey())
reviewLog.channels(reg_info.getChannels(), reg_info.getFailedChannels())
reviewLog.systemSlots(reg_info.getSystemSlotDescriptions(),
reg_info.getFailedSystemSlotDescriptions())
except up2dateErrors.CommunicationError:
e = sys.exc_info()[1]
pwin.hide()
self.fatalError(_("Problem registering system:\n") + e.errmsg)
return True # fatalError in firstboot will return to here
except up2dateErrors.RhnUuidUniquenessError:
e = sys.exc_info()[1]
pwin.hide()
self.fatalError(_("Problem registering system:\n") + e.errmsg)
return True # fatalError in firstboot will return to here
except up2dateErrors.InsuffMgmntEntsError:
e = sys.exc_info()[1]
pwin.hide()
self.fatalError(_("Problem registering system:\n") + e.errmsg)
except up2dateErrors.RegistrationDeniedError:
e = sys.exc_info()[1]
pwin.hide()
self.fatalError(_("Problem registering system:\n") + e.errmsg)
except up2dateErrors.InvalidProductRegistrationError:
pwin.hide()
errorWindow(_("The installation number [ %s ] provided is not a valid installation number. Please go back to the previous screen and fix it." %
other['registration_number']))
return True
except up2dateErrors.ActivationKeyUsageLimitError:
pwin.hide()
self.fatalError(rhnreg_constants.ACT_KEY_USAGE_LIMIT_ERROR)
return True # fatalError in firstboot will return to here
except:
setArrowCursor()
pwin.hide()
errorWindow(_("Problem registering system."))
log.log_exception(*sys.exc_info())
return True
pwin.setProgress(2,6)
# write the system id out.
if self.systemId:
if not rhnreg.writeSystemId(self.systemId):
setArrowCursor()
pwin.hide()
errorWindow(_("Problem writing out system id to disk."))
return True
log.log_me("Wrote system id to disk.")
else:
setArrowCursor()
pwin.hide()
errorWindow(_("There was a problem registering this system."))
return True
global productInfo # Contains the user's info (name, e-mail, etc)
if cfg['supportsUpdateContactInfo']:
ret = self.__updateContactInfo(productInfo, username, password, pwin)
else:
ret = self.__registerProduct(productInfo, pwin)
if ret:
return ret
pwin.setProgress(3, 6)
# maybe upload hardware profile
if self.sendHardware:
pwin.setStatusLabel(_("Sending hardware information"))
try:
rhnreg.sendHardware(self.systemId, self.hardware)
log.log_me("Sent hardware profile.")
except:
pwin.setStatusLabel(_("Problem sending hardware information."))
import time
time.sleep(1)
pwin.setProgress(4, 6)
if self.sendPackages:
getArch = 0
if cfg['supportsExtendedPackageProfile']:
getArch = 1
packageList = pkgUtils.getInstalledPackageList(progressCallback = lambda amount,
total: gtk.main_iteration_do(False),
getArch=getArch)
## selection = []
# FIXME
selectedPackages = packageList
## for row in range(self.regPackageArea.n_rows):
## rowData = self.regPackageArea.get_row_data(row)
## if rowData[0] == 1:
## selection.append(rowData[1])
## print("gh270")
## selectedPackages = []
## for pkg in packageList:
## if pkg[0] in selection:
## selectedPackages.append(pkg)
pwin.setStatusLabel(_("Sending package information"))
try:
rhnreg.sendPackages(self.systemId, selectedPackages)
log.log_me("Sent package list.")
except:
pwin.setStatusLabel(_("Problem sending package information."))
import time
time.sleep(1)
# Send virtualization information to the server.
rhnreg.sendVirtInfo(self.systemId)
li = None
try:
li = up2dateAuth.updateLoginInfo()
except up2dateErrors.InsuffMgmntEntsError:
self.serviceNotEnabled = 1
self.fatalError(str(sys.exc_info()[1]), wrap=0)
except up2dateErrors.RhnServerException:
self.fatalError(str(sys.exc_info()[1]), wrap=0)
return True # fatalError in firstboot will return to here
if li:
# see if we have any active channels
if li['X-RHN-Auth-Channels'] == []:
# no channels subscribe
self.noChannels = 1
# enable yum-rhn-plugin / dnf-plugin-spacewalk
try:
present, conf_changed = rhnreg.pluginEnable()
if not present:
reviewLog.pm_plugin_warning()
if conf_changed:
reviewLog.pm_plugin_conf_changed()
except IOError:
e = sys.exc_info()[1]
errorWindow(_("Could not open %s\n%s is not enabled.\n") % (PM_PLUGIN_CONF, PM_PLUGIN_NAME) + e.errmsg)
reviewLog.pm_plugin_conf_error()
rhnreg.spawnRhnCheckForUI()
pwin.setProgress(6,6)
pwin.hide()
setArrowCursor()
return False
def __updateContactInfo(self, productInfo, uname, password, pwin):
return False
def __registerProduct(self, productInfo, pwin):
return False
class ProvideCertificatePage:
def __init__(self):
self.provideCertificateXml = gtk.glade.XML(gladefile,
"provideCertificateWindowVbox",
domain="rhn-client-tools")
self.orig_cert_label_template = self.provideCertificateXml.get_widget("SecurityCertLabel").get_text()
def provideCertificatePageVbox(self):
return self.provideCertificateXml.get_widget("provideCertificateWindowVbox")
def setUrlInWidget(self):
"""
sets the security cert label's server url at runtime
"""
securityCertlabel = self.provideCertificateXml.get_widget("SecurityCertLabel")
securityCertlabel.set_text(self.orig_cert_label_template % config.getServerlURL()[0] )
def provideCertificatePageApply(self):
"""If the 'I have a cert' radio button is selected, this function will
copy the cert to /usr/share/rhn. It will name it
RHN-ORG-TRUSTED-SSL-CERT. It will
change the owner to root and the perms to 644. If a file with
that name already exists it will add a '.save<lowest available integer>' to
the end of the old file's name. It will update the config file to point
to the new cert.
Returns:
0- cert was installed
1- the user doesn't want to provide a cert right now
2- an error occurred and the user was notified
3- the cert was installed ok, but the server doesn't support needed
calls
Doesn't raise any exceptions.
"""
status = callAndFilterExceptions(
self._provideCertificatePageApply,
[],
_("There was an error while installing the certificate.")
)
if status == 0 or status == 1 or status == 3:
return status
else:
return 2
def _provideCertificatePageApply(self):
"""Does what the comment for provideCertificatePageApply says, but might
raise various exceptions.
"""
CERT_INSTALLED = 0
NOT_INSTALLING_CERT = 1
ERROR_WAS_HANDLED = 2
SERVER_TOO_OLD = 3
assert serverType in ['satellite']
try:
provideCertButton = self.provideCertificateXml.get_widget("provideCertificateButton")
provideCert = provideCertButton.get_active()
if not provideCert:
return NOT_INSTALLING_CERT
fileChooser = self.provideCertificateXml.get_widget("certificateChooserButton")
certFile = fileChooser.get_filename()
if certFile is None:
errorWindow(_("You must select a certificate."))
return ERROR_WAS_HANDLED
up2dateConfig = config.initUp2dateConfig()
destinationName = '/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT'
if certFile != destinationName:
if os.path.exists(certFile):
destinationName = certFile
up2dateConfig.set('sslCACert', destinationName)
up2dateConfig.save()
# Take the new cert for a spin
try:
rhnreg.getCaps()
except up2dateErrors.SSLCertificateVerifyFailedError:
server_url = config.getServerlURL()[0]
#TODO: we could point the user to grab the cert from /pub if its sat
#bz439383 - Handle error message for expired certificate
f = open(certFile, "r")
buf = f.read()
f.close()
tempCert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, buf)
if tempCert.has_expired():
errorWindow(rhnreg_constants.SSL_CERT_EXPIRED)
else:
errorWindow(rhnreg_constants.SSL_CERT_ERROR_MSG % (certFile, server_url))
return ERROR_WAS_HANDLED
except OpenSSL.SSL.Error:
# TODO Modify rhnlib to raise a unique exception for the not a
# cert file case.
errorWindow(_("There was an SSL error. This could be because the file you picked was not a certificate file."))
return ERROR_WAS_HANDLED
return CERT_INSTALLED
except IOError:
e = sys.exc_info()[1]
# TODO Provide better messages
message = _("Something went wrong while installing the new certificate:\n")
message = message + e.strerror
errorWindow(message)
return ERROR_WAS_HANDLED
class FinishPage:
"""The finish screen. This can show two different versions: successful and
unsuccessful.
"""
def __init__(self):
self.failedFinishXml = gtk.glade.XML(gladefile,
"failedFinishWindowVbox",
domain="rhn-client-tools")
self.successfulFinishXml = gtk.glade.XML(gladefile,
"successfulFinishWindowVbox",
domain="rhn-client-tools")
# This is an intermediate vbox that this class provides to its users.
# On prepare, the right version of the screen will be put into it.
self.finishContainerVbox = gtk.VBox()
# The vboxes that contain the two versions of the screen:
self.failedFinishVbox = \
self.failedFinishXml.get_widget("failedFinishWindowVbox")
self.successfulFinishVbox = \
self.successfulFinishXml.get_widget("successfulFinishWindowVbox")
# Put one in now (either one) to make the prepare code simpler
self.finishContainerVbox.pack_start(self.failedFinishVbox,
expand=True, fill=True, padding=0)
def finishPageVbox(self):
return self.finishContainerVbox
def finishPagePrepare(self):
containerChildren = self.finishContainerVbox.get_children()
assert len(containerChildren) == 1
self.finishContainerVbox.remove(containerChildren[0])
if hasBaseChannelAndUpdates():
self.finishContainerVbox.pack_start(self.successfulFinishVbox, True, True, 0)
else:
self.finishContainerVbox.pack_start(self.failedFinishVbox, True, True, 0)
class AlreadyRegisteredDialog:
def __init__(self):
"""Returns when dialog closes. Dialog.rc will be set to 1 if the user
clicked continue, or 0 if they clicked cancel or closed the dialog.
"""
self.xml = gtk.glade.XML(gladefile, "alreadyRegisteredDialog",
domain="rhn-client-tools")
self.dialog = self.xml.get_widget("alreadyRegisteredDialog")
server = _('unknown')
oldUsername = _('unknown')
systemId = _('unknown')
try:
# If the serverURL config value is a list, we have no way of knowing
# for sure which one the machine registered against,
# so default to the
# first element.
server = config.getServerlURL()[0]
if server.endswith('/XMLRPC'):
server = server[:-7] # don't display trailing /XMLRPC
systemIdXml = rpclib.xmlrpclib.loads(up2dateAuth.getSystemId())
oldUsername = systemIdXml[0][0]['username']
systemId = systemIdXml[0][0]['system_id']
except:
pass
self.xml.get_widget('serverUrlLabel').set_label(server)
self.xml.get_widget('usernameLabel2').set_label(oldUsername)
self.xml.get_widget('systemIdLabel').set_label(systemId)
self.rc = self.dialog.run()
if self.rc != 1:
self.rc = 0
self.dialog.destroy()
class AlreadyRegisteredSubscriptionManagerDialog:
""" Window with text:
You are already subscribed using subscription manager. Exit. Continue
"""
def __init__(self):
"""Returns when dialog closes. Dialog.rc will be set to 1 if the user
clicked continue, or 0 if they clicked cancel or closed the dialog.
"""
self.xml = gtk.glade.XML(gladefile, "alreadyRegisteredSubscriptionManagerDialog",
domain="rhn-client-tools")
self.dialog = self.xml.get_widget("alreadyRegisteredSubscriptionManagerDialog")
self.rc = self.dialog.run()
if self.rc != 1:
self.rc = 0
self.dialog.destroy()
class ConfirmQuitDialog:
def __init__(self, parent):
"""Returns when dialog closes. Dialog.rc will be set to 0 if the user
clicked "take me back" or closed the dialog, or 1 if they clicked "i'll
register later". I've they clicked I'll register later, the remind file
will be written to disk.
"""
self.xml = gtk.glade.XML(gladefile, "confirmQuitDialog",
domain="rhn-client-tools")
self.dialog = self.xml.get_widget("confirmQuitDialog")
self.dialog.set_transient_for(parent)
self.rc = self.dialog.run()
if self.rc == gtk.RESPONSE_NONE:
self.rc = 0
if self.rc == 1:
try:
rhnreg.createSystemRegisterRemindFile()
except (OSError, IOError):
log.log_me("Reminder file couldn't be written. Details: %s" %
sys.exc_info()[1])
self.dialog.destroy()
class MoreInfoDialog:
def __init__(self):
self.moreInfoXml = gtk.glade.XML(gladefile,
"moreInfoDialog", domain="rhn-client-tools")
self.dlg = self.moreInfoXml.get_widget("moreInfoDialog")
self.moreInfoXml.signal_autoconnect({
"onCloseMoreInfoButtonClicked" : self.finish,
})
def finish(self, button):
self.dlg.hide()
self.rc = 1 # What does this do? Is it needed?
class WhyRegisterDialog:
def __init__(self):
self.whyRegisterXml = gtk.glade.XML(gladefile,
"whyRegisterDialog", domain="rhn-client-tools")
self.dlg = self.whyRegisterXml.get_widget("whyRegisterDialog")
self.whyRegisterXml.signal_autoconnect({
"onBackToRegistrationButtonClicked" : self.finish,
})
def finish(self, button):
self.dlg.hide()
self.rc = 1 # What does this do? Is it needed?
class HardwareDialog:
def __init__(self):
self.hwXml = gtk.glade.XML(
gladefile,
"hardwareDialog", domain="rhn-client-tools")
self.dlg = self.hwXml.get_widget("hardwareDialog")
self.hwXml.get_widget("okButton").connect("clicked", self.finish)
callAndFilterExceptions(
self.populateHardware,
[],
_("There was an error getting the list of hardware.")
)
def populateHardware(self):
# Read all hardware in
self.hardware = hardware.Hardware()
for hw in self.hardware:
if hw['class'] == 'CPU':
label = self.hwXml.get_widget("cpuLabel")
label.set_text(hw['model'])
label = self.hwXml.get_widget("speedLabel")
label.set_text(_("%d MHz") % hw['speed'])
elif hw['class'] == 'MEMORY':
label = self.hwXml.get_widget("ramLabel")
try:
label.set_text(_("%s MB") % hw['ram'])
except:
pass
elif hw['class'] == 'NETINFO':
label = self.hwXml.get_widget("hostnameLabel")
try:
label.set_text(idn_puny_to_unicode(hw['hostname']))
except:
pass
label = self.hwXml.get_widget("ipLabel")
try:
label.set_text(hw['ipaddr'])
except:
pass
label = self.hwXml.get_widget("versionLabel")
try:
distversion = up2dateUtils.getVersion()
except up2dateErrors.RpmError:
e = sys.exc_info()[1]
# TODO Do something similar during registration if the same
# situation can happen. Even better, factor out the code to get the
# hardware.
errorWindow(e.errmsg)
distversion = 'unknown'
label.set_text(distversion)
def finish(self, button):
self.dlg.hide()
self.rc = 1
class PackageDialog:
def __init__(self):
self.swXml = gtk.glade.XML(
gladefile,
"packageDialog", domain="rhn-client-tools")
self.dlg = self.swXml.get_widget("packageDialog")
self.swXml.get_widget("okButton2").connect("clicked", self.finish)
callAndFilterExceptions(
self.populateDialog,
[],
_("There was an error building the list of packages.")
)
def populateDialog(self):
# name-version-release, arch
self.packageStore = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING)
for package in self.getPackageList():
nvr = "%s-%s-%s" % (package['name'], package['version'], package['release'])
arch = package['arch']
self.packageStore.append((nvr, arch))
self.packageTreeView = self.swXml.get_widget("packageTreeView")
self.packageTreeView.set_model(self.packageStore)
self.packageTreeView.set_rules_hint(True)
col = gtk.TreeViewColumn(_("Package"), gtk.CellRendererText(), text=0)
col.set_sort_column_id(0)
col.set_sort_order(gtk.SORT_ASCENDING)
self.packageTreeView.append_column(col)
col = gtk.TreeViewColumn(_("Arch"), gtk.CellRendererText(), text=1)
self.packageTreeView.append_column(col)
self.packageStore.set_sort_column_id(0, gtk.SORT_ASCENDING)
def getPackageList(self):
pwin = progress.Progress()
pwin.setLabel(_("Building a list of RPM packages installed on your system. Please wait."))
packageDialogPackages = pkgUtils.getInstalledPackageList(progressCallback = pwin.setProgress, getArch=1)
pwin.hide()
return packageDialogPackages
def finish(self, button):
self.dlg.hide()
self.rc = 1
class NetworkConfigDialog:
"""This is the dialog that allows setting http proxy settings.
It uses the instant apply paradigm or whatever you wanna call it that the
gnome HIG recommends. Whenever a toggle button is flipped or a text entry
changed, the new setting will be saved.
"""
def __init__(self):
self.xml = gtk.glade.XML(gladefile, "networkConfigDialog",
domain="rhn-client-tools")
# Get widgets we'll need to access
self.dlg = self.xml.get_widget("networkConfigDialog")
self.enableProxyButton = self.xml.get_widget("enableProxyButton")
self.enableProxyAuthButton = self.xml.get_widget("enableProxyAuthButton")
self.proxyEntry = self.xml.get_widget("proxyEntry")
self.proxyUserEntry = self.xml.get_widget("proxyUserEntry")
self.proxyPasswordEntry = self.xml.get_widget("proxyPasswordEntry")
try:
self.cfg = config.initUp2dateConfig()
except:
errorWindow(_("There was an error loading your "
"configuration. Make sure that\nyou "
"have read access to /etc/sysconfig/rhn."))
# Need to load values before connecting signals because when the dialog
# starts up it seems to trigger the signals which overwrites the config
# with the blank values.
self.setInitialValues()
self.enableProxyButton.connect("toggled", self.enableAction)
self.enableProxyAuthButton.connect("toggled", self.enableAction)
self.enableProxyButton.connect("toggled", self.writeValues)
self.enableProxyAuthButton.connect("toggled", self.writeValues)
self.proxyEntry.connect("focus-out-event", self.writeValues)
self.proxyUserEntry.connect("focus-out-event", self.writeValues)
self.proxyPasswordEntry.connect("focus-out-event", self.writeValues)
self.xml.get_widget("closeButton").connect("clicked", self.close)
self.dlg.show()
def setInitialValues(self):
self.xml.get_widget("enableProxyButton").set_active(self.cfg["enableProxy"])
self.enableAction(self.xml.get_widget("enableProxyButton"))
self.xml.get_widget("proxyEntry").set_text(self.cfg["httpProxy"])
self.xml.get_widget("enableProxyAuthButton").set_active(self.cfg["enableProxyAuth"])
self.enableAction(self.xml.get_widget("enableProxyAuthButton"))
self.xml.get_widget("proxyUserEntry").set_text(str(self.cfg["proxyUser"]))
self.xml.get_widget("proxyPasswordEntry").set_text(str(self.cfg["proxyPassword"]))
def writeValues(self, widget=None, dummy=None):
self.cfg.set("enableProxy",
int(self.xml.get_widget("enableProxyButton").get_active()))
self.cfg.set("httpProxy",
self.xml.get_widget("proxyEntry").get_text())
self.cfg.set("enableProxyAuth",
int(self.xml.get_widget("enableProxyAuthButton").get_active()))
self.cfg.set("proxyUser",
str(self.xml.get_widget("proxyUserEntry").get_text()))
self.cfg.set("proxyPassword",
str(self.xml.get_widget("proxyPasswordEntry").get_text()))
try:
self.cfg.save()
except:
errorWindow(_(
"There was an error saving your configuration. "
"Make sure that\nyou own %s.") % self.cfg.fileName)
def close(self, button):
self.dlg.hide()
def enableAction(self, button):
if button.get_name() == "enableProxyButton":
self.xml.get_widget("proxyEntry").set_sensitive(button.get_active())
self.xml.get_widget("proxyEntry").grab_focus()
elif button.get_name() == "enableProxyAuthButton":
self.xml.get_widget("proxyUserEntry").set_sensitive(button.get_active())
self.xml.get_widget("proxyPasswordEntry").set_sensitive(button.get_active())
self.xml.get_widget("usernameLabel").set_sensitive(button.get_active())
self.xml.get_widget("passwordLabel").set_sensitive(button.get_active())
def errorWindow(message):
messageWindow.ErrorDialog(messageWindow.wrap_text(message))
def unexpectedError(message, exc_info=None):
"""Shows an error dialog with the message and logs that an error happened.
This function is designed to be used in an except block like so:
unexpectedError(_("Your error here."), sys.exc_info())
"""
setArrowCursor()
logFile = cfg['logFile'] or '/var/log/up2date'
message = message + "\n" + (_("This error shouldn't have happened. If you'd "
"like to help us improve this program, please "
"file a bug at bugzilla.redhat.com. Including "
"the relevant parts of '%s' would be very "
"helpful. Thanks!") % logFile)
errorWindow(message)
if exc_info:
(etype, value, stack_trace) = exc_info
log.log_exception(etype, value, stack_trace)
else:
log.log_me("An unexpected error happened, but exc_info wasn't provided.")
def callAndFilterExceptions(function, allowedExceptions,
disallowedExceptionMessage, errorHandler=unexpectedError):
"""Calls function and limits the exceptions that can be raised to those in
the list provided and SystemExit. If an exception is raised which isn't
allowed, errorHandler will be called and then None will be returned.
errorHandler defaults to the unexpectedError function and will be passed
disallowedExceptionMessage. If it is overridden, the function provided must
take a string and a tuple (see below for details). If no exceptions are
raised, the function's return value is returned.
I need this function because if some of the functions in the Pages raise
unexpected exceptions, the druid might advance when it shouldn't or go to
the wrong page. I think it's shorter and more readable to factor this out
rather than have similar functionality in all those functions.
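A typical call (mirroring how the page-apply methods above use it; the
method name here is hypothetical):
status = callAndFilterExceptions(
self._somePageApply,
[up2dateErrors.SSLCertificateVerifyFailedError],
_("There was an error while applying your choice."))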
"""
assert callable(function)
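# side effect: this mutates the caller's allowedExceptions list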
allowedExceptions.append(SystemExit)
try:
return function()
except:
(exceptionType, exception, stackTrace) = sys.exc_info()
if exceptionType in allowedExceptions:
raise
else:
errorHandler(disallowedExceptionMessage,
(exceptionType, exception, stackTrace))
def hasBaseChannelAndUpdates():
"""Returns a bool indicating whether the system has registered, subscribed
to a base channel, and has at least update entitlements.
Uses information from the most recent time the create profile screen was run
through.
"""
global _hasBaseChannelAndUpdates
return _hasBaseChannelAndUpdates
def setBusyCursor():
"""Dummy function that will be overidden by rhn_register's standalone gui
and firstboot in different ways.
"""
pass
def setArrowCursor():
"""Dummy function that will be overidden by rhn_register's standalone gui
and firstboot in different ways.
"""
pass
| gpl-2.0 |
xxd3vin/spp-sdk | opt/Python27/Lib/site-packages/numpy/linalg/tests/test_regression.py | 94 | 2246 | """ Test functions for linalg module
"""
from numpy.testing import *
import numpy as np
from numpy import linalg, arange, float64, array, dot, transpose
rlevel = 1
class TestRegression(TestCase):
def test_eig_build(self, level = rlevel):
"""Ticket #652"""
rva = array([1.03221168e+02 +0.j,
-1.91843603e+01 +0.j,
-6.04004526e-01+15.84422474j,
-6.04004526e-01-15.84422474j,
-1.13692929e+01 +0.j,
-6.57612485e-01+10.41755503j,
-6.57612485e-01-10.41755503j,
1.82126812e+01 +0.j,
1.06011014e+01 +0.j ,
7.80732773e+00 +0.j ,
-7.65390898e-01 +0.j,
1.51971555e-15 +0.j ,
-1.51308713e-15 +0.j])
a = arange(13*13, dtype = float64)
a.shape = (13,13)
a = a % 17
va, ve = linalg.eig(a)
va.sort()
rva.sort()
assert_array_almost_equal(va, rva)
def test_eigh_build(self, level = rlevel):
"""Ticket 662."""
rvals = [68.60568999, 89.57756725, 106.67185574]
cov = array([[ 77.70273908, 3.51489954, 15.64602427],
[3.51489954, 88.97013878, -1.07431931],
[15.64602427, -1.07431931, 98.18223512]])
vals, vecs = linalg.eigh(cov)
assert_array_almost_equal(vals, rvals)
def test_svd_build(self, level = rlevel):
"""Ticket 627."""
a = array([[ 0., 1.], [ 1., 1.], [ 2., 1.], [ 3., 1.]])
m, n = a.shape
u, s, vh = linalg.svd(a)
b = dot(transpose(u[:, n:]), a)
assert_array_almost_equal(b, np.zeros((2, 2)))
def test_norm_vector_badarg(self):
"""Regression for #786: Froebenius norm for vectors raises
TypeError."""
self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
def test_lapack_endian(self):
# For bug #1482
a = array([[5.7998084, -2.1825367 ],
[-2.1825367, 9.85910595]], dtype='>f8')
b = array(a, dtype='<f8')
ap = linalg.cholesky(a)
bp = linalg.cholesky(b)
assert_array_equal(ap, bp)
if __name__ == '__main__':
run_module_suite()
| mit |
tachang/pygeocoder | test.py | 2 | 9214 | #!/usr/bin/env python
#
# Xiao Yu - Montreal - 2010
# Based on googlemaps by John Kleint
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
"""
Unit tests for pygeocoder.
"""
import unittest
from pygeocoder import Geocoder
from pygeolib import GeocoderResult
import json
def searchkey(obj, key):
"""
Does BFS on JSON-like object `obj` to find a dict with a key equal to `key`
and returns the associated value. Returns None if it didn't find `key`.
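For example (a minimal, hand-checkable case):
>>> searchkey({'a': [{'b': 2}]}, 'b')
2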
"""
queue = [obj]
while queue:
item = queue.pop(0)
if type(item) is list:
queue.extend(item)
elif type(item) is dict:
for k in item:
if k == key:
return item[k]
else:
queue.append(item[k])
return None
MOCK_DATA = """
[
{
"address_components": [
{
"long_name": "DMV",
"short_name": "DMV",
"types": [
"point_of_interest",
"establishment"
]
},
{
"long_name": "20725",
"short_name": "20725",
"types": [
"street_number"
]
},
{
"long_name": "Sherman Way",
"short_name": "Sherman Way",
"types": [
"route"
]
},
{
"long_name": "Winnetka",
"short_name": "Winnetka",
"types": [
"neighborhood",
"political"
]
},
{
"long_name": "Los Angeles",
"short_name": "Los Angeles",
"types": [
"locality",
"political"
]
},
{
"long_name": "Los Angeles",
"short_name": "Los Angeles",
"types": [
"administrative_area_level_2",
"political"
]
},
{
"long_name": "California",
"short_name": "CA",
"types": [
"administrative_area_level_1",
"political"
]
},
{
"long_name": "United States",
"short_name": "US",
"types": [
"country",
"political"
]
},
{
"long_name": "91306",
"short_name": "91306",
"types": [
"postal_code"
]
}
],
"formatted_address": "DMV, 20725 Sherman Way, Winnetka, CA 91306, USA",
"geometry": {
"location": {
"lat": 34.20133510,
"lng": -118.58479930
},
"location_type": "APPROXIMATE",
"viewport": {
"northeast": {
"lat": 34.2105630,
"lng": -118.56879190
},
"southwest": {
"lat": 34.19210620,
"lng": -118.60080670
}
}
},
"postcode_localities": [],
"types": [
"point_of_interest",
"establishment"
]
},
{
"address_components": [
{
"long_name": "Driver's License Office",
"short_name": "DMV",
"types": [
"point_of_interest",
"establishment"
]
},
{
"long_name": "Audubon Village Shopping Center",
"short_name": "Audubon Village Shopping Center",
"types": [
"establishment"
]
},
{
"long_name": "2447",
"short_name": "2447",
"types": [
"street_number"
]
},
{
"long_name": "North Union Boulevard",
"short_name": "N Union Blvd",
"types": [
"route"
]
},
{
"long_name": "East Colorado Springs",
"short_name": "East Colorado Springs",
"types": [
"neighborhood",
"political"
]
},
{
"long_name": "Colorado Springs",
"short_name": "Colorado Springs",
"types": [
"locality",
"political"
]
},
{
"long_name": "El Paso",
"short_name": "El Paso",
"types": [
"administrative_area_level_2",
"political"
]
},
{
"long_name": "Colorado",
"short_name": "CO",
"types": [
"administrative_area_level_1",
"political"
]
},
{
"long_name": "United States",
"short_name": "US",
"types": [
"country",
"political"
]
},
{
"long_name": "80909",
"short_name": "80909",
"types": [
"postal_code"
]
},
{
"long_name": "1107",
"short_name": "1107",
"types": []
}
],
"formatted_address": "Driver's License Office, Audubon Village Shopping Center, 2447 North Union Boulevard, Colorado Springs, CO 80909, USA",
"geometry": {
"location": {
"lat": 38.86735470,
"lng": -104.79270460
},
"location_type": "APPROXIMATE",
"viewport": {
"northeast": {
"lat": 38.86870368029150,
"lng": -104.7913556197085
},
"southwest": {
"lat": 38.86600571970850,
"lng": -104.7940535802915
}
}
},
"postcode_localities": [],
"types": [
"point_of_interest",
"establishment"
]
}
]
"""
class Test(unittest.TestCase):
"""
Unit tests for pygeocoder.
"""
def test_geocoder_results(self):
"""Test GeocoderResult's indexing and iteration access"""
results = GeocoderResult(json.loads(MOCK_DATA))
self.assertEqual(results[1].neighborhood, "East Colorado Springs")
self.assertEqual(results.establishment, "DMV")
for index, result in enumerate(results):
if index == 0:
self.assertEqual(result.neighborhood, "Winnetka")
elif index == 1:
self.assertEqual(result.street_number, "2447")
else:
self.fail()
def test_geocode(self):
"""Test pygeocoder geocode()"""
addr = '1600 amphitheatre mountain view ca'
g = Geocoder()
result = g.geocode(addr)
self.assertEqual(result.country__long_name, 'United States')
self.assertEqual(result.postal_code, '94043')
self.assertEqual(result.street_number, '1600')
self.assertEqual(result.route, 'Amphitheatre Parkway')
self.assertEqual(result.locality, 'Mountain View')
self.assertEqual(result.city, 'Mountain View')
self.assertEqual(result.administrative_area_level_1, 'California')
self.assertEqual(result.state, 'California')
self.assertEqual(result.county, 'Santa Clara')
self.assertEqual(result.country, 'United States')
self.assertEqual(result.formatted_address, '1600 Amphitheatre Parkway, Mountain View, CA 94043, USA')
self.assertEqual(result.valid_address, True)
lat, lng = result.coordinates
self.assertAlmostEqual(lat, 37.4228576, 2)
self.assertAlmostEqual(lng, -122.0850647, 2)
def test_reverse_geocode(self):
"""
Test pygeocoder reverse_geocode()
"""
lat, lng = 38.897096, -77.036545
result = Geocoder.reverse_geocode(lat, lng)
self.assertEqual(result.country__short_name, 'US')
self.assertEqual(result.postal_code, '20500')
self.assertEqual(result.street_number, '1600')
self.assertEqual(result.route, 'Pennsylvania Avenue Northwest')
self.assertEqual(result.administrative_area_level_1, 'District of Columbia')
self.assertEqual(result.city, 'Washington')
self.assertEqual(result.state, 'District of Columbia')
self.assertEqual(result.state__short_name, 'DC')
self.assertEqual(result.country, 'United States')
addr = result.formatted_address
self.assertEqual(addr, "1600 Pennsylvania Avenue Northwest, President's Park, Washington, DC 20500, USA")
lat2, lng2 = result.coordinates
self.assertAlmostEqual(lat, lat2, 3)
self.assertAlmostEqual(lng, lng2, 3)
self.assertAlmostEqual(lat, result.latitude, 3)
self.assertAlmostEqual(lng, result.longitude, 3)
self.assertTrue(result.count > 1)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
nis-sdn/odenos | src/main/python/org/o3project/odenos/core/component/network/flow/flow_set.py | 6 | 2647 | # -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from copy import deepcopy
from org.o3project.odenos.core.component.network.flow.flow\
import Flow
from org.o3project.odenos.core.component.network.flow.basic.basic_flow\
import BasicFlow
from org.o3project.odenos.core.component.network.flow.ofpflow.ofp_flow\
import OFPFlow
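# Note: BasicFlow and OFPFlow appear unused, but create_from_packed() below
# resolves flow classes by name via globals(), so both imports must stay at
# module scope.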
class FlowSet(object):
# property key
TYPE = "type"
VERSION = "version"
PRIORITY = "priority"
FLOWS = "flows"
def __init__(self, type_, version, priority, flows):
self._body = {
self.TYPE: type_,
self.VERSION: version,
self.PRIORITY: priority,
self.FLOWS: flows
}
@property
def type(self):
return self._body[self.TYPE]
@property
def version(self):
return self._body[self.VERSION]
@property
def priority(self):
return self._body[self.PRIORITY]
@property
def flows(self):
return self._body[self.FLOWS]
@classmethod
def create_from_packed(cls, packed):
version = None
if cls.VERSION in packed:
version = packed[cls.VERSION]
flows = {}
for flow_id, flow in packed[cls.FLOWS].items():
flows[flow_id] = globals()[flow[Flow.TYPE]].\
create_from_packed(flow)
return cls(packed[cls.TYPE], version,
packed[cls.PRIORITY], flows)
def packed_object(self):
object_ = deepcopy(self._body)
flows = {}
for flow_id in self.flows:
flows[flow_id] = self.flows[flow_id].packed_object()
object_[self.FLOWS] = flows
return object_
| apache-2.0 |
AlexV1990/saveall | utils/modconf.py | 1 | 7590 | #! /usr/bin/env python3
# coding: utf-8
'''
Functions for manipulating and validating the configuration file
'''
import json
import utils.misc as misc
CONF_FILE_NAME = "conf/conf.json"
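# Layout of conf.json as implied by the accessors below (illustrative):
# {
#     "EQUIPEMENTS": {"<name>": {"NOM": ..., "IP": ..., "TYPE": ...,
#                                "LOGIN": ..., "MDP": ...}},
#     "FICHIERS": {"<name>": {"NOM": ..., "PATH": ..., "TYPE": ...,
#                             "EQUIPEMENT": ...}}
# }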
'''
check_conf_valid: checks that the configuration file is valid JSON
input: no argument (the file name comes from the global CONF_FILE_NAME)
output: returns 0 if the file is valid, -1 otherwise
'''
def check_conf_valid():
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
return 0
except:
return -1
'''
get_list_equipment_from_conf: returns the list of equipment items stored in conf.json
input: no argument
output: list of tuples (equipment name, equipment IP)
'''
def get_list_equipment_from_conf():
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
list_eq = []
for dat in data["EQUIPEMENTS"]:
var_nom = str(data["EQUIPEMENTS"][dat]["NOM"])
var_ip = str(data["EQUIPEMENTS"][dat]["IP"])
tuple_eq = (var_nom, var_ip)
list_eq.append(tuple_eq)
return list_eq
'''
get_list_equipment_from_conf_for_checklist: returns the equipment list from conf.json, formatted for a checklist
input: no argument
output: list of tuples (equipment name, "", False)
'''
def get_list_equipment_from_conf_for_checklist():
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
list_eq = []
for dat in data["EQUIPEMENTS"]:
var_nom = str(data["EQUIPEMENTS"][dat]["NOM"])
tuple_eq = (var_nom, "", False)
list_eq.append(tuple_eq)
return list_eq
'''
get_list_files_from_conf: returns the list of files stored in conf.json
input: no argument
output: list of tuples (file name, file path)
'''
def get_list_files_from_conf():
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
list_fic = []
for dat in data["FICHIERS"]:
var_nom = str(data["FICHIERS"][dat]["NOM"])
var_path = str(data["FICHIERS"][dat]["PATH"])
tuple_eq = (var_nom, var_path)
list_fic.append(tuple_eq)
return list_fic
'''
delete_file_from_conf: removes a file entry from the configuration file
input: name of the file to remove
output: 0 on success, -1 otherwise
'''
def delete_file_from_conf(file_name):
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
for element in data["FICHIERS"]:
if file_name == data["FICHIERS"][element]["NOM"]:
data["FICHIERS"].pop(element)
break
with open(CONF_FILE_NAME, 'w') as data_file:
data = json.dump(data, data_file)
return 0
except:
return -1
'''
delete_equipment_from_conf: removes an equipment entry from the configuration file
input: name of the equipment to remove
output: 0 on success, -1 otherwise
'''
def delete_equipment_from_conf(equipment_name):
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
for element in data["EQUIPEMENTS"]:
if equipment_name == data["EQUIPEMENTS"][element]["NOM"]:
data["EQUIPEMENTS"].pop(element)
break
with open(CONF_FILE_NAME, 'w') as data_file:
data = json.dump(data, data_file)
return 0
except:
return -1
'''
add_file_to_conf: adds a file entry to the configuration file
input: list of file parameters [name, path, type, equipment]
output: 0 on success, -1 if the name already exists, -2 on any other error
'''
def add_file_to_conf(list_params_file):
file_name = list_params_file[0]
file_path = list_params_file[1]
file_type = list_params_file[2]
equipment_name = list_params_file[3]
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
# check that the name is unique
for element in data["FICHIERS"]:
if file_name == data["FICHIERS"][element]["NOM"]:
return -1
# format the file parameters as JSON
data["FICHIERS"][file_name] = {}
data["FICHIERS"][file_name]["NOM"] = file_name
data["FICHIERS"][file_name]["TYPE"] = file_type
data["FICHIERS"][file_name]["EQUIPEMENT"] = equipment_name
data["FICHIERS"][file_name]["PATH"] = file_path
# write the updated configuration file
with open(CONF_FILE_NAME, 'w') as data_file:
data = json.dump(data, data_file)
return 0
except:
return -2
'''
add_equipment_to_conf: adds an equipment entry to the configuration file
input: list of equipment parameters [name, IP, type, login, password]
output: 0 on success, -1 if the name already exists, -2 on any other error
'''
def add_equipment_to_conf(list_params_equipment):
equipment_name = list_params_equipment[0]
equipment_ip = list_params_equipment[1]
equipment_type = list_params_equipment[2]
equipment_login = list_params_equipment[3]
equipment_mdp = list_params_equipment[4]
try:
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
# check that the name is unique
for element in data["EQUIPEMENTS"]:
if equipment_name == data["EQUIPEMENTS"][element]["NOM"]:
return -1
# format the equipment parameters as JSON
data["EQUIPEMENTS"][equipment_name] = {}
data["EQUIPEMENTS"][equipment_name]["NOM"] = equipment_name
data["EQUIPEMENTS"][equipment_name]["IP"] = equipment_ip
data["EQUIPEMENTS"][equipment_name]["TYPE"] = equipment_type
data["EQUIPEMENTS"][equipment_name]["LOGIN"] = equipment_login
data["EQUIPEMENTS"][equipment_name]["MDP"] = equipment_mdp
# write the updated configuration file
with open(CONF_FILE_NAME, 'w') as data_file:
data = json.dump(data, data_file)
return 0
except:
return -2
'''
check_list_equipment_valid: checks that a request to add an equipment entry is valid
input: list of equipment parameters [name, IP, type, login, password]
output: returns 0 if the equipment can be added
-1 if the equipment name is not unique
-2 if the supplied IP is not valid
-3 if the IP is not unique
-4 if the type is not "DB" (database), "S" (server) or "R" (network device)
-5 if any field is left empty
'''
def check_list_equipment_valid(list_params_equipment):
equipment_name = list_params_equipment[0]
equipment_ip = list_params_equipment[1]
equipment_type = list_params_equipment[2]
equipment_login = list_params_equipment[3]
equipment_mdp = list_params_equipment[4]
# check that all fields are filled in
if equipment_name == "" or equipment_ip == "" or equipment_type == "" or equipment_login == "" or equipment_mdp == "":
return -5
# open the configuration file
with open(CONF_FILE_NAME) as data_file:
data = json.load(data_file)
# check that the name is unique
if equipment_name in data["EQUIPEMENTS"]:
return -1
# check that the IP is valid
if misc.is_valid_ipv4_address(equipment_ip) == False:
return -2
# check that the IP is unique within the configuration file
for element in data["EQUIPEMENTS"]:
if equipment_ip in data["EQUIPEMENTS"][element]["IP"]:
return -3
# check the equipment type
if equipment_type != "DB" and equipment_type != "S" and equipment_type != "R":
return -4
return 0
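# Illustrative usage (hypothetical values):
#   params = ["srv1", "192.168.0.10", "S", "admin", "secret"]
#   if check_list_equipment_valid(params) == 0:
#       add_equipment_to_conf(params)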
| mit |
naparuba/kunai | opsbro/misc/internalrsa/pyasn1/codec/ber/decoder.py | 185 | 36629 | # BER decoder
from pyasn1.type import tag, base, univ, char, useful, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat.octets import oct2int, octs2ints, isOctetsType
from pyasn1 import debug, error
class AbstractDecoder:
protoComponent = None
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
raise error.PyAsn1Error('Decoder not implemented for %s' % (tagSet,))
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,))
class AbstractSimpleDecoder(AbstractDecoder):
tagFormats = (tag.tagFormatSimple,)
def _createComponent(self, asn1Spec, tagSet, value=None):
if tagSet[0][1] not in self.tagFormats:
raise error.PyAsn1Error('Invalid tag format %r for %r' % (tagSet[0], self.protoComponent,))
if asn1Spec is None:
return self.protoComponent.clone(value, tagSet)
elif value is None:
return asn1Spec
else:
return asn1Spec.clone(value)
class AbstractConstructedDecoder(AbstractDecoder):
tagFormats = (tag.tagFormatConstructed,)
def _createComponent(self, asn1Spec, tagSet, value=None):
if tagSet[0][1] not in self.tagFormats:
raise error.PyAsn1Error('Invalid tag format %r for %r' % (tagSet[0], self.protoComponent,))
if asn1Spec is None:
return self.protoComponent.clone(tagSet)
else:
return asn1Spec.clone()
class EndOfOctetsDecoder(AbstractSimpleDecoder):
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
return eoo.endOfOctets, substrate[length:]
class ExplicitTagDecoder(AbstractSimpleDecoder):
protoComponent = univ.Any('')
tagFormats = (tag.tagFormatConstructed,)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
if substrateFun:
return substrateFun(
self._createComponent(asn1Spec, tagSet, ''),
substrate, length
)
head, tail = substrate[:length], substrate[length:]
value, _ = decodeFun(head, asn1Spec, tagSet, length)
return value, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
if substrateFun:
return substrateFun(
self._createComponent(asn1Spec, tagSet, ''),
substrate, length
)
value, substrate = decodeFun(substrate, asn1Spec, tagSet, length)
terminator, substrate = decodeFun(substrate)
if eoo.endOfOctets.isSameTypeWith(terminator) and \
terminator == eoo.endOfOctets:
return value, substrate
else:
raise error.PyAsn1Error('Missing end-of-octets terminator')
explicitTagDecoder = ExplicitTagDecoder()
class IntegerDecoder(AbstractSimpleDecoder):
protoComponent = univ.Integer(0)
precomputedValues = {
'\x00': 0,
'\x01': 1,
'\x02': 2,
'\x03': 3,
'\x04': 4,
'\x05': 5,
'\x06': 6,
'\x07': 7,
'\x08': 8,
'\x09': 9,
'\xff': -1,
'\xfe': -2,
'\xfd': -3,
'\xfc': -4,
'\xfb': -5
}
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if not head:
return self._createComponent(asn1Spec, tagSet, 0), tail
if head in self.precomputedValues:
value = self.precomputedValues[head]
else:
firstOctet = oct2int(head[0])
if firstOctet & 0x80:
value = -1
else:
value = 0
for octet in head:
value = value << 8 | oct2int(octet)
return self._createComponent(asn1Spec, tagSet, value), tail
class BooleanDecoder(IntegerDecoder):
protoComponent = univ.Boolean(0)
def _createComponent(self, asn1Spec, tagSet, value=None):
return IntegerDecoder._createComponent(self, asn1Spec, tagSet, value and 1 or 0)
class BitStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.BitString(())
tagFormats = (tag.tagFormatSimple, tag.tagFormatConstructed)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if tagSet[0][1] == tag.tagFormatSimple: # XXX what tag to check?
if not head:
raise error.PyAsn1Error('Empty substrate')
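# In BER, the first content octet of a primitive BIT STRING gives the
# number of unused (padding) bits in the final octet, in the range 0..7.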
trailingBits = oct2int(head[0])
if trailingBits > 7:
raise error.PyAsn1Error(
'Trailing bits overflow %s' % trailingBits
)
head = head[1:]
lsb = p = 0; l = len(head)-1; b = ()
while p <= l:
if p == l:
lsb = trailingBits
j = 7
o = oct2int(head[p])
while j >= lsb:
b = b + ((o>>j)&0x01,)
j = j - 1
p = p + 1
return self._createComponent(asn1Spec, tagSet, b), tail
r = self._createComponent(asn1Spec, tagSet, ())
if substrateFun:
return substrateFun(r, substrate, length)
while head:
component, head = decodeFun(head)
r = r + component
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet, '')
if substrateFun:
return substrateFun(r, substrate, length)
while substrate:
component, substrate = decodeFun(substrate)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
r = r + component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return r, substrate
class OctetStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.OctetString('')
tagFormats = (tag.tagFormatSimple, tag.tagFormatConstructed)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if tagSet[0][1] == tag.tagFormatSimple: # XXX what tag to check?
return self._createComponent(asn1Spec, tagSet, head), tail
r = self._createComponent(asn1Spec, tagSet, '')
if substrateFun:
return substrateFun(r, substrate, length)
while head:
component, head = decodeFun(head)
r = r + component
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet, '')
if substrateFun:
return substrateFun(r, substrate, length)
while substrate:
component, substrate = decodeFun(substrate)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
r = r + component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return r, substrate
class NullDecoder(AbstractSimpleDecoder):
protoComponent = univ.Null('')
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
r = self._createComponent(asn1Spec, tagSet)
if head:
raise error.PyAsn1Error('Unexpected %d-octet substrate for Null' % length)
return r, tail
class ObjectIdentifierDecoder(AbstractSimpleDecoder):
protoComponent = univ.ObjectIdentifier(())
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if not head:
raise error.PyAsn1Error('Empty substrate')
# Get the first subid
subId = oct2int(head[0])
oid = divmod(subId, 40)
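# e.g. a first octet of 0x2B (43) gives divmod(43, 40) == (1, 3), the
# leading arcs of OIDs such as 1.3.6.1 (iso.org.dod.internet)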
index = 1
substrateLen = len(head)
while index < substrateLen:
subId = oct2int(head[index])
index = index + 1
if subId == 128:
# ASN.1 spec forbids leading zeros (0x80) in sub-ID OID
# encoding, tolerating it opens a vulnerability.
# See http://www.cosic.esat.kuleuven.be/publications/article-1432.pdf page 7
raise error.PyAsn1Error('Invalid leading 0x80 in sub-OID')
elif subId > 128:
# Construct subid from a number of octets
nextSubId = subId
subId = 0
while nextSubId >= 128:
subId = (subId << 7) + (nextSubId & 0x7F)
if index >= substrateLen:
raise error.SubstrateUnderrunError(
'Short substrate for sub-OID past %s' % (oid,)
)
nextSubId = oct2int(head[index])
index = index + 1
subId = (subId << 7) + nextSubId
oid = oid + (subId,)
return self._createComponent(asn1Spec, tagSet, oid), tail
class RealDecoder(AbstractSimpleDecoder):
protoComponent = univ.Real()
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if not head:
return self._createComponent(asn1Spec, tagSet, 0.0), tail
fo = oct2int(head[0]); head = head[1:]
if fo & 0x80: # binary encoding
n = (fo & 0x03) + 1
if n == 4:
n = oct2int(head[0])
eo, head = head[:n], head[n:]
if not eo or not head:
raise error.PyAsn1Error('Real exponent screwed')
e = oct2int(eo[0]) & 0x80 and -1 or 0
while eo: # exponent
e <<= 8
e |= oct2int(eo[0])
eo = eo[1:]
p = 0
while head: # value
p <<= 8
p |= oct2int(head[0])
head = head[1:]
if fo & 0x40: # sign bit
p = -p
value = (p, 2, e)
elif fo & 0x40: # infinite value
value = fo & 0x01 and '-inf' or 'inf'
elif fo & 0xc0 == 0: # character encoding
try:
if fo & 0x3 == 0x1: # NR1
value = (int(head), 10, 0)
elif fo & 0x3 == 0x2: # NR2
value = float(head)
elif fo & 0x3 == 0x3: # NR3
value = float(head)
else:
raise error.SubstrateUnderrunError(
'Unknown NR (tag %s)' % fo
)
except ValueError:
raise error.SubstrateUnderrunError(
'Bad character Real syntax'
)
else:
raise error.SubstrateUnderrunError(
'Unknown encoding (tag %s)' % fo
)
return self._createComponent(asn1Spec, tagSet, value), tail
class SequenceDecoder(AbstractConstructedDecoder):
protoComponent = univ.Sequence()
def _getComponentTagMap(self, r, idx):
try:
return r.getComponentTagMapNearPosition(idx)
except error.PyAsn1Error:
return
def _getComponentPositionByType(self, r, t, idx):
return r.getComponentPositionNearType(t, idx)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
r = self._createComponent(asn1Spec, tagSet)
idx = 0
if substrateFun:
return substrateFun(r, substrate, length)
while head:
asn1Spec = self._getComponentTagMap(r, idx)
component, head = decodeFun(head, asn1Spec)
idx = self._getComponentPositionByType(
r, component.getEffectiveTagSet(), idx
)
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
r.setDefaultComponents()
r.verifySizeSpec()
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
idx = 0
while substrate:
asn1Spec = self._getComponentTagMap(r, idx)
component, substrate = decodeFun(substrate, asn1Spec)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
idx = self._getComponentPositionByType(
r, component.getEffectiveTagSet(), idx
)
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
r.setDefaultComponents()
r.verifySizeSpec()
return r, substrate
class SequenceOfDecoder(AbstractConstructedDecoder):
protoComponent = univ.SequenceOf()
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
asn1Spec = r.getComponentType()
idx = 0
while head:
component, head = decodeFun(head, asn1Spec)
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
r.verifySizeSpec()
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
asn1Spec = r.getComponentType()
idx = 0
while substrate:
component, substrate = decodeFun(substrate, asn1Spec)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
r.verifySizeSpec()
return r, substrate
class SetDecoder(SequenceDecoder):
protoComponent = univ.Set()
def _getComponentTagMap(self, r, idx):
return r.getComponentTagMap()
def _getComponentPositionByType(self, r, t, idx):
nextIdx = r.getComponentPositionByType(t)
if nextIdx is None:
return idx
else:
return nextIdx
class SetOfDecoder(SequenceOfDecoder):
protoComponent = univ.SetOf()
class ChoiceDecoder(AbstractConstructedDecoder):
protoComponent = univ.Choice()
tagFormats = (tag.tagFormatSimple, tag.tagFormatConstructed)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
if r.getTagSet() == tagSet: # explicitly tagged Choice
component, head = decodeFun(
head, r.getComponentTagMap()
)
else:
component, head = decodeFun(
head, r.getComponentTagMap(), tagSet, length, state
)
if isinstance(component, univ.Choice):
effectiveTagSet = component.getEffectiveTagSet()
else:
effectiveTagSet = component.getTagSet()
r.setComponentByType(effectiveTagSet, component, 0, asn1Spec is None)
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
if r.getTagSet() == tagSet: # explicitly tagged Choice
component, substrate = decodeFun(substrate, r.getComponentTagMap())
eooMarker, substrate = decodeFun(substrate) # eat up EOO marker
if not eoo.endOfOctets.isSameTypeWith(eooMarker) or \
eooMarker != eoo.endOfOctets:
raise error.PyAsn1Error('No EOO seen before substrate ends')
else:
component, substrate= decodeFun(
substrate, r.getComponentTagMap(), tagSet, length, state
)
if isinstance(component, univ.Choice):
effectiveTagSet = component.getEffectiveTagSet()
else:
effectiveTagSet = component.getTagSet()
r.setComponentByType(effectiveTagSet, component, 0, asn1Spec is None)
return r, substrate
class AnyDecoder(AbstractSimpleDecoder):
protoComponent = univ.Any()
tagFormats = (tag.tagFormatSimple, tag.tagFormatConstructed)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
if asn1Spec is None or \
asn1Spec is not None and tagSet != asn1Spec.getTagSet():
# untagged Any container, recover inner header substrate
length = length + len(fullSubstrate) - len(substrate)
substrate = fullSubstrate
if substrateFun:
return substrateFun(self._createComponent(asn1Spec, tagSet),
substrate, length)
head, tail = substrate[:length], substrate[length:]
return self._createComponent(asn1Spec, tagSet, value=head), tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
if asn1Spec is not None and tagSet == asn1Spec.getTagSet():
# tagged Any type -- consume header substrate
header = ''
else:
# untagged Any, recover header substrate
header = fullSubstrate[:-len(substrate)]
r = self._createComponent(asn1Spec, tagSet, header)
# Any components do not inherit initial tag
asn1Spec = self.protoComponent
if substrateFun:
return substrateFun(r, substrate, length)
while substrate:
component, substrate = decodeFun(substrate, asn1Spec)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
r = r + component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return r, substrate
# character string types
class UTF8StringDecoder(OctetStringDecoder):
protoComponent = char.UTF8String()
class NumericStringDecoder(OctetStringDecoder):
protoComponent = char.NumericString()
class PrintableStringDecoder(OctetStringDecoder):
protoComponent = char.PrintableString()
class TeletexStringDecoder(OctetStringDecoder):
protoComponent = char.TeletexString()
class VideotexStringDecoder(OctetStringDecoder):
protoComponent = char.VideotexString()
class IA5StringDecoder(OctetStringDecoder):
protoComponent = char.IA5String()
class GraphicStringDecoder(OctetStringDecoder):
protoComponent = char.GraphicString()
class VisibleStringDecoder(OctetStringDecoder):
protoComponent = char.VisibleString()
class GeneralStringDecoder(OctetStringDecoder):
protoComponent = char.GeneralString()
class UniversalStringDecoder(OctetStringDecoder):
protoComponent = char.UniversalString()
class BMPStringDecoder(OctetStringDecoder):
protoComponent = char.BMPString()
# "useful" types
class GeneralizedTimeDecoder(OctetStringDecoder):
protoComponent = useful.GeneralizedTime()
class UTCTimeDecoder(OctetStringDecoder):
protoComponent = useful.UTCTime()
tagMap = {
eoo.endOfOctets.tagSet: EndOfOctetsDecoder(),
univ.Integer.tagSet: IntegerDecoder(),
univ.Boolean.tagSet: BooleanDecoder(),
univ.BitString.tagSet: BitStringDecoder(),
univ.OctetString.tagSet: OctetStringDecoder(),
univ.Null.tagSet: NullDecoder(),
univ.ObjectIdentifier.tagSet: ObjectIdentifierDecoder(),
univ.Enumerated.tagSet: IntegerDecoder(),
univ.Real.tagSet: RealDecoder(),
univ.Sequence.tagSet: SequenceDecoder(), # conflicts with SequenceOf
univ.Set.tagSet: SetDecoder(), # conflicts with SetOf
univ.Choice.tagSet: ChoiceDecoder(), # conflicts with Any
# character string types
char.UTF8String.tagSet: UTF8StringDecoder(),
char.NumericString.tagSet: NumericStringDecoder(),
char.PrintableString.tagSet: PrintableStringDecoder(),
char.TeletexString.tagSet: TeletexStringDecoder(),
char.VideotexString.tagSet: VideotexStringDecoder(),
char.IA5String.tagSet: IA5StringDecoder(),
char.GraphicString.tagSet: GraphicStringDecoder(),
char.VisibleString.tagSet: VisibleStringDecoder(),
char.GeneralString.tagSet: GeneralStringDecoder(),
char.UniversalString.tagSet: UniversalStringDecoder(),
char.BMPString.tagSet: BMPStringDecoder(),
# useful types
useful.GeneralizedTime.tagSet: GeneralizedTimeDecoder(),
useful.UTCTime.tagSet: UTCTimeDecoder()
}
# Type-to-codec map for ambiguous ASN.1 types
typeMap = {
univ.Set.typeId: SetDecoder(),
univ.SetOf.typeId: SetOfDecoder(),
univ.Sequence.typeId: SequenceDecoder(),
univ.SequenceOf.typeId: SequenceOfDecoder(),
univ.Choice.typeId: ChoiceDecoder(),
univ.Any.typeId: AnyDecoder()
}
( stDecodeTag, stDecodeLength, stGetValueDecoder, stGetValueDecoderByAsn1Spec,
stGetValueDecoderByTag, stTryAsExplicitTag, stDecodeValue,
stDumpRawValue, stErrorCondition, stStop ) = [x for x in range(10)]
class Decoder:
defaultErrorState = stErrorCondition
# defaultErrorState = stDumpRawValue
defaultRawDecoder = AnyDecoder()
def __init__(self, tagMap, typeMap={}):
self.__tagMap = tagMap
self.__typeMap = typeMap
self.__endOfOctetsTagSet = eoo.endOfOctets.getTagSet()
# Tag & TagSet objects caches
self.__tagCache = {}
self.__tagSetCache = {}
def __call__(self, substrate, asn1Spec=None, tagSet=None,
length=None, state=stDecodeTag, recursiveFlag=1,
substrateFun=None):
if debug.logger & debug.flagDecoder:
debug.logger('decoder called at scope %s with state %d, working with up to %d octets of substrate: %s' % (debug.scope, state, len(substrate), debug.hexdump(substrate)))
fullSubstrate = substrate
while state != stStop:
if state == stDecodeTag:
# Decode tag
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on tag decoding'
)
if not isOctetsType(substrate) and \
not isinstance(substrate, univ.OctetString):
raise error.PyAsn1Error('Bad octet stream type')
firstOctet = substrate[0]
substrate = substrate[1:]
if firstOctet in self.__tagCache:
lastTag = self.__tagCache[firstOctet]
else:
t = oct2int(firstOctet)
tagClass = t&0xC0
tagFormat = t&0x20
tagId = t&0x1F
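# a tag id of 0x1F signals the high-tag-number form: the actual id
# follows in base-128 octets, MSB set on all but the last octet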
if tagId == 0x1F:
tagId = 0
while 1:
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on long tag decoding'
)
t = oct2int(substrate[0])
tagId = tagId << 7 | (t&0x7F)
substrate = substrate[1:]
if not t&0x80:
break
lastTag = tag.Tag(
tagClass=tagClass, tagFormat=tagFormat, tagId=tagId
)
if tagId < 31:
# cache short tags
self.__tagCache[firstOctet] = lastTag
if tagSet is None:
if firstOctet in self.__tagSetCache:
tagSet = self.__tagSetCache[firstOctet]
else:
# base tag not recovered
tagSet = tag.TagSet((), lastTag)
if firstOctet in self.__tagCache:
self.__tagSetCache[firstOctet] = tagSet
else:
tagSet = lastTag + tagSet
state = stDecodeLength
debug.logger and debug.logger & debug.flagDecoder and debug.logger('tag decoded into %r, decoding length' % tagSet)
if state == stDecodeLength:
# Decode length
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on length decoding'
)
firstOctet = oct2int(substrate[0])
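# BER length octets: 0x80 means indefinite length, values below 0x80
# are the short form (the length itself), otherwise the low 7 bits
# give the number of subsequent long-form length octets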
if firstOctet == 128:
size = 1
length = -1
elif firstOctet < 128:
length, size = firstOctet, 1
else:
size = firstOctet & 0x7F
# encoded in size bytes
length = 0
lengthString = substrate[1:size+1]
# missing check on maximum size, which shouldn't be a
# problem, we can handle more than is possible
if len(lengthString) != size:
raise error.SubstrateUnderrunError(
'%s<%s at %s' %
(size, len(lengthString), tagSet)
)
for char in lengthString:
length = (length << 8) | oct2int(char)
size = size + 1
substrate = substrate[size:]
if length != -1 and len(substrate) < length:
raise error.SubstrateUnderrunError(
'%d-octet short' % (length - len(substrate))
)
state = stGetValueDecoder
debug.logger and debug.logger & debug.flagDecoder and debug.logger('value length decoded into %d, payload substrate is: %s' % (length, debug.hexdump(length == -1 and substrate or substrate[:length])))
if state == stGetValueDecoder:
if asn1Spec is None:
state = stGetValueDecoderByTag
else:
state = stGetValueDecoderByAsn1Spec
#
# There're two ways of creating subtypes in ASN.1 what influences
# decoder operation. These methods are:
# 1) Either base types used in or no IMPLICIT tagging has been
# applied on subtyping.
# 2) Subtype syntax drops base type information (by means of
# IMPLICIT tagging.
# The first case allows for complete tag recovery from substrate
# while the second one requires original ASN.1 type spec for
# decoding.
#
# In either case a set of tags (tagSet) is coming from substrate
# in an incremental, tag-by-tag fashion (this is the case of
# EXPLICIT tag which is most basic). Outermost tag comes first
# from the wire.
#
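# For example, [0] EXPLICIT INTEGER keeps the universal INTEGER tag in
# the substrate, so the base type is recoverable; [0] IMPLICIT INTEGER
# replaces it, and decoding then depends on the supplied asn1Spec.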
if state == stGetValueDecoderByTag:
if tagSet in self.__tagMap:
concreteDecoder = self.__tagMap[tagSet]
else:
concreteDecoder = None
if concreteDecoder:
state = stDecodeValue
else:
_k = tagSet[:1]
if _k in self.__tagMap:
concreteDecoder = self.__tagMap[_k]
else:
concreteDecoder = None
if concreteDecoder:
state = stDecodeValue
else:
state = stTryAsExplicitTag
if debug.logger and debug.logger & debug.flagDecoder:
debug.logger('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state == stDecodeValue and 'value' or 'as explicit tag'))
debug.scope.push(concreteDecoder is None and '?' or concreteDecoder.protoComponent.__class__.__name__)
if state == stGetValueDecoderByAsn1Spec:
if isinstance(asn1Spec, (dict, tagmap.TagMap)):
if tagSet in asn1Spec:
__chosenSpec = asn1Spec[tagSet]
else:
__chosenSpec = None
if debug.logger and debug.logger & debug.flagDecoder:
debug.logger('candidate ASN.1 spec is a map of:')
for t, v in asn1Spec.getPosMap().items():
debug.logger(' %r -> %s' % (t, v.__class__.__name__))
if asn1Spec.getNegMap():
debug.logger('but neither of: ')
for t, v in asn1Spec.getNegMap().items():
debug.logger(' %r -> %s' % (t, v.__class__.__name__))
debug.logger('new candidate ASN.1 spec is %s, chosen by %r' % (__chosenSpec is None and '<none>' or __chosenSpec.__class__.__name__, tagSet))
else:
__chosenSpec = asn1Spec
debug.logger and debug.logger & debug.flagDecoder and debug.logger('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__)
if __chosenSpec is not None and (
tagSet == __chosenSpec.getTagSet() or \
tagSet in __chosenSpec.getTagMap()
):
# use base type for codec lookup to recover untagged types
baseTagSet = __chosenSpec.baseTagSet
if __chosenSpec.typeId is not None and \
__chosenSpec.typeId in self.__typeMap:
# ambiguous type
concreteDecoder = self.__typeMap[__chosenSpec.typeId]
debug.logger and debug.logger & debug.flagDecoder and debug.logger('value decoder chosen for an ambiguous type by type ID %s' % (__chosenSpec.typeId,))
elif baseTagSet in self.__tagMap:
# base type or tagged subtype
concreteDecoder = self.__tagMap[baseTagSet]
debug.logger and debug.logger & debug.flagDecoder and debug.logger('value decoder chosen by base %r' % (baseTagSet,))
else:
concreteDecoder = None
if concreteDecoder:
asn1Spec = __chosenSpec
state = stDecodeValue
else:
state = stTryAsExplicitTag
elif tagSet == self.__endOfOctetsTagSet:
concreteDecoder = self.__tagMap[tagSet]
state = stDecodeValue
debug.logger and debug.logger & debug.flagDecoder and debug.logger('end-of-octets found')
else:
concreteDecoder = None
state = stTryAsExplicitTag
if debug.logger and debug.logger & debug.flagDecoder:
debug.logger('codec %s chosen by ASN.1 spec, decoding %s' % (state == stDecodeValue and concreteDecoder.__class__.__name__ or "<none>", state == stDecodeValue and 'value' or 'as explicit tag'))
debug.scope.push(__chosenSpec is None and '?' or __chosenSpec.__class__.__name__)
if state == stTryAsExplicitTag:
if tagSet and \
tagSet[0][1] == tag.tagFormatConstructed and \
tagSet[0][0] != tag.tagClassUniversal:
# Assume explicit tagging
concreteDecoder = explicitTagDecoder
state = stDecodeValue
else:
concreteDecoder = None
state = self.defaultErrorState
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state == stDecodeValue and 'value' or 'as failure'))
if state == stDumpRawValue:
concreteDecoder = self.defaultRawDecoder
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__)
state = stDecodeValue
if state == stDecodeValue:
if recursiveFlag == 0 and not substrateFun: # legacy
substrateFun = lambda a,b,c: (a,b[:c])
if length == -1: # indef length
value, substrate = concreteDecoder.indefLenValueDecoder(
fullSubstrate, substrate, asn1Spec, tagSet, length,
stGetValueDecoder, self, substrateFun
)
else:
value, substrate = concreteDecoder.valueDecoder(
fullSubstrate, substrate, asn1Spec, tagSet, length,
stGetValueDecoder, self, substrateFun
)
state = stStop
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s yields type %s, value:\n%s\n...remaining substrate is: %s' % (concreteDecoder.__class__.__name__, value.__class__.__name__, value.prettyPrint(), substrate and debug.hexdump(substrate) or '<none>'))
if state == stErrorCondition:
raise error.PyAsn1Error(
'%r not in asn1Spec: %r' % (tagSet, asn1Spec)
)
if debug.logger and debug.logger & debug.flagDecoder:
debug.scope.pop()
debug.logger('decoder left scope %s, call completed' % debug.scope)
return value, substrate
decode = Decoder(tagMap, typeMap)
# XXX
# non-recursive decoding; return position rather than substrate
| mit |
onponomarev/ganeti | devel/cert_digest.py | 4 | 2074 | #!/usr/bin/python
# Copyright (C) 2015 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a test script to ease debugging of SSL problems. It can be
# applied on any of Ganeti's SSL certificates (for example client.pem
# and server.pem) and will output a digest.
import sys
import OpenSSL
def usage():
print "%s filename" % sys.argv[0]
print
print "'filename' must be a filename of an SSL certificate in PEM format."
if __name__ == "__main__":
if len(sys.argv) < 2:
usage()
sys.exit(1)
cert_fd = open(sys.argv[1], "r")
cert_plain = cert_fd.read()
print "Certificate:"
print cert_plain
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
cert_plain)
print "Digest:"
print cert.digest("sha1")
| bsd-2-clause |
kubernetes-client/python | kubernetes/client/models/v1alpha1_webhook_throttle_config.py | 1 | 4435 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1WebhookThrottleConfig(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'burst': 'int',
'qps': 'int'
}
attribute_map = {
'burst': 'burst',
'qps': 'qps'
}
def __init__(self, burst=None, qps=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1WebhookThrottleConfig - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._burst = None
self._qps = None
self.discriminator = None
if burst is not None:
self.burst = burst
if qps is not None:
self.qps = qps
@property
def burst(self):
"""Gets the burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleBurst is the maximum number of events sent at the same moment; default 15 QPS # noqa: E501
:return: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._burst
@burst.setter
def burst(self, burst):
"""Sets the burst of this V1alpha1WebhookThrottleConfig.
ThrottleBurst is the maximum number of events sent at the same moment; default 15 QPS # noqa: E501
:param burst: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._burst = burst
@property
def qps(self):
"""Gets the qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleQPS is the maximum number of batches per second; default 10 QPS # noqa: E501
:return: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._qps
@qps.setter
def qps(self, qps):
"""Sets the qps of this V1alpha1WebhookThrottleConfig.
ThrottleQPS is the maximum number of batches per second; default 10 QPS # noqa: E501
:param qps: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._qps = qps
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return True
return self.to_dict() != other.to_dict()
| apache-2.0 |
tm011064/wrongnumberproject | proj.android/build_native.py | 124 | 1365 | #!/usr/bin/python
# build_native.py
# Build native codes
#
# Please use cocos console instead
import sys
import os, os.path
import shutil
from optparse import OptionParser
def build(build_mode):
current_dir = os.path.dirname(os.path.realpath(__file__))
cocos_root = os.path.join(current_dir, "../cocos2d")
app_android_root = os.path.join(current_dir, "../")
if build_mode != 'release':
build_mode = 'debug'
command = 'cocos compile -p android -s %s -m %s' % (app_android_root, build_mode)
if os.system(command) != 0:
raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
# -------------- main --------------
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-n", "--ndk", dest="ndk_build_param", help='it is not used', action="append")
parser.add_option("-p", "--platform", dest="android_platform",
help='it is not used')
parser.add_option("-b", "--build", dest="build_mode",
help='the build mode for java project,debug[default] or release.Get more information,please refer to http://developer.android.com/tools/building/building-cmdline.html')
(opts, args) = parser.parse_args()
print "Please use cocos console instead.\n"
build(opts.build_mode)
| mit |
alexcrichton/gyp | test/product/gyptest-product.py | 290 | 1588 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simplest-possible build of a "Hello, world!" program
using the default build target.
"""
import TestGyp
# Android does not support setting the build directory.
test = TestGyp.TestGyp(formats=['!android'])
test.run_gyp('product.gyp')
test.build('product.gyp')
# executables
test.built_file_must_exist('alt1' + test._exe, test.EXECUTABLE, bare=True)
test.built_file_must_exist('hello2.stuff', test.EXECUTABLE, bare=True)
test.built_file_must_exist('yoalt3.stuff', test.EXECUTABLE, bare=True)
# shared libraries
test.built_file_must_exist(test.dll_ + 'alt4' + test._dll,
test.SHARED_LIB, bare=True)
test.built_file_must_exist(test.dll_ + 'hello5.stuff',
test.SHARED_LIB, bare=True)
test.built_file_must_exist('yoalt6.stuff', test.SHARED_LIB, bare=True)
# static libraries
test.built_file_must_exist(test.lib_ + 'alt7' + test._lib,
test.STATIC_LIB, bare=True)
test.built_file_must_exist(test.lib_ + 'hello8.stuff',
test.STATIC_LIB, bare=True)
test.built_file_must_exist('yoalt9.stuff', test.STATIC_LIB, bare=True)
# alternate product_dir
test.built_file_must_exist('bob/yoalt10.stuff', test.EXECUTABLE, bare=True)
test.built_file_must_exist('bob/yoalt11.stuff', test.EXECUTABLE, bare=True)
test.built_file_must_exist('bob/yoalt12.stuff', test.EXECUTABLE, bare=True)
test.pass_test()
| bsd-3-clause |
JCROM-Android/jcrom_external_chromium_org | tools/telemetry/telemetry/page/block_page_measurement_results_unittest.py | 30 | 1837 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import StringIO
import os
import unittest
from telemetry.page import block_page_measurement_results
from telemetry.page import page_set
BlockPageMeasurementResults = \
block_page_measurement_results.BlockPageMeasurementResults
def _MakePageSet():
return page_set.PageSet.FromDict({
"description": "hello",
"archive_path": "foo.wpr",
"pages": [
{"url": "http://www.foo.com/"},
{"url": "http://www.bar.com/"}
]
}, os.path.dirname(__file__))
class NonPrintingBlockPageMeasurementResults(BlockPageMeasurementResults):
def __init__(self, *args):
super(NonPrintingBlockPageMeasurementResults, self).__init__(*args)
def _PrintPerfResult(self, *args):
pass
class BlockPageMeasurementResultsTest(unittest.TestCase):
def setUp(self):
self._output = StringIO.StringIO()
self._page_set = _MakePageSet()
@property
def lines(self):
lines = StringIO.StringIO(self._output.getvalue()).readlines()
return [line.strip() for line in lines]
@property
def data(self):
return [line.split(': ', 1) for line in self.lines]
def test_with_output_after_every_page(self):
results = NonPrintingBlockPageMeasurementResults(self._output)
results.WillMeasurePage(self._page_set[0])
results.Add('foo', 'seconds', 3)
results.DidMeasurePage()
results.WillMeasurePage(self._page_set[1])
results.Add('bar', 'seconds', 4)
results.DidMeasurePage()
expected = [
['url', 'http://www.foo.com/'],
['foo (seconds)', '3'],
[''],
['url', 'http://www.bar.com/'],
['bar (seconds)', '4'],
['']
]
self.assertEquals(self.data, expected)
| bsd-3-clause |