| repo_name (string, 5–104 chars) | path (string, 4–248 chars) | content (string, 102–99.9k chars) |
|---|---|---|
jumpstarter-io/horizon
|
openstack_dashboard/dashboards/admin/images/properties/forms.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from glanceclient import exc
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
def str2bool(value):
"""Convert a string value to boolean
"""
return value.lower() in ("yes", "true", "1")
# Mapping of property names to type, used for converting input string value
# before submitting.
PROPERTY_TYPES = {'min_disk': long, 'min_ram': long, 'protected': str2bool}
def convert_value(key, value):
"""Convert the property value to the proper type if necessary.
"""
_type = PROPERTY_TYPES.get(key)
if _type:
return _type(value)
return value
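# Hedged usage sketch (added for illustration; not part of the original
# module): convert_value only coerces keys listed in PROPERTY_TYPES and
# passes everything else through unchanged, e.g.
#   convert_value('min_disk', '20')       # -> 20L (long)
#   convert_value('protected', 'Yes')     # -> True (via str2bool)
#   convert_value('os_distro', 'ubuntu')  # -> 'ubuntu' (no mapping)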
class CreateProperty(forms.SelfHandlingForm):
key = forms.CharField(max_length="255", label=_("Key"))
value = forms.CharField(label=_("Value"))
def handle(self, request, data):
try:
api.glance.image_update_properties(request,
self.initial['image_id'],
**{data['key']: convert_value(data['key'], data['value'])})
msg = _('Created custom property "%s".') % data['key']
messages.success(request, msg)
return True
except exc.HTTPForbidden:
msg = _('Unable to create image custom property. Property "%s" '
'is read only.') % data['key']
exceptions.handle(request, msg)
except exc.HTTPConflict:
msg = _('Unable to create image custom property. Property "%s" '
'already exists.') % data['key']
exceptions.handle(request, msg)
except Exception:
msg = _('Unable to create image custom '
'property "%s".') % data['key']
exceptions.handle(request, msg)
class EditProperty(forms.SelfHandlingForm):
key = forms.CharField(widget=forms.widgets.HiddenInput)
value = forms.CharField(label=_("Value"))
def handle(self, request, data):
try:
api.glance.image_update_properties(request,
self.initial['image_id'],
**{data['key']: convert_value(data['key'], data['value'])})
msg = _('Saved custom property "%s".') % data['key']
messages.success(request, msg)
return True
except exc.HTTPForbidden:
msg = _('Unable to edit image custom property. Property "%s" '
'is read only.') % data['key']
exceptions.handle(request, msg)
except Exception:
msg = _('Unable to edit image custom '
'property "%s".') % data['key']
exceptions.handle(request, msg)
|
NeCTAR-RC/horizon
|
openstack_dashboard/dashboards/admin/rbac_policies/views.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.rbac_policies \
import forms as rbac_policy_forms
from openstack_dashboard.dashboards.admin.rbac_policies \
import tables as rbac_policy_tables
from openstack_dashboard.dashboards.admin.rbac_policies \
import tabs as rbac_policy_tabs
class IndexView(tables.DataTableView):
table_class = rbac_policy_tables.RBACPoliciesTable
page_title = _("RBAC Policies")
@memoized.memoized_method
def _get_tenants(self):
try:
tenants, has_more = api.keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _("Unable to retrieve information about the "
"policies' projects.")
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t.name) for t in tenants])
return tenant_dict
def _get_networks(self):
try:
networks = api.neutron.network_list(self.request)
except Exception:
networks = []
msg = _("Unable to retrieve information about the "
"policies' networks.")
exceptions.handle(self.request, msg)
return dict((n.id, n.name) for n in networks)
def _get_qos_policies(self):
qos_policies = []
try:
if api.neutron.is_extension_supported(self.request,
extension_alias='qos'):
qos_policies = api.neutron.policy_list(self.request)
except Exception:
msg = _("Unable to retrieve information about the "
"policies' qos policies.")
exceptions.handle(self.request, msg)
return dict((q.id, q.name) for q in qos_policies)
def get_data(self):
try:
rbac_policies = api.neutron.rbac_policy_list(self.request)
except Exception:
rbac_policies = []
messages.error(self.request,
_("Unable to retrieve RBAC policies."))
if rbac_policies:
tenant_dict = self._get_tenants()
network_dict = self._get_networks()
qos_policy_dict = self._get_qos_policies()
for p in rbac_policies:
# Set tenant name and object name
p.tenant_name = tenant_dict.get(p.tenant_id, p.tenant_id)
p.target_tenant_name = tenant_dict.get(p.target_tenant,
p.target_tenant)
if p.object_type == "network":
p.object_name = network_dict.get(p.object_id, p.object_id)
elif p.object_type == "qos_policy":
p.object_name = qos_policy_dict.get(p.object_id,
p.object_id)
return rbac_policies
class CreateView(forms.ModalFormView):
template_name = 'admin/rbac_policies/create.html'
form_id = "create_rbac_policy_form"
form_class = rbac_policy_forms.CreatePolicyForm
submit_label = _("Create RBAC Policy")
submit_url = reverse_lazy("horizon:admin:rbac_policies:create")
success_url = reverse_lazy("horizon:admin:rbac_policies:index")
page_title = _("Create A RBAC Policy")
class UpdateView(forms.ModalFormView):
context_object_name = 'rbac_policies'
template_name = 'admin/rbac_policies/update.html'
form_class = rbac_policy_forms.UpdatePolicyForm
form_id = "update_rbac_policy_form"
submit_label = _("Save Changes")
submit_url = 'horizon:admin:rbac_policies:update'
success_url = reverse_lazy('horizon:admin:rbac_policies:index')
page_title = _("Update RBAC Policy")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
args = (self.kwargs['rbac_policy_id'],)
context["rbac_policy_id"] = self.kwargs['rbac_policy_id']
context["submit_url"] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
rbac_policy_id = self.kwargs['rbac_policy_id']
try:
return api.neutron.rbac_policy_get(self.request, rbac_policy_id)
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve RBAC policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
rbac_policy = self._get_object()
return {'rbac_policy_id': rbac_policy['id'],
'target_tenant': rbac_policy['target_tenant']}
class DetailView(tabs.TabView):
tab_group_class = rbac_policy_tabs.RBACDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ rbac_policy.id }}"
|
thomasem/nova
|
nova/objects/external_event.py
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.objects import base as obj_base
from nova.objects import fields
EVENT_NAMES = [
# Network has changed for this instance, rebuild info_cache
'network-changed',
# VIF plugging notifications, tag is port_id
'network-vif-plugged',
'network-vif-unplugged',
]
EVENT_STATUSES = ['failed', 'completed', 'in-progress']
# TODO(berrange): Remove NovaObjectDictCompat
class InstanceExternalEvent(obj_base.NovaObject,
obj_base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Supports network-changed and vif-plugged
VERSION = '1.0'
fields = {
'instance_uuid': fields.UUIDField(),
'name': fields.EnumField(valid_values=EVENT_NAMES),
'status': fields.StringField(),
'tag': fields.StringField(nullable=True),
'data': fields.DictOfStringsField(),
}
@staticmethod
def make_key(name, tag=None):
if tag is not None:
return '%s-%s' % (name, tag)
else:
return name
@property
def key(self):
return self.make_key(self.name, self.tag)
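# Hedged illustration (added; the tag value is hypothetical): make_key
# simply joins the event name and optional tag, so
#   InstanceExternalEvent.make_key('network-vif-plugged', 'port-uuid')
#   # -> 'network-vif-plugged-port-uuid'
#   InstanceExternalEvent.make_key('network-changed')
#   # -> 'network-changed'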
|
mrknow/filmkodi
|
plugin.video.fanfilm/resources/lib/resolvers/sawlive.py
|
# -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
page = 'http://sawlive.tv/embed/%s' % page
try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except: referer = page
result = client.request(page, referer=referer)
unpacked = ''
packed = result.split('\n')
for i in packed:
try: unpacked += jsunpack.unpack(i)
except: pass
result += unpacked
result = urllib.unquote_plus(result)
result = re.compile('<iframe(.+?)</iframe>').findall(result)[-1]
url = re.compile('src\s*=\s*[\'|\"](.+?)[\'|\"].+?[\'|\"](.+?)[\'|\"]').findall(result)[0]
url = '/'.join(url)
result = client.request(url, referer=referer)
strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
file = re.compile("'file'.+?'(.+?)'").findall(result)[0]
swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]
url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=30' % (strm, file, swf, url)
return url
except:
return
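# Hedged illustration (added; host and stream names are hypothetical): on
# success resolve() returns an rtmpdump-style string such as
#   'rtmp://x.sawlive.tv/live playpath=stream123 '
#   'swfUrl=http://static.sawlive.tv/player.swf '
#   'pageUrl=http://sawlive.tv/embed/abc live=1 timeout=30'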
|
jcfr/Midas
|
modules/visualize/python/pvw-plugins/midasvr.py
|
import pwsimple
# Midas volume rendering ParaviewWeb plugin
# Initialize the volume rendering state
def InitViewState (cameraFocalPoint, cameraPosition, colorArrayName, colorMap, sofPoints, viewSize):
if type(colorArrayName) is unicode:
colorArrayName = colorArrayName.encode('ascii', 'ignore')
activeView = pwsimple.CreateIfNeededRenderView()
activeView.CameraFocalPoint = cameraFocalPoint
activeView.CameraPosition = cameraPosition
activeView.CameraViewUp = [0.0, 0.0, 1.0]
activeView.CameraParallelProjection = False
activeView.CenterOfRotation = activeView.CameraFocalPoint
activeView.Background = [0.0, 0.0, 0.0]
activeView.Background2 = [0.0, 0.0, 0.0]
activeView.ViewSize = viewSize
lookupTable = pwsimple.GetLookupTableForArray(colorArrayName, 1)
lookupTable.RGBPoints = colorMap
lookupTable.ScalarRangeInitialized = 1.0
lookupTable.ColorSpace = 0 # 0 corresponds to RGB
# Initial scalar opacity function
sof = pwsimple.CreatePiecewiseFunction()
sof.Points = sofPoints
dataRep = pwsimple.Show()
dataRep.ScalarOpacityFunction = sof
dataRep.Representation = 'Volume'
dataRep.ColorArrayName = colorArrayName
dataRep.LookupTable = lookupTable
retVal = {}
retVal['sof'] = sof
retVal['lookupTable'] = lookupTable
retVal['activeView'] = activeView
return retVal
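# Hedged usage sketch (added; all argument values are illustrative): a
# caller might initialize the render view along the lines of
#   state = InitViewState(
#       cameraFocalPoint=[0.0, 0.0, 0.0],
#       cameraPosition=[0.0, -400.0, 0.0],
#       colorArrayName=u'Scalars_',
#       colorMap=[0.0, 0.0, 0.0, 0.0, 255.0, 1.0, 1.0, 1.0],
#       sofPoints=[0.0, 0.0, 0.5, 0.0, 255.0, 1.0, 0.5, 0.0],
#       viewSize=[800, 600])
# state['activeView'], state['lookupTable'] and state['sof'] can then be
# reused, e.g. by ExtractSubgrid() below.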
# Extract a subgrid of the source
def ExtractSubgrid (source, bounds, lookupTable, sof, colorArrayName, toHide):
pwsimple.SetActiveSource(source)
subgrid = pwsimple.ExtractSubset()
subgrid.VOI = bounds
pwsimple.SetActiveSource(subgrid)
if type(colorArrayName) is unicode:
colorArrayName = colorArrayName.encode('ascii', 'ignore')
dataRep = pwsimple.Show()
dataRep.ScalarOpacityFunction = sof
dataRep.Representation = 'Volume'
dataRep.SelectionPointFieldDataArrayName = colorArrayName
dataRep.ColorArrayName = colorArrayName
dataRep.LookupTable = lookupTable
pwsimple.SetActiveSource(source)
pwsimple.Hide(source)
if toHide:
pwsimple.Hide(toHide)
pwsimple.SetActiveSource(subgrid)
return subgrid
|
PaddlePaddle/Paddle
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_group_norm.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self):
def generate_input(attrs: List[Dict[str, Any]], batch):
if attrs[0]['data_layout'] == 'NCHW':
return np.random.random([batch, 32, 64, 64]).astype(np.float32)
else:
return np.random.random([batch, 64, 64, 32]).astype(np.float32)
def generate_scale():
return np.random.randn(32).astype(np.float32)
def generate_bias():
return np.random.randn(32).astype(np.float32)
for batch in [1, 2, 4]:
for group in [1, 4, 32]:
for epsilon in [0.1, 0.7]:
for data_layout in ['NCHW', 'NHWC']:
for i in [0, 1]:
dics = [{
"epsilon": epsilon,
"groups": group,
"data_layout": data_layout
}, {
"groups": group,
"data_layout": data_layout
}]
ops_config = [{
"op_type": "group_norm",
"op_inputs": {
"X": ["input_data"],
"Scale": ["scale_weight"],
"Bias": ["bias_weight"]
},
"op_outputs": {
"Y": ["y_output"],
"Mean": ["mean_output"],
"Variance": ["variance_output"]
},
"op_attrs": dics[i]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={
"scale_weight": TensorConfig(
data_gen=partial(generate_scale)),
"bias_weight": TensorConfig(
data_gen=partial(generate_bias))
},
inputs={
"input_data": TensorConfig(data_gen=partial(
generate_input, dics, batch))
},
outputs=["y_output"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 32, 32]}
self.dynamic_shape.max_input_shape = {
"input_data": [4, 64, 128, 64]
}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 64, 64]}
def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
if len(attrs[0]) == 3:
if dynamic_shape:
return 1, 2
else:
return 0, 3
else:
return 0, 3
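# Hedged note (added): the harness reads each pair above as
# (trt_engine_num, paddle_op_num); the assumption is that group_norm only
# fuses into a single TRT engine when all three attributes are present
# and dynamic shape is enabled.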
attrs = [
program_config.ops[i].attrs
for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
def add_skip_trt_case(self):
def teller1(program_config, predictor_config):
if len(self.dynamic_shape.min_input_shape) != 0:
return True
return False
self.add_skip_case(
teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
"The goup_norm plugin will check dim not -1 failed when dynamic fp16 mode."
)
def test(self):
self.add_skip_trt_case()
self.run_test()
if __name__ == "__main__":
unittest.main()
|
eayunstack/python-neutronclient
|
neutronclient/tests/unit/qos/test_cli20_rule.py
|
# Copyright 2015 Huawei Technologies India Pvt Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from neutronclient.neutron.v2_0.qos import rule as qos_rule
from neutronclient.tests.unit import test_cli20
class CLITestV20QoSRuleJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['bandwidth_limit_rule', 'dscp_marking_rule']
def setUp(self):
super(CLITestV20QoSRuleJSON, self).setUp()
def test_list_qos_rule_types(self):
# List qos_rule_types.
resources = 'rule_types'
cmd_resources = 'qos_rule_types'
response_contents = [{'type': 'bandwidth_limit'},
{'type': 'dscp_marking'}]
cmd = qos_rule.ListQoSRuleTypes(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, True,
response_contents=response_contents,
cmd_resources=cmd_resources)
|
luci/luci-py
|
appengine/swarming/swarming_bot/swarmingserver_bot_fake.py
|
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import base64
import copy
import json
import os
import sys
import threading
BOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(
0,
os.path.join(os.path.dirname(BOT_DIR), '..', '..', '..', 'client', 'tests'))
import httpserver
sys.path.pop(0)
sys.path.insert(0, os.path.join(os.path.dirname(BOT_DIR), 'server'))
import bot_archive
sys.path.pop(0)
def gen_zip(url):
"""Returns swarming_bot.zip content."""
with open(os.path.join(BOT_DIR, 'config', 'bot_config.py'), 'rb') as f:
bot_config_content = f.read()
return bot_archive.get_swarming_bot_zip(
BOT_DIR, url, '1', {'config/bot_config.py': bot_config_content}, None)
def flatten_task_updates(updates):
"""Flatten a list of task updates into a single result.
This is more or less the equivalent of what task_scheduler.bot_update_task()
would do after all the bot API calls.
"""
out = {}
for update in updates:
if out.get('output') and update.get('output'):
# Accumulate output.
update = update.copy()
out['output'] = base64.b64encode(
base64.b64decode(out['output']) +
base64.b64decode(update.pop('output')))
update.pop('output_chunk_start')
out.update(update)
return out
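# Hedged example (added, not in the original file): two updates whose
# base64 'output' chunks concatenate while the other fields are merged.
#   updates = [
#       {'output': base64.b64encode('foo'), 'output_chunk_start': 0},
#       {'output': base64.b64encode('bar'), 'output_chunk_start': 3},
#   ]
#   flatten_task_updates(updates)
#   # -> {'output': base64.b64encode('foobar'), 'output_chunk_start': 0}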
class Handler(httpserver.Handler):
"""Minimal Swarming bot server fake implementation."""
def do_GET(self):
if self.path == '/swarming/api/v1/bot/server_ping':
self.send_response(200)
self.end_headers()
return None
if self.path == '/auth/api/v1/server/oauth_config':
return self.send_json({
'client_id': 'id',
'client_not_so_secret': 'hunter2',
'primary_url': self.server.url,
})
raise NotImplementedError(self.path)
def do_POST(self):
data = json.loads(self.read_body())
if self.path == '/auth/api/v1/accounts/self/xsrf_token':
return self.send_json({'xsrf_token': 'a'})
if self.path == '/swarming/api/v1/bot/event':
self.server.parent._add_bot_event(data)
return self.send_json({})
if self.path == '/swarming/api/v1/bot/handshake':
return self.send_json({'xsrf_token': 'fine'})
if self.path == '/swarming/api/v1/bot/poll':
self.server.parent.has_polled.set()
return self.send_json({'cmd': 'sleep', 'duration': 60})
if self.path.startswith('/swarming/api/v1/bot/task_update/'):
task_id = self.path[len('/swarming/api/v1/bot/task_update/'):]
must_stop = self.server.parent._on_task_update(task_id, data)
return self.send_json({'ok': True, 'must_stop': must_stop})
if self.path.startswith('/swarming/api/v1/bot/task_error'):
task_id = self.path[len('/swarming/api/v1/bot/task_error/'):]
self.server.parent._add_task_error(task_id, data)
return self.send_json({'resp': 1})
raise NotImplementedError(self.path)
def do_PUT(self):
raise NotImplementedError(self.path)
class Server(httpserver.Server):
"""Fake a Swarming bot API server for local testing."""
_HANDLER_CLS = Handler
def __init__(self):
super(Server, self).__init__()
self._lock = threading.Lock()
# Accumulated bot events.
self._bot_events = []
# Running tasks.
self._tasks = {}
# Bot reported task errors.
self._task_errors = {}
self.has_polled = threading.Event()
self.has_updated_task = threading.Event()
self.must_stop = False
def get_bot_events(self):
"""Returns the events reported by the bots."""
with self._lock:
return self._bot_events[:]
def get_tasks(self):
"""Returns the tasks run by the bots."""
with self._lock:
return copy.deepcopy(self._tasks)
def get_task_errors(self):
"""Returns the task errors reported by the bots."""
with self._lock:
return self._task_errors.copy()
def _add_bot_event(self, data):
# Used by the handler.
with self._lock:
self._bot_events.append(data)
def _on_task_update(self, task_id, data):
with self._lock:
self._tasks.setdefault(task_id, []).append(data)
must_stop = self.must_stop
self.has_updated_task.set()
return must_stop
def _add_task_error(self, task_id, data):
# Used by the handler.
with self._lock:
self._task_errors.setdefault(task_id, []).append(data)
|
hehongliang/tensorflow
|
tensorflow/python/kernel_tests/signal/mfcc_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mfcc_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import mfcc_ops
from tensorflow.python.platform import test
# TODO(rjryan): We have no open source tests for MFCCs at the moment. Internally
# at Google, this code is tested against a reference implementation that follows
# HTK conventions.
class MFCCTest(test.TestCase):
def test_error(self):
# num_mel_bins must be positive.
with self.assertRaises(ValueError):
signal = array_ops.zeros((2, 3, 0))
mfcc_ops.mfccs_from_log_mel_spectrograms(signal)
# signal must be float32
with self.assertRaises(ValueError):
signal = array_ops.zeros((2, 3, 5), dtype=dtypes.float64)
mfcc_ops.mfccs_from_log_mel_spectrograms(signal)
def test_basic(self):
"""A basic test that the op runs on random input."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.session(use_gpu=True):
signal = random_ops.random_normal((2, 3, 5))
mfcc_ops.mfccs_from_log_mel_spectrograms(signal).eval()
def test_unknown_shape(self):
"""A test that the op runs when shape and rank are unknown."""
with spectral_ops_test_util.fft_kernel_label_map():
with self.session(use_gpu=True):
signal = array_ops.placeholder_with_default(
random_ops.random_normal((2, 3, 5)), tensor_shape.TensorShape(None))
self.assertIsNone(signal.shape.ndims)
mfcc_ops.mfccs_from_log_mel_spectrograms(signal).eval()
if __name__ == "__main__":
test.main()
|
yufeldman/arrow
|
python/pyarrow/tests/test_types.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pickle
import pytest
import pyarrow as pa
import pyarrow.types as types
MANY_TYPES = [
pa.null(),
pa.bool_(),
pa.int32(),
pa.time32('s'),
pa.time64('us'),
pa.date32(),
pa.timestamp('us'),
pa.timestamp('us', tz='UTC'),
pa.timestamp('us', tz='Europe/Paris'),
pa.float16(),
pa.float32(),
pa.float64(),
pa.decimal128(19, 4),
pa.string(),
pa.binary(),
pa.binary(10),
pa.list_(pa.int32()),
pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())]),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
# XXX Needs array pickling
# pa.dictionary(pa.int32(), pa.array(['a', 'b', 'c'])),
]
def test_is_boolean():
assert types.is_boolean(pa.bool_())
assert not types.is_boolean(pa.int8())
def test_is_integer():
signed_ints = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
unsigned_ints = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
for t in signed_ints + unsigned_ints:
assert types.is_integer(t)
for t in signed_ints:
assert types.is_signed_integer(t)
assert not types.is_unsigned_integer(t)
for t in unsigned_ints:
assert types.is_unsigned_integer(t)
assert not types.is_signed_integer(t)
assert not types.is_integer(pa.float32())
assert not types.is_signed_integer(pa.float32())
def test_is_floating():
for t in [pa.float16(), pa.float32(), pa.float64()]:
assert types.is_floating(t)
assert not types.is_floating(pa.int32())
def test_is_null():
assert types.is_null(pa.null())
assert not types.is_null(pa.list_(pa.int32()))
def test_is_decimal():
assert types.is_decimal(pa.decimal128(19, 4))
assert not types.is_decimal(pa.int32())
def test_is_list():
assert types.is_list(pa.list_(pa.int32()))
assert not types.is_list(pa.int32())
def test_is_dictionary():
assert types.is_dictionary(
pa.dictionary(pa.int32(),
pa.array(['a', 'b', 'c'])))
assert not types.is_dictionary(pa.int32())
def test_is_nested_or_struct():
struct_ex = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())])
assert types.is_struct(struct_ex)
assert not types.is_struct(pa.list_(pa.int32()))
assert types.is_nested(struct_ex)
assert types.is_nested(pa.list_(pa.int32()))
assert not types.is_nested(pa.int32())
def test_is_union():
for mode in [pa.lib.UnionMode_SPARSE, pa.lib.UnionMode_DENSE]:
assert types.is_union(pa.union([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())],
mode=mode))
assert not types.is_union(pa.list_(pa.int32()))
# TODO(wesm): is_map, once implemented
def test_is_binary_string():
assert types.is_binary(pa.binary())
assert not types.is_binary(pa.string())
assert types.is_string(pa.string())
assert types.is_unicode(pa.string())
assert not types.is_string(pa.binary())
assert types.is_fixed_size_binary(pa.binary(5))
assert not types.is_fixed_size_binary(pa.binary())
def test_is_temporal_date_time_timestamp():
date_types = [pa.date32(), pa.date64()]
time_types = [pa.time32('s'), pa.time64('ns')]
timestamp_types = [pa.timestamp('ms')]
for case in date_types + time_types + timestamp_types:
assert types.is_temporal(case)
for case in date_types:
assert types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
for case in time_types:
assert types.is_time(case)
assert not types.is_date(case)
assert not types.is_timestamp(case)
for case in timestamp_types:
assert types.is_timestamp(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_temporal(pa.int32())
def test_timestamp_type():
# See ARROW-1683
assert isinstance(pa.timestamp('ns'), pa.TimestampType)
def test_union_type():
def check_fields(ty, fields):
assert ty.num_children == len(fields)
assert [ty[i] for i in range(ty.num_children)] == fields
fields = [pa.field('x', pa.list_(pa.int32())),
pa.field('y', pa.binary())]
for mode in ('sparse', pa.lib.UnionMode_SPARSE):
ty = pa.union(fields, mode=mode)
assert ty.mode == 'sparse'
check_fields(ty, fields)
for mode in ('dense', pa.lib.UnionMode_DENSE):
ty = pa.union(fields, mode=mode)
assert ty.mode == 'dense'
check_fields(ty, fields)
for mode in ('unknown', 2):
with pytest.raises(ValueError, match='Invalid union mode'):
pa.union(fields, mode=mode)
def test_types_hashable():
in_dict = {}
for i, type_ in enumerate(MANY_TYPES):
assert hash(type_) == hash(type_)
in_dict[type_] = i
assert in_dict[type_] == i
assert len(in_dict) == len(MANY_TYPES)
def test_types_picklable():
for ty in MANY_TYPES:
data = pickle.dumps(ty)
assert pickle.loads(data) == ty
@pytest.mark.parametrize('t,check_func', [
(pa.date32(), types.is_date32),
(pa.date64(), types.is_date64),
(pa.time32('s'), types.is_time32),
(pa.time64('ns'), types.is_time64),
(pa.int8(), types.is_int8),
(pa.int16(), types.is_int16),
(pa.int32(), types.is_int32),
(pa.int64(), types.is_int64),
(pa.uint8(), types.is_uint8),
(pa.uint16(), types.is_uint16),
(pa.uint32(), types.is_uint32),
(pa.uint64(), types.is_uint64),
(pa.float16(), types.is_float16),
(pa.float32(), types.is_float32),
(pa.float64(), types.is_float64)
])
def test_exact_primitive_types(t, check_func):
assert check_func(t)
def test_fixed_size_binary_byte_width():
ty = pa.binary(5)
assert ty.byte_width == 5
def test_decimal_byte_width():
ty = pa.decimal128(19, 4)
assert ty.byte_width == 16
|
phonnz/azure-storage-python
|
tests/test_storage_queue.py
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import datetime
import unittest
from requests import Session
from azure.storage import (
AccessPolicy,
SharedAccessPolicy,
SignedIdentifier,
SignedIdentifiers,
)
from azure.storage.queue import (
QueueService,
QueueSharedAccessPermissions,
)
from azure.common import (
AzureHttpError,
AzureConflictHttpError,
AzureMissingResourceHttpError,
)
from tests.common_recordingtestcase import (
TestMode,
record,
)
from tests.storage_testcase import StorageTestCase
#------------------------------------------------------------------------------
TEST_QUEUE_PREFIX = 'mytestqueue'
#------------------------------------------------------------------------------
class StorageQueueTest(StorageTestCase):
def setUp(self):
super(StorageQueueTest, self).setUp()
self.qs = self._create_storage_service(QueueService, self.settings)
self.test_queues = []
self.creatable_queues = []
for i in range(10):
self.test_queues.append(self.get_resource_name(TEST_QUEUE_PREFIX + str(i)))
for i in range(4):
self.creatable_queues.append(
self.get_resource_name('mycreatablequeue' + str(i)))
if not self.is_playback():
for queue_name in self.test_queues:
self.qs.create_queue(queue_name)
def tearDown(self):
if not self.is_playback():
for queue_name in self.test_queues:
try:
self.qs.delete_queue(queue_name)
except:
pass
for queue_name in self.creatable_queues:
try:
self.qs.delete_queue(queue_name)
except:
pass
return super(StorageQueueTest, self).tearDown()
def _get_shared_access_policy(self, permission):
date_format = "%Y-%m-%dT%H:%M:%SZ"
start = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
expiry = start + datetime.timedelta(hours=1)
return SharedAccessPolicy(
AccessPolicy(
start.strftime(date_format),
expiry.strftime(date_format),
permission
)
)
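# Hedged illustration (added): with the "%Y-%m-%dT%H:%M:%SZ" format the
# returned policy wraps ISO-8601 UTC timestamps, e.g.
#   start  = '2015-06-01T11:59:00Z'
#   expiry = '2015-06-01T12:59:00Z'
# around the requested permission string.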
@record
def test_get_service_properties(self):
# This api doesn't apply to local storage
if self.qs.use_local_storage:
return
# Action
properties = self.qs.get_queue_service_properties()
# Asserts
self.assertIsNotNone(properties)
self.assertIsNotNone(properties.logging)
self.assertIsNotNone(properties.logging.retention_policy)
self.assertIsNotNone(properties.logging.version)
self.assertIsNotNone(properties.hour_metrics)
self.assertIsNotNone(properties.hour_metrics.retention_policy)
self.assertIsNotNone(properties.hour_metrics.version)
self.assertIsNotNone(properties.minute_metrics)
self.assertIsNotNone(properties.minute_metrics.retention_policy)
self.assertIsNotNone(properties.minute_metrics.version)
@record
def test_set_service_properties(self):
# This api doesn't apply to local storage
if self.qs.use_local_storage:
return
# Action
queue_properties = self.qs.get_queue_service_properties()
queue_properties.logging.read = True
self.qs.set_queue_service_properties(queue_properties)
properties = self.qs.get_queue_service_properties()
# Asserts
self.assertIsNotNone(properties)
self.assertIsNotNone(properties.logging)
self.assertIsNotNone(properties.logging.retention_policy)
self.assertIsNotNone(properties.logging.version)
self.assertIsNotNone(properties.hour_metrics)
self.assertIsNotNone(properties.hour_metrics.retention_policy)
self.assertIsNotNone(properties.hour_metrics.version)
self.assertIsNotNone(properties.minute_metrics)
self.assertIsNotNone(properties.minute_metrics.retention_policy)
self.assertIsNotNone(properties.minute_metrics.version)
self.assertTrue(properties.logging.read)
@record
def test_create_queue(self):
# Action
self.qs.create_queue(self.creatable_queues[0])
result = self.qs.get_queue_metadata(self.creatable_queues[0])
self.qs.delete_queue(self.creatable_queues[0])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(result['x-ms-approximate-messages-count'], '0')
@record
def test_create_queue_already_exist(self):
# Action
created1 = self.qs.create_queue(self.creatable_queues[0])
created2 = self.qs.create_queue(self.creatable_queues[0])
# Asserts
self.assertTrue(created1)
self.assertFalse(created2)
@record
def test_create_queue_fail_on_exist(self):
# Action
created = self.qs.create_queue(self.creatable_queues[0], None, True)
with self.assertRaises(AzureConflictHttpError):
self.qs.create_queue(self.creatable_queues[0], None, True)
# Asserts
self.assertTrue(created)
@record
def test_create_queue_with_options(self):
# Action
self.qs.create_queue(
self.creatable_queues[1],
x_ms_meta_name_values={'val1': 'test', 'val2': 'blah'})
result = self.qs.get_queue_metadata(self.creatable_queues[1])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(3, len(result))
self.assertEqual(result['x-ms-approximate-messages-count'], '0')
self.assertEqual('test', result['x-ms-meta-val1'])
self.assertEqual('blah', result['x-ms-meta-val2'])
@record
def test_delete_queue_not_exist(self):
# Action
deleted = self.qs.delete_queue(self.creatable_queues[0])
# Asserts
self.assertFalse(deleted)
@record
def test_delete_queue_fail_not_exist_not_exist(self):
# Action
with self.assertRaises(AzureMissingResourceHttpError):
self.qs.delete_queue(self.creatable_queues[0], True)
# Asserts
@record
def test_delete_queue_fail_not_exist_already_exist(self):
# Action
created = self.qs.create_queue(self.creatable_queues[0])
deleted = self.qs.delete_queue(self.creatable_queues[0], True)
# Asserts
self.assertTrue(created)
self.assertTrue(deleted)
@record
def test_list_queues(self):
# Action
queues = self.qs.list_queues()
for queue in queues:
pass
# Asserts
self.assertIsNotNone(queues)
self.assertEqual('', queues.marker)
self.assertEqual(0, queues.max_results)
self.assertTrue(len(self.test_queues) <= len(queues))
@record
def test_list_queues_with_options(self):
# Action
queues_1 = self.qs.list_queues(prefix=TEST_QUEUE_PREFIX, maxresults=3)
queues_2 = self.qs.list_queues(
prefix=TEST_QUEUE_PREFIX,
marker=queues_1.next_marker,
include='metadata')
# Asserts
self.assertIsNotNone(queues_1)
self.assertEqual(3, len(queues_1))
self.assertEqual(3, queues_1.max_results)
self.assertEqual('', queues_1.marker)
self.assertIsNotNone(queues_1[0])
self.assertIsNone(queues_1[0].metadata)
self.assertNotEqual('', queues_1[0].name)
# Asserts
self.assertIsNotNone(queues_2)
self.assertTrue(len(self.test_queues) - 3 <= len(queues_2))
self.assertEqual(0, queues_2.max_results)
self.assertEqual(queues_1.next_marker, queues_2.marker)
self.assertIsNotNone(queues_2[0])
self.assertIsNotNone(queues_2[0].metadata)
self.assertNotEqual('', queues_2[0].name)
@record
def test_set_queue_metadata(self):
# Action
self.qs.create_queue(self.creatable_queues[2])
self.qs.set_queue_metadata(
self.creatable_queues[2],
x_ms_meta_name_values={'val1': 'test', 'val2': 'blah'})
result = self.qs.get_queue_metadata(self.creatable_queues[2])
self.qs.delete_queue(self.creatable_queues[2])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(3, len(result))
self.assertEqual('0', result['x-ms-approximate-messages-count'])
self.assertEqual('test', result['x-ms-meta-val1'])
self.assertEqual('blah', result['x-ms-meta-val2'])
@record
def test_put_message(self):
# Action. No exception means pass. No asserts needed.
self.qs.put_message(self.test_queues[0], 'message1')
self.qs.put_message(self.test_queues[0], 'message2')
self.qs.put_message(self.test_queues[0], 'message3')
self.qs.put_message(self.test_queues[0], 'message4')
@record
def test_get_messages(self):
# Action
self.qs.put_message(self.test_queues[1], 'message1')
self.qs.put_message(self.test_queues[1], 'message2')
self.qs.put_message(self.test_queues[1], 'message3')
self.qs.put_message(self.test_queues[1], 'message4')
result = self.qs.get_messages(self.test_queues[1])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
@record
def test_get_messages_with_options(self):
# Action
self.qs.put_message(self.test_queues[2], 'message1')
self.qs.put_message(self.test_queues[2], 'message2')
self.qs.put_message(self.test_queues[2], 'message3')
self.qs.put_message(self.test_queues[2], 'message4')
result = self.qs.get_messages(
self.test_queues[2], numofmessages=4, visibilitytimeout=20)
# Asserts
self.assertIsNotNone(result)
self.assertEqual(4, len(result))
for message in result:
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
@record
def test_peek_messages(self):
# Action
self.qs.put_message(self.test_queues[3], 'message1')
self.qs.put_message(self.test_queues[3], 'message2')
self.qs.put_message(self.test_queues[3], 'message3')
self.qs.put_message(self.test_queues[3], 'message4')
result = self.qs.peek_messages(self.test_queues[3])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertEqual('', message.pop_receipt)
self.assertEqual('0', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertEqual('', message.time_next_visible)
@record
def test_peek_messages_with_options(self):
# Action
self.qs.put_message(self.test_queues[4], 'message1')
self.qs.put_message(self.test_queues[4], 'message2')
self.qs.put_message(self.test_queues[4], 'message3')
self.qs.put_message(self.test_queues[4], 'message4')
result = self.qs.peek_messages(self.test_queues[4], numofmessages=4)
# Asserts
self.assertIsNotNone(result)
self.assertEqual(4, len(result))
for message in result:
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertNotEqual('', message.message_text)
self.assertEqual('', message.pop_receipt)
self.assertEqual('0', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertEqual('', message.time_next_visible)
@record
def test_clear_messages(self):
# Action
self.qs.put_message(self.test_queues[5], 'message1')
self.qs.put_message(self.test_queues[5], 'message2')
self.qs.put_message(self.test_queues[5], 'message3')
self.qs.put_message(self.test_queues[5], 'message4')
self.qs.clear_messages(self.test_queues[5])
result = self.qs.peek_messages(self.test_queues[5])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(0, len(result))
@record
def test_delete_message(self):
# Action
self.qs.put_message(self.test_queues[6], 'message1')
self.qs.put_message(self.test_queues[6], 'message2')
self.qs.put_message(self.test_queues[6], 'message3')
self.qs.put_message(self.test_queues[6], 'message4')
result = self.qs.get_messages(self.test_queues[6])
self.qs.delete_message(
self.test_queues[6], result[0].message_id, result[0].pop_receipt)
result2 = self.qs.get_messages(self.test_queues[6], numofmessages=32)
# Asserts
self.assertIsNotNone(result2)
self.assertEqual(3, len(result2))
@record
def test_update_message(self):
# Action
self.qs.put_message(self.test_queues[7], 'message1')
list_result1 = self.qs.get_messages(self.test_queues[7])
self.qs.update_message(self.test_queues[7],
list_result1[0].message_id,
'new text',
list_result1[0].pop_receipt,
visibilitytimeout=0)
list_result2 = self.qs.get_messages(self.test_queues[7])
# Asserts
self.assertIsNotNone(list_result2)
message = list_result2[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('new text', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('2', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
def test_sas_read(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.READ),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.peek_messages(self.test_queues[0])
# Assert
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
def test_sas_add(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.ADD),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.put_message(self.test_queues[0], 'addedmessage')
# Assert
result = self.qs.get_messages(self.test_queues[0])
self.assertEqual('addedmessage', result[0].message_text)
def test_sas_update(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.UPDATE),
)
result = self.qs.get_messages(self.test_queues[0])
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
service.update_message(
self.test_queues[0],
result[0].message_id,
'updatedmessage1',
result[0].pop_receipt,
visibilitytimeout=0,
)
# Assert
result = self.qs.get_messages(self.test_queues[0])
self.assertEqual('updatedmessage1', result[0].message_text)
def test_sas_process(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
self._get_shared_access_policy(QueueSharedAccessPermissions.PROCESS),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.get_messages(self.test_queues[0])
# Assert
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
def test_sas_signed_identifier(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recordingfile(self.test_mode):
return
# Arrange
si = SignedIdentifier()
si.id = 'testid'
si.access_policy.start = '2011-10-11'
si.access_policy.expiry = '2018-10-12'
si.access_policy.permission = QueueSharedAccessPermissions.READ
identifiers = SignedIdentifiers()
identifiers.signed_identifiers.append(si)
resp = self.qs.set_queue_acl(self.test_queues[0], identifiers)
self.qs.put_message(self.test_queues[0], 'message1')
token = self.qs.generate_shared_access_signature(
self.test_queues[0],
SharedAccessPolicy(signed_identifier=si.id),
)
# Act
service = QueueService(
account_name=self.settings.STORAGE_ACCOUNT_NAME,
sas_token=token,
)
self._set_service_options(service, self.settings)
result = service.peek_messages(self.test_queues[0])
# Assert
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual('message1', message.message_text)
@record
def test_get_queue_acl(self):
# Arrange
# Act
acl = self.qs.get_queue_acl(self.test_queues[0])
# Assert
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
@record
def test_get_queue_acl_iter(self):
# Arrange
# Act
acl = self.qs.get_queue_acl(self.test_queues[0])
for signed_identifier in acl:
pass
# Assert
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
self.assertEqual(len(acl), 0)
@record
def test_get_queue_acl_with_non_existing_queue(self):
# Arrange
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.qs.get_queue_acl(self.creatable_queues[0])
# Assert
@record
def test_set_queue_acl(self):
# Arrange
# Act
resp = self.qs.set_queue_acl(self.test_queues[0])
# Assert
self.assertIsNone(resp)
acl = self.qs.get_queue_acl(self.test_queues[0])
self.assertIsNotNone(acl)
@record
def test_set_queue_acl_with_empty_signed_identifiers(self):
# Arrange
# Act
identifiers = SignedIdentifiers()
resp = self.qs.set_queue_acl(self.test_queues[0], identifiers)
# Assert
self.assertIsNone(resp)
acl = self.qs.get_queue_acl(self.test_queues[0])
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
@record
def test_set_queue_acl_with_signed_identifiers(self):
# Arrange
# Act
si = SignedIdentifier()
si.id = 'testid'
si.access_policy.start = '2011-10-11'
si.access_policy.expiry = '2011-10-12'
si.access_policy.permission = QueueSharedAccessPermissions.READ
identifiers = SignedIdentifiers()
identifiers.signed_identifiers.append(si)
resp = self.qs.set_queue_acl(self.test_queues[0], identifiers)
# Assert
self.assertIsNone(resp)
acl = self.qs.get_queue_acl(self.test_queues[0])
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 1)
self.assertEqual(len(acl), 1)
self.assertEqual(acl.signed_identifiers[0].id, 'testid')
self.assertEqual(acl[0].id, 'testid')
@record
def test_set_queue_acl_with_non_existing_queue(self):
# Arrange
# Act
with self.assertRaises(AzureMissingResourceHttpError):
self.qs.set_queue_acl(self.creatable_queues[0])
# Assert
@record
def test_with_filter(self):
# Single filter
called = []
def my_filter(request, next):
called.append(True)
return next(request)
qc = self.qs.with_filter(my_filter)
qc.put_message(self.test_queues[7], 'message1')
self.assertTrue(called)
del called[:]
# Chained filters
def filter_a(request, next):
called.append('a')
return next(request)
def filter_b(request, next):
called.append('b')
return next(request)
qc = self.qs.with_filter(filter_a).with_filter(filter_b)
qc.put_message(self.test_queues[7], 'message1')
self.assertEqual(called, ['b', 'a'])
@record
def test_unicode_create_queue_unicode_name(self):
# Action
self.creatable_queues[0] = u'啊齄丂狛狜'
with self.assertRaises(AzureHttpError):
# not supported - queue name must be alphanumeric, lowercase
self.qs.create_queue(self.creatable_queues[0])
# Asserts
@record
def test_unicode_get_messages_unicode_data(self):
# Action
self.qs.put_message(self.test_queues[1], u'message1㚈')
result = self.qs.get_messages(self.test_queues[1])
# Asserts
self.assertIsNotNone(result)
self.assertEqual(1, len(result))
message = result[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual(u'message1㚈', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('1', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
@record
def test_unicode_update_message_unicode_data(self):
# Action
self.qs.put_message(self.test_queues[7], 'message1')
list_result1 = self.qs.get_messages(self.test_queues[7])
self.qs.update_message(self.test_queues[7],
list_result1[0].message_id,
u'啊齄丂狛狜',
list_result1[0].pop_receipt,
visibilitytimeout=0)
list_result2 = self.qs.get_messages(self.test_queues[7])
# Asserts
self.assertIsNotNone(list_result2)
message = list_result2[0]
self.assertIsNotNone(message)
self.assertNotEqual('', message.message_id)
self.assertEqual(u'啊齄丂狛狜', message.message_text)
self.assertNotEqual('', message.pop_receipt)
self.assertEqual('2', message.dequeue_count)
self.assertNotEqual('', message.insertion_time)
self.assertNotEqual('', message.expiration_time)
self.assertNotEqual('', message.time_next_visible)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
krocat/ToonHA
|
toon/switch.py
|
"""
Support for Eneco Slimmer stekkers (Smart Plugs).
This provides control of the Z-Wave smart plugs that Toon manages.
"""
import logging
from homeassistant.components.switch import SwitchDevice
import custom_components.toon as toon_main
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup discovered Smart Plugs."""
_toon_main = hass.data[toon_main.TOON_HANDLE]
switch_items = []
for plug in _toon_main.toon.smartplugs:
switch_items.append(EnecoSmartPlug(hass, plug))
add_devices_callback(switch_items)
class EnecoSmartPlug(SwitchDevice):
"""Representation of a Smart Plug."""
def __init__(self, hass, plug):
"""Initialize the Smart Plug."""
self.smartplug = plug
self.toon_data_store = hass.data[toon_main.TOON_HANDLE]
@property
def should_poll(self):
"""No polling needed with subscriptions."""
return True
@property
def unique_id(self):
"""Return the ID of this switch."""
return self.smartplug.device_uuid
@property
def name(self):
"""Return the name of the switch if any."""
return self.smartplug.name
@property
def current_power_w(self):
"""Current power usage in W."""
return self.toon_data_store.get_data('current_power', self.name)
@property
def today_energy_kwh(self):
"""Today total energy usage in kWh."""
return self.toon_data_store.get_data('today_energy', self.name)
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self.toon_data_store.get_data('current_state', self.name)
@property
def available(self):
"""True if switch is available."""
return self.smartplug.can_toggle
def turn_on(self, **kwargs):
"""Turn the switch on."""
return self.smartplug.turn_on()
def turn_off(self):
"""Turn the switch off."""
return self.smartplug.turn_off()
def update(self):
"""Update state."""
self.toon_data_store.update()
|
quattor/aquilon
|
lib/aquilon/worker/commands/unbind_client_cluster.py
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq unbind client --cluster`."""
from aquilon.aqdb.model import Cluster
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.unbind_client_hostname import \
CommandUnbindClientHostname
class CommandUnbindClientCluster(CommandUnbindClientHostname):
required_parameters = ["cluster", "service"]
def get_dbobj(self, session, cluster=None, **_):
return Cluster.get_unique(session, cluster, compel=True)
|
riscmaster/risc_maap
|
risc_control/src/IRIS_DF_Controller.py
|
#!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan
Last updated: May 2015
File name: IRIS_DF_Controller.py
Organization: RISC Lab, Utah State University
Notes:
======================================================'''
import roslib; roslib.load_manifest('risc_msgs')
import rospy
from math import *
import numpy as np
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
from std_msgs.msg import Bool
from roscopter.msg import Status
#=====================#
# Gain Matrices #
#=====================#
K = np.matrix([[ 1.8, 0, 0, 1.4, 0, 0, 0],\
[ 0, 1.8, 0, 0, 1.4, 0, 0],\
[ 0, 0, 3, 0, 0, 5, 0],\
[ 0, 0, 0, 0, 0, 0,.5]])
#========================#
# Globals #
#========================#
nominal_thrust = 0 # thrust necessary to maintain hover given battery level
phi_scale = 3.053261127645355
phi_trim = 0.0  # 0.058941904209906
theta_scale = 3.815398742249453
theta_trim = 0.0  # -0.091216767651723
ctrl_status = False
states = Cortex()
states.Obj = [States()]*1
traj = Trajectories()
traj.Obj = [Trajectory()]*1
euler_max = 45*np.pi/180
max_yaw_rate = .3490659 #in radians/sec
rate = 45 # Hz
image = 0
start_time = 0
#==================#
# Publishers #
#==================#
pub_ctrl = rospy.Publisher('/controls', Controls, queue_size = 1)
#========================#
# Get Cortex States #
#========================#
def GetStates(S):
global states
states = S
#=====================#
# Get Trajectory #
#=====================#
def GetTraj(S):
global traj
traj = S
#=========================#
# Get Battery Status #
#=========================#
def GetBatt(S):
global nominal_thrust
B = S.battery_remaining
# coefficients for fourth order fit
# determined 11 May 2015 by Spencer Maughan and Ishmaal Erekson
c0 = 0.491674747062374
c1 = -0.024809293286468
c2 = 0.000662710609466
c3 = -0.000008160593348
c4 = 0.000000033699651
nominal_thrust = c0+c1*B+c2*B**2+c3*B**3+c4*B**4
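    # Equivalent formulation (illustrative): numpy evaluates the same fit with
    # the highest-order coefficient first, i.e.
    #     nominal_thrust = np.polyval([c4, c3, c2, c1, c0], B)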
#============================#
# Get Controller Status #
#============================#
def GetStatus(S):
global ctrl_status
ctrl_status = S.data
#========================#
# Basic Controller #
#========================#
def Basic_Controller():
    global states, euler_max, max_yaw_rate, pub_ctrl, K, traj
Ctrl = Controls()
Ctrl.Obj = [Control()]*1
Ctrl.header.stamp = states.header.stamp
    g = 9.80665 # standard acceleration due to gravity, m/s^2
m = 1.282 # IRIS mass in kg
#===================================#
# Get State Trajectory Errors #
#===================================#
if states.Obj[0].visible:
X = np.asmatrix(np.zeros((7,1)))
X[0] = traj.Obj[0].x-states.Obj[0].x
X[1] = traj.Obj[0].y-states.Obj[0].y
X[2] = traj.Obj[0].z-states.Obj[0].z
X[3] = traj.Obj[0].xdot-states.Obj[0].u
X[4] = traj.Obj[0].ydot-states.Obj[0].v
X[5] = traj.Obj[0].zdot-states.Obj[0].w
X[6] = traj.Obj[0].psi-states.Obj[0].psi*np.pi/180
#============================================#
# Differential Flatness Control Input #
#============================================#
# LQR input
utilde = -K*X
# required input
u_r = np.asmatrix(np.zeros((4,1)))
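        # combine feedback and (currently zero) feedforward; the constant
        # vector compensates gravity on the z channel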
u = utilde+u_r-np.matrix([[0],[0],[9.81],[0]])
#==================================#
# Rotate to Vehicle 1 Frame #
#==================================#
psi = states.Obj[0].psi*np.pi/180
rotZ = np.matrix([[cos(psi), sin(psi), 0],[-sin(psi), cos(psi), 0],[0, 0, 1]])
Cart = np.matrix([[1, 0, 0],[0, -1, 0],[0, 0, -1]])
u[:-1] = Cart*rotZ*u[:-1]
#===================================#
# Normalize given the Thrust #
#===================================#
T = sqrt(u[0:3].T*u[0:3])
u[:-1] = np.divide(u[:-1],-T)
#==================#
# Set Controls #
#==================#
# Controls for Ardrone
# -phi = right... +phi = left
# -theta = back... +theta = forward
# -psi = right... +psi = left
global phi_trim,theta_trim,phi_scale,theta_scale
phi_d = (asin(u[1,-1]))
theta_d = (-asin(u[0,-1]))
ctrl = Control()
ctrl.name = states.Obj[0].name
ctrl.phi = phi_trim + phi_scale*phi_d
ctrl.theta = theta_trim + theta_scale*theta_d
ctrl.psi = -u[3,-1]/max_yaw_rate
global nominal_thrust
T_d = nominal_thrust+(T-g)/g
ctrl.T = T_d
Ctrl.Obj[0] = ctrl
Ctrl.header = states.header
#rospy.loginfo("latency = %f",states.header.stamp.to_sec()-rospy.get_time())
pub_ctrl.publish(Ctrl)
#===================#
# Main #
#===================#
if __name__=='__main__':
    rospy.init_node('IRIS_DF_Controller')
    #=====================================#
    #    Set up Publish/Subscribe Loop    #
    #=====================================#
    # Subscribe once, outside the loop; re-creating the subscribers on every
    # iteration would leak connections.
    sub_cortex = rospy.Subscriber('/cortex_raw' , Cortex, GetStates, queue_size=1, buff_size=2**24)
    sub_traj = rospy.Subscriber('/trajectory' , Trajectories, GetTraj, queue_size=1, buff_size=2**24)
    sub_Batt = rospy.Subscriber('/apm/status' , Status, GetBatt)
    sub_status = rospy.Subscriber('/controller_status' , Bool, GetStatus)
    r = rospy.Rate(rate)
    while not rospy.is_shutdown():
        Basic_Controller()
        r.sleep()
|
sklam/numba
|
numba/core/imputils.py
|
"""
Utilities to simplify the boilerplate for native lowering.
"""
import collections
import contextlib
import inspect
import functools
from enum import Enum
from numba.core import typing, types, utils, cgutils
from numba.core.typing.templates import BaseRegistryLoader
class Registry(object):
"""
A registry of function and attribute implementations.
"""
def __init__(self):
self.functions = []
self.getattrs = []
self.setattrs = []
self.casts = []
self.constants = []
def lower(self, func, *argtys):
"""
Decorate an implementation of *func* for the given argument types.
*func* may be an actual global function object, or any
pseudo-function supported by Numba, such as "getitem".
The decorated implementation has the signature
(context, builder, sig, args).
"""
def decorate(impl):
self.functions.append((impl, func, argtys))
return impl
return decorate
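    # Illustrative sketch (names assumed, not part of this module):
    #
    #     registry = Registry()
    #
    #     @registry.lower(len, some_type)
    #     def len_impl(context, builder, sig, args):
    #         [val] = args
    #         return ...  # an LLVM value carrying the result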
def _decorate_attr(self, impl, ty, attr, impl_list, decorator):
real_impl = decorator(impl, ty, attr)
impl_list.append((real_impl, attr, real_impl.signature))
return impl
def lower_getattr(self, ty, attr):
"""
Decorate an implementation of __getattr__ for type *ty* and
the attribute *attr*.
The decorated implementation will have the signature
(context, builder, typ, val).
"""
def decorate(impl):
return self._decorate_attr(impl, ty, attr, self.getattrs,
_decorate_getattr)
return decorate
def lower_getattr_generic(self, ty):
"""
Decorate the fallback implementation of __getattr__ for type *ty*.
The decorated implementation will have the signature
(context, builder, typ, val, attr). The implementation is
called for attributes which haven't been explicitly registered
with lower_getattr().
"""
return self.lower_getattr(ty, None)
def lower_setattr(self, ty, attr):
"""
Decorate an implementation of __setattr__ for type *ty* and
the attribute *attr*.
The decorated implementation will have the signature
(context, builder, sig, args).
"""
def decorate(impl):
return self._decorate_attr(impl, ty, attr, self.setattrs,
_decorate_setattr)
return decorate
def lower_setattr_generic(self, ty):
"""
Decorate the fallback implementation of __setattr__ for type *ty*.
The decorated implementation will have the signature
(context, builder, sig, args, attr). The implementation is
called for attributes which haven't been explicitly registered
with lower_setattr().
"""
return self.lower_setattr(ty, None)
def lower_cast(self, fromty, toty):
"""
Decorate the implementation of implicit conversion between
*fromty* and *toty*.
The decorated implementation will have the signature
(context, builder, fromty, toty, val).
"""
def decorate(impl):
self.casts.append((impl, (fromty, toty)))
return impl
return decorate
def lower_constant(self, ty):
"""
Decorate the implementation for creating a constant of type *ty*.
The decorated implementation will have the signature
(context, builder, ty, pyval).
"""
def decorate(impl):
self.constants.append((impl, (ty,)))
return impl
return decorate
class RegistryLoader(BaseRegistryLoader):
"""
An incremental loader for a target registry.
"""
registry_items = ('functions', 'getattrs', 'setattrs', 'casts', 'constants')
# Global registry for implementations of builtin operations
# (functions, attributes, type casts)
builtin_registry = Registry()
lower_builtin = builtin_registry.lower
lower_getattr = builtin_registry.lower_getattr
lower_getattr_generic = builtin_registry.lower_getattr_generic
lower_setattr = builtin_registry.lower_setattr
lower_setattr_generic = builtin_registry.lower_setattr_generic
lower_cast = builtin_registry.lower_cast
lower_constant = builtin_registry.lower_constant
def _decorate_getattr(impl, ty, attr):
real_impl = impl
if attr is not None:
def res(context, builder, typ, value, attr):
return real_impl(context, builder, typ, value)
else:
def res(context, builder, typ, value, attr):
return real_impl(context, builder, typ, value, attr)
res.signature = (ty,)
res.attr = attr
return res
def _decorate_setattr(impl, ty, attr):
real_impl = impl
if attr is not None:
def res(context, builder, sig, args, attr):
return real_impl(context, builder, sig, args)
else:
def res(context, builder, sig, args, attr):
return real_impl(context, builder, sig, args, attr)
res.signature = (ty, types.Any)
res.attr = attr
return res
def fix_returning_optional(context, builder, sig, status, retval):
# Reconstruct optional return type
if isinstance(sig.return_type, types.Optional):
value_type = sig.return_type.type
optional_none = context.make_optional_none(builder, value_type)
retvalptr = cgutils.alloca_once_value(builder, optional_none)
with builder.if_then(builder.not_(status.is_none)):
optional_value = context.make_optional_value(
builder, value_type, retval,
)
builder.store(optional_value, retvalptr)
retval = builder.load(retvalptr)
return retval
def user_function(fndesc, libs):
"""
A wrapper inserting code calling Numba-compiled *fndesc*.
"""
def imp(context, builder, sig, args):
func = context.declare_function(builder.module, fndesc)
# env=None assumes this is a nopython function
status, retval = context.call_conv.call_function(
builder, func, fndesc.restype, fndesc.argtypes, args)
with cgutils.if_unlikely(builder, status.is_error):
context.call_conv.return_status_propagate(builder, status)
assert sig.return_type == fndesc.restype
# Reconstruct optional return type
retval = fix_returning_optional(context, builder, sig, status, retval)
# If the data representations don't match up
if retval.type != context.get_value_type(sig.return_type):
msg = "function returned {0} but expect {1}"
raise TypeError(msg.format(retval.type, sig.return_type))
return impl_ret_new_ref(context, builder, fndesc.restype, retval)
imp.signature = fndesc.argtypes
imp.libs = tuple(libs)
return imp
def user_generator(gendesc, libs):
"""
A wrapper inserting code calling Numba-compiled *gendesc*.
"""
def imp(context, builder, sig, args):
func = context.declare_function(builder.module, gendesc)
# env=None assumes this is a nopython function
status, retval = context.call_conv.call_function(
builder, func, gendesc.restype, gendesc.argtypes, args)
# Return raw status for caller to process StopIteration
return status, retval
imp.libs = tuple(libs)
return imp
def iterator_impl(iterable_type, iterator_type):
"""
    Decorate a given class as implementing *iterator_type*
(by providing an `iternext()` method).
"""
def wrapper(cls):
# These are unbound methods
iternext = cls.iternext
@iternext_impl(RefType.BORROWED)
def iternext_wrapper(context, builder, sig, args, result):
(value,) = args
iterobj = cls(context, builder, value)
return iternext(iterobj, context, builder, result)
lower_builtin('iternext', iterator_type)(iternext_wrapper)
return cls
return wrapper
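# Illustrative sketch (type names assumed): a class opts in by providing an
# iternext() method and being decorated:
#
#     @iterator_impl(MyIterableType, MyIteratorType)
#     class MyIterator(object):
#         def __init__(self, context, builder, value):
#             ...  # unpack the iterator state from *value*
#         def iternext(self, context, builder, result):
#             result.set_valid(...)   # flag whether a value is available
#             result.yield_(...)      # provide the yielded LLVM value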
class _IternextResult(object):
"""
A result wrapper for iteration, passed by iternext_impl() into the
wrapped function.
"""
__slots__ = ('_context', '_builder', '_pairobj')
def __init__(self, context, builder, pairobj):
self._context = context
self._builder = builder
self._pairobj = pairobj
def set_exhausted(self):
"""
Mark the iterator as exhausted.
"""
self._pairobj.second = self._context.get_constant(types.boolean, False)
def set_valid(self, is_valid=True):
"""
Mark the iterator as valid according to *is_valid* (which must
        be either a Python boolean or an LLVM inst).
"""
if is_valid in (False, True):
is_valid = self._context.get_constant(types.boolean, is_valid)
self._pairobj.second = is_valid
def yield_(self, value):
"""
        Mark the iterator as yielding the given *value* (an LLVM inst).
"""
self._pairobj.first = value
def is_valid(self):
"""
Return whether the iterator is marked valid.
"""
return self._context.get_argument_value(self._builder,
types.boolean,
self._pairobj.second)
def yielded_value(self):
"""
Return the iterator's yielded value, if any.
"""
return self._pairobj.first
class RefType(Enum):
    """
    Enumerate the reference type
    """
    # A new reference
    NEW = 1
    # A borrowed reference
    BORROWED = 2
    # An untracked reference
    UNTRACKED = 3
def iternext_impl(ref_type=None):
"""
Wrap the given iternext() implementation so that it gets passed
an _IternextResult() object easing the returning of the iternext()
result pair.
    ref_type: a numba.core.imputils.RefType member selecting how the yielded
    reference is accounted for.
The wrapped function will be called with the following signature:
(context, builder, sig, args, iternext_result)
"""
if ref_type not in [x for x in RefType]:
raise ValueError("ref_type must be an enum member of imputils.RefType")
def outer(func):
def wrapper(context, builder, sig, args):
pair_type = sig.return_type
pairobj = context.make_helper(builder, pair_type)
func(context, builder, sig, args,
_IternextResult(context, builder, pairobj))
if ref_type == RefType.NEW:
impl_ret = impl_ret_new_ref
elif ref_type == RefType.BORROWED:
impl_ret = impl_ret_borrowed
elif ref_type == RefType.UNTRACKED:
impl_ret = impl_ret_untracked
else:
raise ValueError("Unknown ref_type encountered")
return impl_ret(context, builder,
pair_type, pairobj._getvalue())
return wrapper
return outer
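# Illustrative sketch: registrations typically stack this with lower_builtin,
# e.g.
#
#     @lower_builtin('iternext', some_iterator_type)
#     @iternext_impl(RefType.BORROWED)
#     def iternext_thing(context, builder, sig, args, result):
#         result.set_valid(...)
#         result.yield_(...)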
def call_getiter(context, builder, iterable_type, val):
"""
Call the `getiter()` implementation for the given *iterable_type*
of value *val*, and return the corresponding LLVM inst.
"""
getiter_sig = typing.signature(iterable_type.iterator_type, iterable_type)
getiter_impl = context.get_function('getiter', getiter_sig)
return getiter_impl(builder, (val,))
def call_iternext(context, builder, iterator_type, val):
"""
Call the `iternext()` implementation for the given *iterator_type*
of value *val*, and return a convenience _IternextResult() object
reflecting the results.
"""
itemty = iterator_type.yield_type
pair_type = types.Pair(itemty, types.boolean)
iternext_sig = typing.signature(pair_type, iterator_type)
iternext_impl = context.get_function('iternext', iternext_sig)
val = iternext_impl(builder, (val,))
pairobj = context.make_helper(builder, pair_type, val)
return _IternextResult(context, builder, pairobj)
def call_len(context, builder, ty, val):
"""
Call len() on the given value. Return None if len() isn't defined on
this type.
"""
try:
len_impl = context.get_function(len, typing.signature(types.intp, ty,))
except NotImplementedError:
return None
else:
return len_impl(builder, (val,))
_ForIterLoop = collections.namedtuple('_ForIterLoop',
('value', 'do_break'))
@contextlib.contextmanager
def for_iter(context, builder, iterable_type, val):
"""
Simulate a for loop on the given iterable. Yields a namedtuple with
the given members:
- `value` is the value being yielded
- `do_break` is a callable to early out of the loop
"""
iterator_type = iterable_type.iterator_type
iterval = call_getiter(context, builder, iterable_type, val)
bb_body = builder.append_basic_block('for_iter.body')
bb_end = builder.append_basic_block('for_iter.end')
def do_break():
builder.branch(bb_end)
builder.branch(bb_body)
with builder.goto_block(bb_body):
res = call_iternext(context, builder, iterator_type, iterval)
with builder.if_then(builder.not_(res.is_valid()), likely=False):
builder.branch(bb_end)
yield _ForIterLoop(res.yielded_value(), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
if context.enable_nrt:
context.nrt.decref(builder, iterator_type, iterval)
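# Illustrative sketch: lowering code can walk any iterable value with
#
#     with for_iter(context, builder, iterable_type, val) as loop:
#         item = loop.value    # LLVM value yielded on this iteration
#         ...                  # emit the loop body here
#         loop.do_break()      # optional early exit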
def impl_ret_new_ref(ctx, builder, retty, ret):
"""
The implementation returns a new reference.
"""
return ret
def impl_ret_borrowed(ctx, builder, retty, ret):
"""
The implementation returns a borrowed reference.
This function automatically incref so that the implementation is
returning a new reference.
"""
if ctx.enable_nrt:
ctx.nrt.incref(builder, retty, ret)
return ret
def impl_ret_untracked(ctx, builder, retty, ret):
"""
The return type is not a NRT object.
"""
return ret
@contextlib.contextmanager
def force_error_model(context, model_name='numpy'):
"""
Temporarily change the context's error model.
"""
from numba.core import callconv
old_error_model = context.error_model
context.error_model = callconv.create_error_model(model_name, context)
try:
yield
finally:
context.error_model = old_error_model
def numba_typeref_ctor(*args, **kwargs):
"""A stub for use internally by Numba when a call is emitted
on a TypeRef.
"""
raise NotImplementedError("This function should not be executed.")
|
sjsrey/pysal_core
|
pysal_core/io/IOHandlers/tests/test_gwt.py
|
import unittest
from ..gwt import GwtIO
from ...FileIO import FileIO as psopen
from .... import examples as pysal_examples
import tempfile
import os
import warnings
class test_GwtIO(unittest.TestCase):
def setUp(self):
self.test_file = test_file = pysal_examples.get_path('juvenile.gwt')
self.obj = GwtIO(test_file, 'r')
def test_close(self):
f = self.obj
f.close()
        self.assertRaises(ValueError, f.read)
def test_read(self):
w = self.obj.read()
self.assertEqual(168, w.n)
self.assertEqual(16.678571428571427, w.mean_neighbors)
w.transform = 'B'
self.assertEqual([1.0], w[1].values())
def test_seek(self):
self.test_read()
        self.assertRaises(StopIteration, self.obj.read)
self.obj.seek(0)
self.test_read()
    # Commented out by CRS: GWT 'w' mode was removed until we could find a
    # good solution for retaining distances; see issue #153.
    # Added back by CRS.
def test_write(self):
w = self.obj.read()
f = tempfile.NamedTemporaryFile(
suffix='.gwt', dir=pysal_examples.get_path(''))
fname = f.name
f.close()
o = psopen(fname, 'w')
#copy the shapefile and ID variable names from the old gwt.
# this is only available after the read() method has been called.
#o.shpName = self.obj.shpName
#o.varName = self.obj.varName
o.write(w)
o.close()
wnew = psopen(fname, 'r').read()
self.assertEqual(wnew.pct_nonzero, w.pct_nonzero)
os.remove(fname)
if __name__ == '__main__':
unittest.main()
|
richardcornish/django-itunespodcast
|
podcast/tests/__init__.py
|
from __future__ import unicode_literals
import os
import datetime
from django.test import TestCase, Client, override_settings
from django.utils import timezone
from ..models import Show, Episode, Enclosure
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
@override_settings(PODCAST_SINGULAR=False)
class PodcastTestCase(TestCase):
fixtures = [
'podcast_category.json',
]
def setUp(self):
super(PodcastTestCase, self).setUp()
self.client = Client()
# show
show = Show.objects.create(
title='All About Everything',
slug='everything',
description='All About Everything is a show about everything. Each week we dive into any subject known to man and talk about it as much as we can. Look for our podcast in the Podcasts app or in the iTunes Store',
managing_editor='john.doe@example.com',
webmaster='',
ttl=60,
subtitle='A show about everything',
summary='',
author_name='John Doe',
author_email='',
owner_name='John Doe',
owner_email='john.doe@example.com',
copyright='John Doe & Family',
image='podcast/tests/static/everything/AllAboutEverything.jpg',
explicit=False,
block=False,
complete=False,
)
show.categories.add(1, 4, 62, 63, 67)
# episode 1
episode_1 = Episode.objects.create(
show=show,
title='Shake Shake Shake Your Spices',
slug='shake-shake-shake-your-spices',
description='This week we talk about <a href="https://itunes/apple.com/us/book/antique-trader-salt-pepper/id429691295?mt=11">salt and pepper shakers</a>, comparing and contrasting pour rates, construction materials, and overall aesthetics. Come and join the party!',
pub_date=timezone.make_aware(datetime.datetime.strptime('2016-03-08T12:00:00', '%Y-%m-%dT%H:%M:%S')),
summary='A short primer on table spices',
image='podcast/tests/static/everything/AllAboutEverything/Episode1.jpg',
explicit=False,
block=False,
)
# episode 2
episode_2 = Episode.objects.create(
show=show,
title='Socket Wrench Shootout',
slug='socket-wrench-shootout',
description='This week we talk about metric vs. Old English socket wrenches. Which one is better? Do you really need both? Get all of your answers here.',
pub_date=timezone.make_aware(datetime.datetime.strptime('2016-03-09T18:00:00', '%Y-%m-%dT%H:%M:%S')),
summary='Comparing socket wrenches is fun!',
author_name='Jane Doe',
image='podcast/tests/static/everything/AllAboutEverything/Episode2.jpg',
explicit=False,
block=False,
)
# episode 3
episode_3 = Episode.objects.create(
show=show,
title='The Best Chili',
slug='best-chili',
description='This week we talk about the best Chili in the world. Which chili is better?',
pub_date=timezone.make_aware(datetime.datetime.strptime('2016-03-10T09:00:00', '%Y-%m-%dT%H:%M:%S')),
summary='Jane and Eric',
author_name='Jane Doe',
image='podcast/tests/static/everything/AllAboutEverything/Episode3.jpg',
explicit=False,
block=False,
)
# episode 4
episode_4 = Episode.objects.create(
show=show,
title='Red,Whine, & Blue',
slug='red-whine-blue',
description='This week we talk about surviving in a Red state if you are a Blue person. Or vice versa.',
pub_date=timezone.make_aware(datetime.datetime.strptime('2016-03-10T22:15:00', '%Y-%m-%dT%H:%M:%S')),
summary='Red + Blue != Purple',
author_name='Various',
image='podcast/tests/static/everything/AllAboutEverything/Episode4.jpg',
explicit=False,
block=False,
)
# enclosure 1
Enclosure.objects.create(
episode=episode_1,
file='podcast/tests/static/everything/AllAboutEverythingEpisode3.m4a',
type='audio/x-m4a',
cc=False,
)
# enclosure 2
Enclosure.objects.create(
episode=episode_2,
file='podcast/tests/static/everything/AllAboutEverythingEpisode2.mp4',
type='video/mp4',
cc=False,
)
# enclosure 3
Enclosure.objects.create(
episode=episode_3,
file='podcast/tests/static/everything/AllAboutEverythingEpisode2.m4v',
type='video/x-m4v',
cc=True,
)
# enclosure 4
Enclosure.objects.create(
episode=episode_4,
file='podcast/tests/static/everything/AllAboutEverythingEpisode4.mp3',
type='audio/mpeg',
cc=False,
)
def test_show_feed(self):
response = self.client.get(reverse('podcast:show_feed'))
with open(os.path.join(os.path.dirname(__file__), 'feed.xml'), 'r') as file_1:
xml_1 = file_1.read()
xml_2 = response.content.decode('utf-8').replace('http://testserverpodcast', 'http://testserver/podcast')
self.maxDiff = None
self.assertXMLEqual(xml_1, xml_2)
|
hellhovnd/dentexchange
|
dentexchange/apps/libs/mixins/views.py
|
# -*- coding:utf-8 -*-
import urlparse
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.http.response import HttpResponseBadRequest
from .base import Mixin
from .. import strings
class SuccessURLAliasViewMixin(Mixin):
def get_success_url(self):
return reverse(self.success_url_alias)
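    # Illustrative usage (alias assumed): a FormView subclass sets
    # ``success_url_alias = 'some-url-name'`` and this mixin resolves it with
    # reverse() at request time rather than at import time.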
class HttpRefererViewMixin(Mixin):
def get(self, request, referers=None, *args, **kwargs):
from_referer = urlparse.urlsplit(
request.META.get('HTTP_REFERER', '')).path
if referers is not None \
and all(map(lambda r: unicode(r) != from_referer, referers)):
return HttpResponseBadRequest(
strings.HTTP_REFERER_VIEW_MIXIN_FORM_VIEW_BAD_REQUEST \
% from_referer)
        return self.base_impl(
            HttpRefererViewMixin, self).get(request, *args, **kwargs)
class KwargsUserFormViewMixin(Mixin):
def get_form_kwargs(self):
kwargs = self.base_impl(KwargsUserFormViewMixin, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
|
Joergen/olympia
|
sites/identitystage/settings_base.py
|
"""private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from lib.settings_base import (
CACHE_PREFIX, ES_INDEXES, KNOWN_PROXIES, LOGGING, CSP_SCRIPT_SRC,
CSP_FRAME_SRC)
from .. import splitstrip
import private_base as private
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
REDIRECT_SECRET_KEY = private.REDIRECT_SECRET_KEY
ADMINS = ()
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'mysql_pool'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'mysql_pool'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 30
}
SLAVE_DATABASES = ['slave']
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.CacheClass',
#'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#'BACKEND': 'memcachepool.cache.UMemcacheCache',
'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
'TIMEOUT': 500,
'KEY_PREFIX': CACHE_PREFIX,
},
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
# Celery
BROKER_URL = private.BROKER_URL
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
MIRROR_STAGE_PATH = private.NETAPP_STORAGE_ROOT + '/public-staging'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
USERPICS_PATH = UPLOADS_PATH + '/userpics'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
COLLECTION_ICONS_PATH = UPLOADS_PATH + '/collection_icons'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
HERA = []
LOGGING['loggers'].update({
'z.task': {'level': logging.DEBUG},
'z.hera': {'level': logging.INFO},
'z.redis': {'level': logging.DEBUG},
'z.pool': {'level': logging.ERROR},
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
REDIS_BACKENDS = {
'cache': private.REDIS_BACKENDS_CACHE,
'cache_slave': private.REDIS_BACKENDS_CACHE_SLAVE,
'master': private.REDIS_BACKENDS_MASTER,
'slave': private.REDIS_BACKENDS_SLAVE,
}
CACHE_MACHINE_USE_REDIS = True
RECAPTCHA_PUBLIC_KEY = private.RECAPTCHA_PUBLIC_KEY
RECAPTCHA_PRIVATE_KEY = private.RECAPTCHA_PRIVATE_KEY
RECAPTCHA_URL = (
'https://www.google.com/recaptcha/api/challenge?k=%s' %
RECAPTCHA_PUBLIC_KEY)
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
PERF_THRESHOLD = 20
SPIDERMONKEY = '/usr/bin/tracemonkey'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'marketplace-identity-stage'
BUILDER_SECRET_KEY = private.BUILDER_SECRET_KEY
BUILDER_VERSIONS_URL = (
"https://builder-addons.allizom.org/repackage/sdk-versions/")
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_identity_stage' % v) for k, v in ES_INDEXES.items())
BUILDER_UPGRADE_URL = "https://builder-addons.allizom.org/repackage/rebuild/"
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
GRAPHITE_HOST = private.GRAPHITE_HOST
GRAPHITE_PORT = private.GRAPHITE_PORT
GRAPHITE_PREFIX = private.GRAPHITE_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = True
KNOWN_PROXIES += ['10.2.83.105',
'10.2.83.106',
'10.2.83.107',
'10.8.83.200',
'10.8.83.201',
'10.8.83.202',
'10.8.83.203',
'10.8.83.204',
'10.8.83.210',
'10.8.83.211',
'10.8.83.212',
'10.8.83.213',
'10.8.83.214',
'10.8.83.215',
'10.8.83.251',
'10.8.83.252',
'10.8.83.253',
]
NEW_FEATURES = True
PERF_TEST_URL = (
'http://talos-addon-master1.amotest.scl1.mozilla.com/trigger/trigger.cgi')
REDIRECT_URL = 'https://outgoing.allizom.org/v1/'
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 240
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
ALLOW_SELF_REVIEWS = True
GEOIP_URL = 'http://geo.marketplace.allizom.org'
API_THROTTLE = False
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + ("https://firefoxos.anosrep.org",)
CSP_FRAME_SRC = CSP_FRAME_SRC + ("https://firefoxos.anosrep.org",)
AES_KEYS = private.AES_KEYS
|
youtube/cobalt
|
third_party/llvm-project/lldb/packages/Python/lldbsuite/test/tools/lldb-mi/variable/TestMiVar.py
|
"""
Test lldb-mi -var-xxx commands.
"""
from __future__ import print_function
import lldbmi_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class MiVarTestCase(lldbmi_testcase.MiTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfWindows # llvm.org/pr24452: Get lldb-mi tests working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_eval(self):
"""Test that 'lldb-mi --interpreter' works for evaluating."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to program return
line = line_number('main.cpp', '// BP_return')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
        # Print non-existent variable
self.runCmd("-var-create var1 * undef")
self.expect(
"\^error,msg=\"error: use of undeclared identifier \'undef\'\\\\n\"")
self.runCmd("-data-evaluate-expression undef")
self.expect(
"\^error,msg=\"error: use of undeclared identifier \'undef\'\\\\n\"")
# Print global "g_MyVar", modify, delete and create again
self.runCmd("-data-evaluate-expression g_MyVar")
self.expect("\^done,value=\"3\"")
self.runCmd("-var-create var2 * g_MyVar")
self.expect(
"\^done,name=\"var2\",numchild=\"0\",value=\"3\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-evaluate-expression var2")
self.expect("\^done,value=\"3\"")
self.runCmd("-var-show-attributes var2")
self.expect("\^done,status=\"editable\"")
self.runCmd("-var-list-children var2")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
# Ensure -var-list-children also works with quotes
self.runCmd("-var-list-children \"var2\"")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
self.runCmd("-data-evaluate-expression \"g_MyVar=30\"")
self.expect("\^done,value=\"30\"")
self.runCmd("-var-update --all-values var2")
# self.expect("\^done,changelist=\[\{name=\"var2\",value=\"30\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# FIXME -var-update doesn't work
self.runCmd("-var-delete var2")
self.expect("\^done")
self.runCmd("-var-create var2 * g_MyVar")
self.expect(
"\^done,name=\"var2\",numchild=\"0\",value=\"30\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
# Print static "s_MyVar", modify, delete and create again
self.runCmd("-data-evaluate-expression s_MyVar")
self.expect("\^done,value=\"30\"")
self.runCmd("-var-create var3 * s_MyVar")
self.expect(
"\^done,name=\"var3\",numchild=\"0\",value=\"30\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-evaluate-expression var3")
self.expect("\^done,value=\"30\"")
self.runCmd("-var-show-attributes var3")
self.expect("\^done,status=\"editable\"")
self.runCmd("-var-list-children var3")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
self.runCmd("-data-evaluate-expression \"s_MyVar=3\"")
self.expect("\^done,value=\"3\"")
self.runCmd("-var-update --all-values var3")
# self.expect("\^done,changelist=\[\{name=\"var3\",value=\"3\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# FIXME -var-update doesn't work
self.runCmd("-var-delete var3")
self.expect("\^done")
self.runCmd("-var-create var3 * s_MyVar")
self.expect(
"\^done,name=\"var3\",numchild=\"0\",value=\"3\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
# Print local "b", modify, delete and create again
self.runCmd("-data-evaluate-expression b")
self.expect("\^done,value=\"20\"")
self.runCmd("-var-create var4 * b")
self.expect(
"\^done,name=\"var4\",numchild=\"0\",value=\"20\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-evaluate-expression var4")
self.expect("\^done,value=\"20\"")
self.runCmd("-var-show-attributes var4")
self.expect("\^done,status=\"editable\"")
self.runCmd("-var-list-children var4")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
self.runCmd("-data-evaluate-expression \"b=2\"")
self.expect("\^done,value=\"2\"")
self.runCmd("-var-update --all-values var4")
# self.expect("\^done,changelist=\[\{name=\"var4\",value=\"2\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# FIXME -var-update doesn't work
self.runCmd("-var-delete var4")
self.expect("\^done")
self.runCmd("-var-create var4 * b")
self.expect(
"\^done,name=\"var4\",numchild=\"0\",value=\"2\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
# Print temp "a + b"
self.runCmd("-data-evaluate-expression \"a + b\"")
self.expect("\^done,value=\"12\"")
self.runCmd("-var-create var5 * \"a + b\"")
self.expect(
"\^done,name=\"var5\",numchild=\"0\",value=\"12\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-evaluate-expression var5")
self.expect("\^done,value=\"12\"")
self.runCmd("-var-show-attributes var5")
self.expect("\^done,status=\"editable\"") # FIXME editable or not?
self.runCmd("-var-list-children var5")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
# Print argument "argv[0]"
self.runCmd("-data-evaluate-expression \"argv[0]\"")
self.expect(
"\^done,value=\"0x[0-9a-f]+ \\\\\\\".*?%s\\\\\\\"\"" %
self.myexe)
self.runCmd("-var-create var6 * \"argv[0]\"")
self.expect(
"\^done,name=\"var6\",numchild=\"1\",value=\"0x[0-9a-f]+ \\\\\\\".*?%s\\\\\\\"\",type=\"const char \*\",thread-id=\"1\",has_more=\"0\"" %
self.myexe)
self.runCmd("-var-evaluate-expression var6")
self.expect(
"\^done,value=\"0x[0-9a-f]+ \\\\\\\".*?%s\\\\\\\"\"" %
self.myexe)
self.runCmd("-var-show-attributes var6")
self.expect("\^done,status=\"editable\"")
self.runCmd("-var-list-children --all-values var6")
# FIXME: The name below is not correct. It should be "var.*argv[0]".
# FIXME -var-list-children shows invalid thread-id
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var6\.\*\$[0-9]+\",exp=\"\*\$[0-9]+\",numchild=\"0\",type=\"const char\",thread-id=\"4294967295\",value=\"47 '/'\",has_more=\"0\"\}\],has_more=\"0\"")
# Print an expression with spaces and optional arguments
self.runCmd("-data-evaluate-expression \"a + b\"")
self.expect("\^done,value=\"12\"")
self.runCmd("-var-create var7 * \"a + b\" --thread 1 --frame 0")
self.expect(
"\^done,name=\"var7\",numchild=\"0\",value=\"12\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
@skipIfWindows # llvm.org/pr24452: Get lldb-mi tests working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfLinux # llvm.org/pr22841: lldb-mi tests fail on all Linux buildbots
@skipIfDarwin
@skipIfRemote # We do not currently support remote debugging via the MI.
def test_lldbmi_var_update(self):
"""Test that 'lldb-mi --interpreter' works for -var-update."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to BP_var_update_test_init
line = line_number('main.cpp', '// BP_var_update_test_init')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Setup variables
self.runCmd("-var-create var_l * l")
self.expect(
"\^done,name=\"var_l\",numchild=\"0\",value=\"1\",type=\"long\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-create var_complx * complx")
self.expect(
"\^done,name=\"var_complx\",numchild=\"3\",value=\"\{\.\.\.\}\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-create var_complx_array * complx_array")
self.expect(
"\^done,name=\"var_complx_array\",numchild=\"2\",value=\"\[2\]\",type=\"complex_type \[2\]\",thread-id=\"1\",has_more=\"0\"")
# Go to BP_var_update_test_l
line = line_number('main.cpp', '// BP_var_update_test_l')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"2\"")
self.runCmd("-exec-continue")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Test that var_l was updated
self.runCmd("-var-update --all-values var_l")
self.expect(
"\^done,changelist=\[\{name=\"var_l\",value=\"0\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# Go to BP_var_update_test_complx
line = line_number('main.cpp', '// BP_var_update_test_complx')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"3\"")
self.runCmd("-exec-continue")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Test that var_complx was updated
self.runCmd("-var-update --all-values var_complx")
self.expect(
"\^done,changelist=\[\{name=\"var_complx\",value=\"\{\.\.\.\}\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
# Go to BP_var_update_test_complx_array
line = line_number('main.cpp', '// BP_var_update_test_complx_array')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"4\"")
self.runCmd("-exec-continue")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Test that var_complex_array was updated
self.runCmd("-var-update --all-values var_complx_array")
self.expect(
"\^done,changelist=\[\{name=\"var_complx_array\",value=\"\[2\]\",in_scope=\"true\",type_changed=\"false\",has_more=\"0\"\}\]")
@skipIfWindows # llvm.org/pr24452: Get lldb-mi tests working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_var_create_register(self):
"""Test that 'lldb-mi --interpreter' works for -var-create $regname."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to main
self.runCmd("-break-insert -f main")
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Find name of register 0
self.runCmd("-data-list-register-names 0")
self.expect("\^done,register-names=\[\".+?\"\]")
register_name = self.child.after.split("\"")[1]
# Create variable for register 0
# Note that message is different in Darwin and Linux:
# Darwin: "^done,name=\"var_reg\",numchild=\"0\",value=\"0x[0-9a-f]+\",type=\"unsigned long\",thread-id=\"1\",has_more=\"0\"
# Linux:
# "^done,name=\"var_reg\",numchild=\"0\",value=\"0x[0-9a-f]+\",type=\"unsigned
# int\",thread-id=\"1\",has_more=\"0\"
self.runCmd("-var-create var_reg * $%s" % register_name)
self.expect(
"\^done,name=\"var_reg\",numchild=\"0\",value=\"0x[0-9a-f]+\",type=\"unsigned (long|int)\",thread-id=\"1\",has_more=\"0\"")
# Assign value to variable
self.runCmd("-var-assign var_reg \"6\"")
# FIXME: the output has different format for 32bit and 64bit values
self.expect("\^done,value=\"0x0*?6\"")
# Assert register 0 updated
self.runCmd("-data-list-register-values d 0")
self.expect("\^done,register-values=\[{number=\"0\",value=\"6\"")
@skipIfWindows # llvm.org/pr24452: Get lldb-mi tests working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfLinux # llvm.org/pr22841: lldb-mi tests fail on all Linux buildbots
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_var_list_children(self):
"""Test that 'lldb-mi --interpreter' works for -var-list-children."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to BP_var_list_children_test
line = line_number('main.cpp', '// BP_var_list_children_test')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Create variable
self.runCmd("-var-create var_complx * complx")
self.expect(
"\^done,name=\"var_complx\",numchild=\"3\",value=\"\{\.\.\.\}\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-create var_complx_array * complx_array")
self.expect(
"\^done,name=\"var_complx_array\",numchild=\"2\",value=\"\[2\]\",type=\"complex_type \[2\]\",thread-id=\"1\",has_more=\"0\"")
self.runCmd("-var-create var_pcomplx * pcomplx")
self.expect(
"\^done,name=\"var_pcomplx\",numchild=\"2\",value=\"\{\.\.\.\}\",type=\"pcomplex_type\",thread-id=\"1\",has_more=\"0\"")
# Test that -var-evaluate-expression can evaluate the children of
# created varobj
self.runCmd("-var-list-children var_complx")
self.runCmd("-var-evaluate-expression var_complx.i")
self.expect("\^done,value=\"3\"")
self.runCmd("-var-list-children var_complx_array")
self.runCmd("-var-evaluate-expression var_complx_array.[0]")
self.expect("\^done,value=\"\{...\}\"")
self.runCmd("-var-list-children var_pcomplx")
self.runCmd("-var-evaluate-expression var_pcomplx.complex_type")
self.expect("\^done,value=\"\{...\}\"")
# Test that -var-list-children lists empty children if range is empty
# (and that print-values is optional)
self.runCmd("-var-list-children var_complx 0 0")
self.expect("\^done,numchild=\"0\",has_more=\"1\"")
self.runCmd("-var-list-children var_complx 99 0")
self.expect("\^done,numchild=\"0\",has_more=\"1\"")
self.runCmd("-var-list-children var_complx 99 3")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
# Test that -var-list-children lists all children with their values
# (and that from and to are optional)
self.runCmd("-var-list-children --all-values var_complx")
self.expect(
"\^done,numchild=\"3\",children=\[child=\{name=\"var_complx\.i\",exp=\"i\",numchild=\"0\",type=\"int\",thread-id=\"1\",value=\"3\",has_more=\"0\"\},child=\{name=\"var_complx\.inner\",exp=\"inner\",numchild=\"1\",type=\"complex_type::\(anonymous struct\)\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\},child=\{name=\"var_complx\.complex_ptr\",exp=\"complex_ptr\",numchild=\"3\",type=\"complex_type \*\",thread-id=\"1\",value=\"0x[0-9a-f]+\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children --simple-values var_complx_array")
self.expect(
"\^done,numchild=\"2\",children=\[child=\{name=\"var_complx_array\.\[0\]\",exp=\"\[0\]\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\},child=\{name=\"var_complx_array\.\[1\]\",exp=\"\[1\]\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children 0 var_pcomplx")
self.expect(
"\^done,numchild=\"2\",children=\[child=\{name=\"var_pcomplx\.complex_type\",exp=\"complex_type\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\},child={name=\"var_pcomplx\.complx\",exp=\"complx\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"0\"")
# Test that -var-list-children lists children without values
self.runCmd("-var-list-children 0 var_complx 0 1")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.i\",exp=\"i\",numchild=\"0\",type=\"int\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --no-values var_complx 0 1")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.i\",exp=\"i\",numchild=\"0\",type=\"int\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --no-values var_complx_array 0 1")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx_array\.\[0\]\",exp=\"\[0\]\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --no-values var_pcomplx 0 1")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_pcomplx\.complex_type\",exp=\"complex_type\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"1\"")
# Test that -var-list-children lists children with all values
self.runCmd("-var-list-children 1 var_complx 1 2")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.inner\",exp=\"inner\",numchild=\"1\",type=\"complex_type::\(anonymous struct\)\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --all-values var_complx 1 2")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.inner\",exp=\"inner\",numchild=\"1\",type=\"complex_type::\(anonymous struct\)\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\}\],has_more=\"1\"")
self.runCmd("-var-list-children --all-values var_complx_array 1 2")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx_array\.\[1\]\",exp=\"\[1\]\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children --all-values var_pcomplx 1 2")
self.expect(
"\^done,numchild=\"1\",children=\[child={name=\"var_pcomplx\.complx\",exp=\"complx\",numchild=\"3\",type=\"complex_type\",thread-id=\"1\",value=\"\{\.\.\.\}\",has_more=\"0\"\}\],has_more=\"0\"")
# Test that -var-list-children lists children with simple values
self.runCmd("-var-list-children 2 var_complx 2 4")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.complex_ptr\",exp=\"complex_ptr\",numchild=\"3\",type=\"complex_type \*\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children --simple-values var_complx 2 4")
self.expect(
"\^done,numchild=\"1\",children=\[child=\{name=\"var_complx\.complex_ptr\",exp=\"complex_ptr\",numchild=\"3\",type=\"complex_type \*\",thread-id=\"1\",has_more=\"0\"\}\],has_more=\"0\"")
self.runCmd("-var-list-children --simple-values var_complx_array 2 4")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
self.runCmd("-var-list-children --simple-values var_pcomplx 2 4")
self.expect("\^done,numchild=\"0\",has_more=\"0\"")
# Test that an invalid from is handled
# FIXME: -1 is treated as unsigned int
self.runCmd("-var-list-children 0 var_complx -1 0")
#self.expect("\^error,msg=\"Command 'var-list-children'\. Variable children range invalid\"")
# Test that an invalid to is handled
# FIXME: -1 is treated as unsigned int
self.runCmd("-var-list-children 0 var_complx 0 -1")
#self.expect("\^error,msg=\"Command 'var-list-children'\. Variable children range invalid\"")
# Test that a missing low-frame or high-frame is handled
self.runCmd("-var-list-children 0 var_complx 0")
self.expect(
"\^error,msg=\"Command 'var-list-children'. Variable children range invalid\"")
@skipIfWindows # llvm.org/pr24452: Get lldb-mi working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfLinux # llvm.org/pr22841: lldb-mi tests fail on all Linux buildbots
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_var_create_for_stl_types(self):
"""Test that 'lldb-mi --interpreter' print summary for STL types."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
        # Run to BP_cpp_stl_types_test
line = line_number('main.cpp', '// BP_cpp_stl_types_test')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Test for std::string
self.runCmd("-var-create - * std_string")
self.expect(
'\^done,name="var\d+",numchild="[0-9]+",value="\\\\"hello\\\\"",type="std::[\S]*?string",thread-id="1",has_more="0"')
@skipIfWindows # llvm.org/pr24452: Get lldb-mi working on Windows
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfLinux # llvm.org/pr22841: lldb-mi tests fail on all Linux buildbots
@skipIfRemote # We do not currently support remote debugging via the MI.
@skipIfDarwin
def test_lldbmi_var_create_for_unnamed_objects(self):
"""Test that 'lldb-mi --interpreter' can expand unnamed structures and unions."""
self.spawnLldbMi(args=None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to breakpoint
line = line_number('main.cpp', '// BP_unnamed_objects_test')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Evaluate struct_with_unions type and its children
self.runCmd("-var-create v0 * swu")
self.expect(
'\^done,name="v0",numchild="2",value="\{\.\.\.\}",type="struct_with_unions",thread-id="1",has_more="0"')
self.runCmd("-var-list-children v0")
# inspect the first unnamed union
self.runCmd("-var-list-children v0.$0")
self.runCmd("-var-evaluate-expression v0.$0.u_i")
self.expect('\^done,value="1"')
# inspect the second unnamed union
self.runCmd("-var-list-children v0.$1")
self.runCmd("-var-evaluate-expression v0.$1.u1")
self.expect('\^done,value="-1"')
# inspect unnamed structure
self.runCmd("-var-list-children v0.$1.$1")
self.runCmd("-var-evaluate-expression v0.$1.$1.s1")
self.expect('\^done,value="-1"')
|
JensGrabner/mpmath
|
mpmath/calculus/polynomials.py
|
from ..libmp.backend import xrange
from .calculus import defun
#----------------------------------------------------------------------------#
# Polynomials #
#----------------------------------------------------------------------------#
# XXX: extra precision
@defun
def polyval(ctx, coeffs, x, derivative=False):
r"""
Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
:func:`~mpmath.polyval` evaluates the polynomial
.. math ::
P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.
If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
evaluates `P(x)` with the derivative, `P'(x)`, and returns the
tuple `(P(x), P'(x))`.
>>> from mpmath import *
>>> mp.pretty = True
>>> polyval([3, 0, 2], 0.5)
2.75
>>> polyval([3, 0, 2], 0.5, derivative=True)
(2.75, 3.0)
The coefficients and the evaluation point may be any combination
of real or complex numbers.
"""
if not coeffs:
return ctx.zero
p = ctx.convert(coeffs[0])
q = ctx.zero
for c in coeffs[1:]:
if derivative:
q = p + x*q
p = c + x*p
if derivative:
return p, q
else:
return p
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
error=False, roots_init=None):
"""
Computes all roots (real or complex) of a given polynomial.
The roots are returned as a sorted list, where real roots appear first
followed by complex conjugate roots as adjacent elements. The polynomial
should be given as a list of coefficients, in the format used by
:func:`~mpmath.polyval`. The leading coefficient must be nonzero.
With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
where *err* is an estimate of the maximum error among the computed roots.
**Examples**
Finding the three real roots of `x^3 - x^2 - 14x + 24`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(polyroots([1,-1,-14,24]), 4)
[-4.0, 2.0, 3.0]
Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
error estimate::
>>> roots, err = polyroots([4,3,2], error=True)
>>> for r in roots:
... print(r)
...
(-0.375 + 0.59947894041409j)
(-0.375 - 0.59947894041409j)
>>>
>>> err
2.22044604925031e-16
>>>
>>> polyval([4,3,2], roots[0])
(2.22044604925031e-16 + 0.0j)
>>> polyval([4,3,2], roots[1])
(2.22044604925031e-16 + 0.0j)
The following example computes all the 5th roots of unity; that is,
the roots of `x^5 - 1`::
>>> mp.dps = 20
>>> for r in polyroots([1, 0, 0, 0, 0, -1]):
... print(r)
...
1.0
(-0.8090169943749474241 + 0.58778525229247312917j)
(-0.8090169943749474241 - 0.58778525229247312917j)
(0.3090169943749474241 + 0.95105651629515357212j)
(0.3090169943749474241 - 0.95105651629515357212j)
**Precision and conditioning**
    The roots are computed to the accuracy of the current working precision.
    If this accuracy cannot be achieved within ``maxsteps`` steps, a
    ``NoConvergence`` exception is raised. Internally, the algorithm uses the
    current working precision extended by ``extraprec``. A ``NoConvergence``
    failure is caused either by insufficient extra precision (in which case
    increasing ``extraprec`` should fix the problem), by too low a
    ``maxsteps`` (in which case increasing ``maxsteps`` should fix the
    problem), or by a combination of both.
    The user should always do a convergence study with respect to
    ``extraprec`` to ensure accurate results; with too low an ``extraprec``
    it is possible to converge to a wrong answer.
Provided there are no repeated roots, :func:`~mpmath.polyroots` can
typically compute all roots of an arbitrary polynomial to high precision::
>>> mp.dps = 60
>>> for r in polyroots([1, 0, -10, 0, 1]):
... print(r)
...
-3.14626436994197234232913506571557044551247712918732870123249
-0.317837245195782244725757617296174288373133378433432554879127
0.317837245195782244725757617296174288373133378433432554879127
3.14626436994197234232913506571557044551247712918732870123249
>>>
>>> sqrt(3) + sqrt(2)
3.14626436994197234232913506571557044551247712918732870123249
>>> sqrt(3) - sqrt(2)
0.317837245195782244725757617296174288373133378433432554879127
**Algorithm**
:func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
uses complex arithmetic to locate all roots simultaneously.
The Durand-Kerner method can be viewed as approximately performing
simultaneous Newton iteration for all the roots. In particular,
the convergence to simple roots is quadratic, just like Newton's
method.
Although all roots are internally calculated using complex arithmetic, any
root found to have an imaginary part smaller than the estimated numerical
error is truncated to a real number (small real parts are also chopped).
Real roots are placed first in the returned list, sorted by value. The
remaining complex roots are sorted by their real parts so that conjugate
roots end up next to each other.
**References**
1. http://en.wikipedia.org/wiki/Durand-Kerner_method
"""
if len(coeffs) <= 1:
if not coeffs or not coeffs[0]:
raise ValueError("Input to polyroots must not be the zero polynomial")
# Constant polynomial with no roots
return []
orig = ctx.prec
tol = +ctx.eps
with ctx.extraprec(extraprec):
deg = len(coeffs) - 1
# Must be monic
lead = ctx.convert(coeffs[0])
if lead == 1:
coeffs = [ctx.convert(c) for c in coeffs]
else:
coeffs = [c/lead for c in coeffs]
f = lambda x: ctx.polyval(coeffs, x)
if roots_init is None:
roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
else:
            roots = [None]*deg
deg_init = min(deg, len(roots_init))
roots[:deg_init] = list(roots_init[:deg_init])
roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
in xrange(deg_init,deg)]
err = [ctx.one for n in xrange(deg)]
# Durand-Kerner iteration until convergence
for step in xrange(maxsteps):
if abs(max(err)) < tol:
break
for i in xrange(deg):
p = roots[i]
x = f(p)
                for j in xrange(deg):
if i != j:
try:
x /= (p-roots[j])
except ZeroDivisionError:
continue
roots[i] = p - x
err[i] = abs(x)
if abs(max(err)) >= tol:
raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
% maxsteps)
# Remove small real or imaginary parts
if cleanup:
for i in xrange(deg):
if abs(roots[i]) < tol:
roots[i] = ctx.zero
elif abs(ctx._im(roots[i])) < tol:
roots[i] = roots[i].real
elif abs(ctx._re(roots[i])) < tol:
roots[i] = roots[i].imag * 1j
roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
if error:
err = max(err)
err = max(err, ctx.ldexp(1, -orig+1))
return [+r for r in roots], +err
else:
return [+r for r in roots]
|
larsmans/numpy
|
numpy/lib/tests/test_io.py
|
from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
from tempfile import mkstemp, NamedTemporaryFile
import time
import warnings
import gc
from io import BytesIO
from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import (ConverterError, ConverterLockError,
ConversionWarning)
from numpy.compat import asbytes, asbytes_nested, bytes, asstr
from nose import SkipTest
from numpy.ma.testutils import (
TestCase, assert_equal, assert_array_equal,
assert_raises, assert_raises_regex, run_module_suite
)
from numpy.testing import assert_warns, assert_, build_err_msg
from numpy.testing.utils import tempdir
class TextIO(BytesIO):
"""Helper IO class.
    Write methods encode strings to bytes if needed; reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
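# Example (illustrative): TextIO accepts str but behaves like a binary file,
# so the following holds:
#   c = TextIO('1 2\n3 4')
#   c.read() == b'1 2\n3 4'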
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
if sys.version_info[0] >= 3:
return datetime(*time.strptime(s.decode('latin1'), fmt)[:3])
else:
return datetime(*time.strptime(s, fmt)[:3])
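# For example (illustrative), strptime(b'2001-01-01', '%Y-%m-%d') returns
# datetime(2001, 1, 1) on both Python 2 and 3.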
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
target_file = NamedTemporaryFile(delete=False)
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
try:
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
finally:
if not isinstance(target_file, BytesIO):
target_file.close()
# holds an open file descriptor so it can't be deleted on win
if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
os.remove(target_file.name)
def check_roundtrips(self, a):
self.roundtrip(a)
self.roundtrip(a, file_on_disk=True)
self.roundtrip(np.asfortranarray(a))
self.roundtrip(np.asfortranarray(a), file_on_disk=True)
if a.shape[0] > 1:
# neither C nor Fortran contiguous for 2D arrays or more
self.roundtrip(np.asfortranarray(a)[1:])
self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
def test_array(self):
a = np.array([], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], int)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.check_roundtrips(a)
def test_array_object(self):
if sys.version_info[:2] >= (2, 7):
a = np.array([], object)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], object)
self.check_roundtrips(a)
# Fails with UnpicklingError: could not find MARK on Python 2.6
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.asfortranarray([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
def test_format_2_0(self):
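        # A dtype with hundreds of very long field names makes the header
        # exceed the 64K limit of .npy format 1.0, forcing format 2.0 and
        # the UserWarning that is caught below.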
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', UserWarning)
self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
for n, arr in enumerate(self.arr):
reloaded = self.arr_reloaded['arr_%d' % n]
assert_equal(arr, reloaded)
assert_equal(arr.dtype, reloaded.dtype)
assert_equal(arr.flags.fnc, reloaded.flags.fnc)
finally:
# delete tempfile, must be done here on windows
if self.arr_reloaded.fid:
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
@np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
@np.testing.dec.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
with tempdir(prefix="numpy_test_big_arrays_") as tmpdir:
tmp = os.path.join(tmpdir, "file.npz")
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a']
npfile.close()
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
assert_equal(a, l.f.file_a)
assert_equal(b, l.f.file_b)
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
def writer(error_list):
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
finally:
os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify could seek on 'loaded' file
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
fp = open(tmp, 'rb', 10000)
fp.seek(0)
assert_(not fp.closed)
_ = np.load(fp)['data']
assert_(not fp.closed)
# must not get closed by .load(opened fp)
fp.seek(0)
assert_(not fp.closed)
finally:
fp.close()
os.remove(tmp)
def test_closing_fid(self):
        # Test that issue #1517 (too many opened files) stays fixed.
        # It might be a "weak" test since it failed to get triggered on
        # e.g. Debian sid of 2012 Jul 05, but was reported to
        # trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
os.remove(tmp)
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it.
# This needs to pass a file name to load for the
# test.
with tempdir(prefix="numpy_test_closing_zipfile_after_load_") as tmpdir:
fd, tmp = mkstemp(suffix='.npz', dir=tmpdir)
os.close(fd)
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt(TestCase):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
        # Specify delimiter; it should be overridden by the multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
"""
Test the functionality of the header and footer keyword argument.
"""
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=np.int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
f, name = mkstemp()
os.close(f)
try:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
finally:
os.unlink(name)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
class TestLoadTxt(TestCase):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=np.int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
os.close(f)
try:
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
finally:
os.unlink(name)
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = {'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
def test_bad_line(self):
c = TextIO()
c.write('1 2 3\n4 5 6\n2 3')
c.seek(0)
# Check for exception and that exception contains line number
assert_raises_regex(ValueError, "3", np.loadtxt, c)
class Testfromregex(TestCase):
# np.fromregex expects files opened in binary mode.
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = TextIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
"Test retrieving a header"
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C': lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
"Test the conversion to datetime64."
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(
converters={2: strip_per, 3: strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_converters_and_usecols(self):
dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
dtyp = [('E1','i4'),('E2','i4'),('E3','i2'),('N', 'i1')]
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp)
assert_equal(test, control)
dtyp = [('E1','i4'),('E2','i4'),('N', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
"Test w/ a delimiter tab"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
"Test usecols with named columns"
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.float), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
data2 = "1,2,*,4\n5,*,7,8\n"
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=0)
ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=-1)
ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
"Test invalid raise"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
        # assert_warns?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
"Test invalid_raise with usecols"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
        # assert_warns?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
mtest = np.ndfromtxt(TextIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
"Test fix-width w/ names"
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b'testNonetherestofthedata')
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b' testNonetherestofthedata')
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', np.int), ('b', np.float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file object
wanted = np.arange(6).reshape((2, 3))
if sys.version_info[0] >= 3:
# python 3k is known to fail for '\r'
linesep = ('\n', '\r\n')
else:
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
f, name = mkstemp()
# We can't use NamedTemporaryFile on windows, because we cannot
# reopen the file.
try:
os.write(f, asbytes(data))
assert_array_equal(np.genfromtxt(name), wanted)
finally:
os.close(f)
os.unlink(name)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
    # Thanks to another windows brokenness, we can't use
    # NamedTemporaryFile: a file created by this function cannot be
    # reopened by another open call. So we first build the gzipped
    # string of the test reference array, write it to a securely
    # opened file, and then read it back with loadtxt.
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
gc.collect()
n_before = len(gc.get_objects())
np.load(f)
n_after = len(gc.get_objects())
assert_equal(n_before, n_after)
if __name__ == "__main__":
run_module_suite()
|
ajrichards/bayesian-examples
|
python/howto-logging.py
|
#!/usr/bin/env python
import time,os,re,csv,sys,uuid,joblib
from datetime import date
import numpy as np
from sklearn import svm
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
def train_model(X,y,saved_model):
"""
function to train model
"""
## Perform a train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
## Specify parameters and model
params = {'C':1.0,'kernel':'linear','gamma':0.5}
clf = svm.SVC(**params,probability=True)
## fit model on training data
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(classification_report(y_test,y_pred))
## retrain using all data
clf.fit(X, y)
print("... saving model: {}".format(saved_model))
joblib.dump(clf,saved_model)
print(y_test[:5])
print(X_test[:5,:])
def _update_predict_log(y_pred,y_proba,query,runtime):
"""
update predict log file
"""
## name the logfile using something that cycles with date (day, month, year)
today = date.today()
logfile = "example-predict-{}-{}.log".format(today.year, today.month)
## write the data to a csv file
header = ['unique_id','timestamp','y_pred','y_proba','x_shape','model_version','runtime']
write_header = False
if not os.path.exists(logfile):
write_header = True
with open(logfile,'a') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|')
if write_header:
writer.writerow(header)
to_write = map(str,[uuid.uuid4(),time.time(),y_pred,y_proba,query.shape,MODEL_VERSION,runtime])
writer.writerow(to_write)
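## A resulting row might look like (values illustrative; the shape field is
## |-quoted because it contains the comma delimiter):
## 1b9d...,1612345678.9,[0],[[0.9 0.1 0.0]],|(1, 2)|,1.0,000:00:00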
def predict(query):
"""
generic function for prediction
"""
## start timer for runtime
time_start = time.time()
    ## load the trained model from disk
    model = joblib.load(saved_model)
    ## input checking: ensure the query is 2D
if len(query.shape) == 1:
query = query.reshape(1, -1)
## make prediction and gather data for log entry
y_pred = model.predict(query)
y_proba = None
if 'predict_proba' in dir(model) and model.probability == True:
y_proba = model.predict_proba(query)
m, s = divmod(time.time()-time_start, 60)
h, m = divmod(m, 60)
runtime = "%03d:%02d:%02d"%(h, m, s)
## update the log file
_update_predict_log(y_pred,y_proba,query,runtime)
return(y_pred)
if __name__ == "__main__":
## import some data to play with
iris = datasets.load_iris()
X = iris.data[:,:2]
y = iris.target
## train the model
MODEL_VERSION = 1.0
saved_model = "example-predict-{}.joblib".format(re.sub("\.","_",str(MODEL_VERSION)))
model = train_model(X,y,saved_model)
## example predict
query = np.array([[6.1,2.8]])
for query in [np.array([[6.1,2.8]]), np.array([[7.7,2.5]]), np.array([[5.8,3.8]])]:
y_pred = predict(query)
print("predicted: {}".format(y_pred))
|
alexmoratalla/yambopy
|
yambopy/units.py
|
I = complex(0,1)
ha2ev = 27.211396132
ev2cm1 = 8065.5440044136285
bohr2ang = 0.52917720859
atomic_mass = [ None, 1.00794, 4.002602, 6.941, 9.012182,
10.811, 12.0107, 14.0067, 15.9994, 18.9984032,
20.1797, 22.98976928, 24.305,26.9815386, 28.0855,
30.973762, 32.065, 35.453, 39.948, 39.0983,
40.078, 44.955912, 47.867, 50.9415, 51.9961,
54.938045, 55.845, 58.933195, 58.6934, 63.546,
65.38, 69.723, 72.64, 74.9216, 78.96,
79.904, 83.798, 85.4678, 87.62, 88.90585,
91.224, 92.90638, 95.96, None, 101.07,
102.9055, 106.42, 107.8682, 112.411, 114.818,
118.71, 121.76, 127.6, 126.90447, 131.293,
132.9054519, 137.327, 138.90547, 140.116, 140.90765,
144.242, None, 150.36, 151.964, 157.25,
158.92535, 162.5, 164.93032, 167.259, 168.93421,
173.054, 174.9668, 178.49, 180.94788, 183.84,
186.207, 190.23, 192.217, 195.084, 196.966569,
200.59, 204.3833, 207.2, 208.9804, None,
None, None, None, None, None,
232.03806, 231.03588, 238.02891, None, None,
None, None, None, None, None,
None, None, None, None, None,
None, None, None, None, None,
None, None, None, None, None,
None, None, None, None]
chemical_symbols = ['X', 'H', 'He', 'Li', 'Be',
'B', 'C', 'N', 'O', 'F',
'Ne', 'Na', 'Mg', 'Al', 'Si',
'P', 'S', 'Cl', 'Ar', 'K',
'Ca', 'Sc', 'Ti', 'V', 'Cr',
'Mn', 'Fe', 'Co', 'Ni', 'Cu',
'Zn', 'Ga', 'Ge', 'As', 'Se',
'Br', 'Kr', 'Rb', 'Sr', 'Y',
'Zr', 'Nb', 'Mo', 'Tc', 'Ru',
'Rh', 'Pd', 'Ag', 'Cd', 'In',
'Sn', 'Sb', 'Te', 'I', 'Xe',
'Cs', 'Ba', 'La', 'Ce', 'Pr',
'Nd', 'Pm', 'Sm', 'Eu', 'Gd',
'Tb', 'Dy', 'Ho', 'Er', 'Tm',
'Yb', 'Lu', 'Hf', 'Ta', 'W',
'Re', 'Os', 'Ir', 'Pt', 'Au',
'Hg', 'Tl', 'Pb', 'Bi', 'Po',
'At', 'Rn', 'Fr', 'Ra', 'Ac',
'Th', 'Pa', 'U', 'Np', 'Pu',
'Am', 'Cm', 'Bk', 'Cf', 'Es',
'Fm', 'Md', 'No', 'Lr']
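# Examples (illustrative):
#   energy_ev  = energy_ha * ha2ev          # Hartree -> eV
#   energy_cm1 = energy_ev * ev2cm1         # eV -> cm^-1
#   chemical_symbols[14], atomic_mass[14]   # -> ('Si', 28.0855), indexed by Z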
|
pombredanne/parakeet
|
parakeet/ndtypes/fn_type.py
|
from core_types import IncompatibleTypes, ImmutableT
class FnT(ImmutableT):
"""Type of a typed function"""
def __init__(self, input_types, return_type):
self.input_types = tuple(input_types)
self.return_type = return_type
self._hash = hash(self.input_types + (return_type,))
def __str__(self):
input_str = ", ".join(str(t) for t in self.input_types)
return "(%s)->%s" % (input_str, self.return_type)
def __repr__(self):
return str(self)
def __eq__(self, other):
return other.__class__ is FnT and \
self.return_type == other.return_type and \
len(self.input_types) == len(other.input_types) and \
all(t1 == t2 for (t1, t2) in
zip(self.input_types, other.input_types))
def combine(self, other):
if self == other:
return self
else:
raise IncompatibleTypes(self, other)
def __hash__(self):
return self._hash
_fn_type_cache = {}
def make_fn_type(input_types, return_type):
input_types = tuple(input_types)
key = input_types, return_type
if key in _fn_type_cache:
return _fn_type_cache[key]
else:
t = FnT(input_types, return_type)
_fn_type_cache[key] = t
return t
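# Usage sketch (assuming scalar types such as Int64 elsewhere in ndtypes):
#   t1 = make_fn_type((Int64, Int64), Int64)
#   t2 = make_fn_type([Int64, Int64], Int64)
#   t1 is t2  # True: instances are cached, so equal types share one object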
|
standage/tag
|
tag/__init__.py
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (C) 2015 Daniel Standage <daniel.standage@gmail.com>
#
# This file is part of tag (http://github.com/standage/tag) and is licensed
# under the BSD 3-clause license: see LICENSE.
# -----------------------------------------------------------------------------
"""Package-wide configuration"""
try:
import __builtin__ as builtins
except ImportError: # pragma: no cover
import builtins
from tag.comment import Comment
from tag.directive import Directive
from tag.feature import Feature
from tag.sequence import Sequence
from tag.range import Range
from tag.reader import GFF3Reader
from tag.writer import GFF3Writer
from tag.score import Score
from tag import bae
from tag import cli
from tag import index
from tag import locus
from tag import select
from tag import transcript
from gzip import open as gzopen
import sys
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def open(filename, mode):
if mode not in ['r', 'w']:
raise ValueError('invalid mode "{}"'.format(mode))
if filename in ['-', None]: # pragma: no cover
filehandle = sys.stdin if mode == 'r' else sys.stdout
return filehandle
openfunc = builtins.open
if filename.endswith('.gz'):
openfunc = gzopen
mode += 't'
return openfunc(filename, mode)
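# Usage sketch (filename hypothetical): mode must be 'r' or 'w'; '.gz' paths
# are opened transparently in text mode, and '-' maps to stdin/stdout.
#   fh = open('annotations.gff3.gz', 'r')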
|
dekom/threepress-bookworm-read-only
|
bookworm/gdata/tests/all_tests_coverage.py
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import coverage
import all_tests
# atom_tests ships alongside these tests; suite() below references it but
# the original file never imported it.
import atom_tests.core_test
import atom.core
import atom.http_core
import atom.mock_http_core
import atom.auth
import atom.client
import atom.data
import gdata.gauth
import gdata.client
import gdata.core
import gdata.data
import gdata.blogger.data
import gdata.blogger.client
import gdata.maps.data
import gdata.maps.client
import gdata.spreadsheets.data
from gdata.test_config import settings
# Ensure that coverage tests execute the live requests to the servers, but
# allow use of cached server responses to speed up repeated runs.
settings.RUN_LIVE_TESTS = True
settings.CLEAR_CACHE = False
def suite():
return unittest.TestSuite((atom_tests.core_test.suite(),))
if __name__ == '__main__':
coverage.erase()
coverage.start()
unittest.TextTestRunner().run(all_tests.suite())
coverage.stop()
coverage.report([atom.core, atom.http_core, atom.auth, atom.data,
atom.mock_http_core, atom.client, gdata.gauth, gdata.client,
gdata.core, gdata.data, gdata.blogger.data, gdata.blogger.client,
gdata.maps.data, gdata.maps.client, gdata.spreadsheets.data])
|
Just-D/chromium-1
|
content/test/gpu/gpu_tests/gpu_test_expectations.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# mavericks, yosemite, linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Browser types:
# android-webview-shell, android-content-shell, debug
#
# ANGLE renderer:
# d3d9, d3d11, opengl
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
#
# Sample usage in SetExpectations in subclasses:
# self.Fail('gl-enable-vertex-attrib.html',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
ANGLE_MODIFIERS = ['d3d9', 'd3d11', 'opengl']
BROWSER_TYPE_MODIFIERS = [
'android-webview-shell', 'android-content-shell', 'debug' ]
class _FlakyExpectation(object):
def __init__(self, expectation, max_num_retries):
self.expectation = expectation
self.max_num_retries = max_num_retries
class GpuTestExpectations(test_expectations.TestExpectations):
def __init__(self):
self._flaky_expectations = []
super(GpuTestExpectations, self).__init__()
def Flaky(self, url_pattern, conditions=None, bug=None, max_num_retries=2):
expectation = _FlakyExpectation(self.CreateExpectation(
'pass', url_pattern, conditions, bug), max_num_retries)
self._flaky_expectations.append(expectation)
def GetFlakyRetriesForPage(self, page, browser):
for fe in self._flaky_expectations:
e = fe.expectation
if self.ExpectationAppliesToPage(e, browser, page):
return fe.max_num_retries
return 0
def IsValidUserDefinedCondition(self, condition):
# Add support for d3d9, d3d11 and opengl-specific expectations.
if condition in ANGLE_MODIFIERS:
return True
# Add support for browser-type-specific expectations.
if condition in BROWSER_TYPE_MODIFIERS:
return True
return super(GpuTestExpectations,
self).IsValidUserDefinedCondition(condition)
def ModifiersApply(self, browser, expectation):
if not super(GpuTestExpectations, self).ModifiersApply(
browser, expectation):
return False
# We'll only get here if the OS and GPU matched the expectation.
# TODO(kbr): refactor _Expectation to be a public class so that
# the GPU-specific properties can be moved into a subclass, and
# run the unit tests from this directory on the CQ and the bots.
# crbug.com/495868 crbug.com/495870
# Check for presence of Android WebView.
browser_expectations = [x for x in expectation.user_defined_conditions
if x in BROWSER_TYPE_MODIFIERS]
browser_matches = ((not browser_expectations) or
browser.browser_type in browser_expectations)
if not browser_matches:
return False
angle_renderer = ''
gpu_info = None
if browser.supports_system_info:
gpu_info = browser.GetSystemInfo().gpu
if gpu_info and gpu_info.aux_attributes:
gl_renderer = gpu_info.aux_attributes.get('gl_renderer')
if gl_renderer:
if 'Direct3D11' in gl_renderer:
angle_renderer = 'd3d11'
elif 'Direct3D9' in gl_renderer:
angle_renderer = 'd3d9'
elif 'OpenGL' in gl_renderer:
angle_renderer = 'opengl'
angle_expectations = [x for x in expectation.user_defined_conditions
if x in ANGLE_MODIFIERS]
angle_matches = ((not angle_expectations) or
angle_renderer in angle_expectations)
return angle_matches
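# A minimal subclass sketch (hypothetical page names and bug numbers, not
# part of Chromium) showing how the ANGLE and browser-type modifiers above
# combine with Flaky():
#
# class SampleGpuExpectations(GpuTestExpectations):
#   def SetExpectations(self):
#     self.Fail('gl-teximage.html', ['win', 'd3d9'], bug=111)
#     self.Flaky('context-lost.html', ['android-webview-shell'], bug=222,
#                max_num_retries=3)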
|
mvaled/sentry
|
tests/sentry/api/endpoints/test_monitor_checkins.py
|
from __future__ import absolute_import, print_function
from datetime import timedelta
from django.utils import timezone
from freezegun import freeze_time
from sentry.models import CheckInStatus, Monitor, MonitorCheckIn, MonitorStatus, MonitorType
from sentry.testutils import APITestCase
@freeze_time("2019-01-01")
class CreateMonitorCheckInTest(APITestCase):
def test_passing(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "ok"}
)
assert resp.status_code == 201, resp.content
checkin = MonitorCheckIn.objects.get(guid=resp.data["id"])
assert checkin.status == CheckInStatus.OK
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == MonitorStatus.OK
assert monitor.last_checkin == checkin.date_added
assert monitor.next_checkin == monitor.get_next_scheduled_checkin(checkin.date_added)
def test_failing(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 201, resp.content
checkin = MonitorCheckIn.objects.get(guid=resp.data["id"])
assert checkin.status == CheckInStatus.ERROR
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == MonitorStatus.ERROR
assert monitor.last_checkin == checkin.date_added
assert monitor.next_checkin == monitor.get_next_scheduled_checkin(checkin.date_added)
def test_disabled(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
status=MonitorStatus.DISABLED,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 201, resp.content
checkin = MonitorCheckIn.objects.get(guid=resp.data["id"])
assert checkin.status == CheckInStatus.ERROR
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == MonitorStatus.DISABLED
assert monitor.last_checkin == checkin.date_added
assert monitor.next_checkin == monitor.get_next_scheduled_checkin(checkin.date_added)
def test_pending_deletion(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
status=MonitorStatus.PENDING_DELETION,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 404, resp.content
def test_deletion_in_progress(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
status=MonitorStatus.DELETION_IN_PROGRESS,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 404, resp.content
|
nnrcschmdt/helsinki
|
program/management/commands/check_automation_ids.py
|
import json
from os.path import join
from django.conf import settings
from django.core.management.base import NoArgsCommand
from program.models import ProgramSlot
class Command(NoArgsCommand):
help = 'Checks the automation_ids used by program slots against the exported shows'
def handle_noargs(self, **options):
cache_dir = getattr(settings, 'AUTOMATION_CACHE_DIR', 'cache')
cached_shows = join(cache_dir, 'shows.json')
with open(cached_shows) as shows_json:
shows = json.loads(shows_json.read())
rd_ids = {}
for show in shows['shows']:
rd_ids[show['id']] = show
for show in shows['multi-shows']:
rd_ids[show['id']] = show
pv_ids = []
for programslot in ProgramSlot.objects.filter(automation_id__isnull=False):
pv_ids.append(int(programslot.automation_id))
for automation_id in sorted(rd_ids.iterkeys()):
if rd_ids[automation_id]['type'] == 's':
continue
multi_id = -1
if 'multi' in rd_ids[automation_id]:
multi_id = rd_ids[automation_id]['multi']['id']
if automation_id not in pv_ids and multi_id not in pv_ids:
if multi_id < 0:
print '+ %d' % (automation_id)
else:
print '+ %d (%d)' % (automation_id, multi_id)
for automation_id in sorted(pv_ids):
if automation_id not in rd_ids:
print '-', automation_id
|
lfairchild/PmagPy
|
programs/core_depthplot.py
|
#!/usr/bin/env pythonw
#from __future__ import print_function
import sys
import wx
import os
import matplotlib
if matplotlib.get_backend() != "WXAgg":
matplotlib.use("WXAgg")
import matplotlib.pyplot as plt
from pmagpy import pmagplotlib
import pmagpy.command_line_extractor as extractor
import pmagpy.ipmag as ipmag
import dialogs.pmag_widgets as pw
import dialogs.pmag_menu_dialogs as pmag_menu_dialogs
def main():
"""
NAME
core_depthplot.py
DESCRIPTION
plots various measurements versus core_depth or age. plots data flagged as 'FS-SS-C' as discrete samples.
SYNTAX
core_depthplot.py [command line options]
# or, for Anaconda users:
core_depthplot_anaconda [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input measurements format file
-fsum FILE: specify input LIMS database (IODP) core summary csv file
-fwig FILE: specify input depth,wiggle to plot, in magic format with sample_core_depth key for depth
-fsa FILE: specify input er_samples format file from magic for depth
-fa FILE: specify input ages format file from magic for age
NB: must have either -fsa OR -fa (not both)
-fsp FILE sym size: specify input zeq_specimen format file from magic, sym and size
NB: PCAs will have specified color, while fisher means will be white with specified color as the edgecolor
-fres FILE specify input pmag_results file from magic, sym and size
-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot
-S do not plot blanket treatment data (if this is set, you don't need the -LP)
-sym SYM SIZE, symbol, size for continuous points (e.g., ro 5, bs 10, g^ 10 for red dot, blue square, green triangle), default is blue dot at 5 pt
-D do not plot declination
-M do not plot magnetization
-log plot magnetization on a log scale
-L do not connect dots with a line
-I do not plot inclination
-d min max [in m] depth range to plot
-n normalize by weight in er_specimen table
-Iex: plot the expected inc at lat - only available for results with lat info in file
-ts TS amin amax: plot the GPTS for the time interval between amin and amax (numbers in Ma)
TS: [ck95, gts04, gts12]
-ds [mbsf,mcd] specify depth scale, mbsf default
-fmt [svg, eps, pdf, png] specify output format for plot (default: svg)
-sav save plot silently
DEFAULTS:
Measurements file: measurements.txt
Samples file: samples.txt
NRM step
Summary file: none
"""
args = sys.argv
if '-h' in args:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([ ['f', False, 'measurements.txt'], ['fsum', False, ''],
['fwig', False, ''], ['fsa', False, ''],
['fa', False, ''], ['fsp', False, ''],
['fres', False, '' ], ['fmt', False, 'svg'],
['LP', False, ''], ['n', False, False],
['d', False, '-1 -1'], ['ts', False, ''],
['WD', False, '.'], ['L', False, True],
['S', False, True], ['D', False, True],
['I', False, True], ['M', False, True],
['log', False, 0],
['ds', False, 'sample_core_depth'],
['sym', False, 'bo 5'], ['ID', False, '.'],
['sav', False, False], ['DM', False, 3]])
checked_args = extractor.extract_and_check_args(args, dataframe)
meas_file, sum_file, wig_file, samp_file, age_file, spc_file, res_file, fmt, meth, norm, depth, timescale, dir_path, pltLine, pltSus, pltDec, pltInc, pltMag, logit, depth_scale, symbol, input_dir, save, data_model_num = extractor.get_vars(
['f', 'fsum', 'fwig', 'fsa', 'fa', 'fsp', 'fres', 'fmt', 'LP', 'n', 'd', 'ts', 'WD', 'L', 'S', 'D', 'I', 'M', 'log', 'ds', 'sym', 'ID', 'sav', 'DM'], checked_args)
# format some variables
# format symbol/size
try:
sym, size = symbol.split()
size = int(size)
except ValueError:
print('you should provide -sym in this format: ro 5')
print('using defaults instead')
sym, size = 'ro', 5
# format result file, symbol, size
if res_file:
try:
res_file, res_sym, res_size = res_file.split()
except ValueError:
print('you must provide -fres in this format: -fres filename symbol size')
print(
'could not parse {}, defaulting to using no result file'.format(res_file))
res_file, res_sym, res_size = '', '', 0
else:
res_file, res_sym, res_size = '', '', 0
# format specimen file, symbol, size
if spc_file:
try:
spc_file, spc_sym, spc_size = spc_file.split()
except ValueError:
print('you must provide -fsp in this format: -fsp filename symbol size')
print(
'could not parse {}, defaulting to using no specimen file'.format(spc_file))
spc_file, spc_sym, spc_size = '', '', 0
else:
spc_file, spc_sym, spc_size = '', '', 0
# format min/max depth
try:
dmin, dmax = depth.split()
except ValueError:
print('you must provide -d in this format: -d dmin dmax')
print('could not parse {}, defaulting to plotting all depths'.format(depth))
dmin, dmax = -1, -1
# format timescale, min/max time
if timescale:
try:
timescale, amin, amax = timescale.split()
pltTime = True
except ValueError:
print(
'you must provide -ts in this format: -ts timescale minimum_age maximum_age')
print(
'could not parse {}, defaulting to using no timescale'.format(timescale))
timescale, amin, amax = None, -1, -1
pltTime = False
else:
timescale, amin, amax = None, -1, -1
pltTime = False
# format norm and wt_file
if norm and not isinstance(norm, bool):
wt_file = norm
norm = True
else:
norm = False
wt_file = ''
# format list of protocols and step
try:
method, step = meth.split()
except ValueError:
print(
'To use the -LP flag you must provide both the protocol and the step in this format:\n-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot')
print('Defaulting to using no protocol')
method, step = 'LT-NO', 0
# list of varnames
#['f', 'fsum', 'fwig', 'fsa', 'fa', 'fsp', 'fres', 'fmt', 'LP', 'n', 'd', 'ts', 'WD', 'L', 'S', 'D', 'I', 'M', 'log', 'ds', 'sym' ]
#meas_file, sum_file, wig_file, samp_file, age_file, spc_file, res_file, fmt, meth, norm, depth, timescale, dir_path, pltLine, pltSus, pltDec, pltInc, pltMag, logit, depth_scale, symbol
fig, figname = ipmag.core_depthplot(input_dir, meas_file, spc_file, samp_file, age_file, sum_file, wt_file, depth_scale, dmin, dmax, sym, size,
spc_sym, spc_size, method, step, fmt, pltDec, pltInc, pltMag, pltLine, pltSus, logit, pltTime, timescale, amin, amax, norm, data_model_num)
if not pmagplotlib.isServer:
figname = figname.replace(':', '_')
if fig and save:
print('-I- Created plot: {}'.format(figname))
plt.savefig(figname)
return
app = wx.App(redirect=False)
if not fig:
pw.simple_warning(
'No plot was able to be created with the data you provided.\nMake sure you have given all the required information and try again')
return False
dpi = fig.get_dpi()
pixel_width = dpi * fig.get_figwidth()
pixel_height = dpi * fig.get_figheight()
figname = os.path.join(dir_path, figname)
plot_frame = pmag_menu_dialogs.PlotFrame((int(pixel_width), int(pixel_height + 50)),
fig, figname, standalone=True)
app.MainLoop()
if __name__ == "__main__":
main()
|
FrancoisRheaultUS/dipy
|
doc/examples/sfm_reconst.py
|
"""
.. _sfm-reconst:
==============================================
Reconstruction with the Sparse Fascicle Model
==============================================
In this example, we will use the Sparse Fascicle Model (SFM) [Rokem2015]_ to
reconstruct the fiber Orientation Distribution Function (fODF) in every voxel.
First, we import the modules we will use in this example:
"""
import dipy.reconst.sfm as sfm
import dipy.data as dpd
import dipy.direction.peaks as dpp
from dipy.io.image import load_nifti, save_nifti
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from dipy.viz import window, actor
"""
For the purpose of this example, we will use the Stanford HARDI dataset (150
directions, single b-value of 2000 $s/mm^2$) that can be automatically
downloaded. If you have not yet downloaded this data-set in one of the other
examples, you will need to be connected to the internet the first time you run
this example. The data will be stored for subsequent runs, and for use with
other examples.
"""
hardi_fname, hardi_bval_fname, hardi_bvec_fname = dpd.get_fnames('stanford_hardi')
data, affine = load_nifti(hardi_fname)
bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
gtab = gradient_table(bvals, bvecs)
# Enables/disables interactive visualization
interactive = False
"""
Reconstruction of the fiber ODF in each voxel guides subsequent tracking
steps. Here, the model is the Sparse Fascicle Model, described in
[Rokem2015]_. This model reconstructs the diffusion signal as a combination of
the signals from different fascicles. This model can be written as:
.. math::
y = X\beta
Where $y$ is the signal and $\beta$ are weights on different points in the
sphere. The columns of the design matrix, $X$ are the signals in each point in
the measurement that would be predicted if there was a fascicle oriented in the
direction represented by that column. Typically, the signal used for this
kernel will be a prolate tensor with axial diffusivity 3-5 times higher than
its radial diffusivity. The exact numbers can also be estimated from examining
parts of the brain in which there is known to be only one fascicle (e.g. in
corpus callosum).
Sparsity constraints on the fiber ODF ($\beta$) are set through the Elastic Net
algorithm [Zou2005]_.
Elastic Net optimizes the following cost function:
.. math::
\sum_{i=1}^{n}{(y_i - \hat{y}_i)^2} + \alpha (\lambda \sum_{j=1}^{m}{w_j} + (1-\lambda) \sum_{j=1}^{m}{w^2_j})
where $\hat{y}$ is the signal predicted for a particular setting of $\beta$,
such that the left part of this expression is the squared loss function;
$\alpha$ is a parameter that sets the balance between the squared loss on
the data, and the regularization constraints. The regularization parameter
$\lambda$ sets the `l1_ratio`, which controls the balance between L1 sparsity
(a low sum of weights) and L2 shrinkage (a low sum of squares of the weights).
Just like Constrained Spherical Deconvolution (see :ref:`reconst-csd`), the SFM
requires the definition of a response function. We'll take advantage of the
automated algorithm in the :mod:`csdeconv` module to find this response
function:
"""
from dipy.reconst.csdeconv import auto_response
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
"""
The ``response`` return value contains two entries. The first is an array with
the eigenvalues of the response function and the second is the average S0 for
this response.
It is a very good practice to always validate the result of ``auto_response``.
For this purpose, we can print it and have a look at its values.
"""
print(response)
"""
(array([ 0.0014, 0.00029, 0.00029]), 416.206)
We initialize an SFM model object, using these values. We will use the default
sphere (362 vertices, symmetrically distributed on the surface of the sphere),
as a set of putative fascicle directions that are considered in the model.
"""
sphere = dpd.get_sphere()
sf_model = sfm.SparseFascicleModel(gtab, sphere=sphere,
l1_ratio=0.5, alpha=0.001,
response=response[0])
"""
For the purpose of the example, we will consider a small volume of data
containing parts of the corpus callosum and of the centrum semiovale
"""
data_small = data[20:50, 55:85, 38:39]
"""
Fitting the model to this small volume of data, we calculate the ODF of this
model on the sphere, and plot it.
"""
sf_fit = sf_model.fit(data_small)
sf_odf = sf_fit.odf(sphere)
fodf_spheres = actor.odf_slicer(sf_odf, sphere=sphere, scale=0.8,
colormap='plasma')
ren = window.Renderer()
ren.add(fodf_spheres)
print('Saving illustration as sf_odfs.png')
window.record(ren, out_path='sf_odfs.png', size=(1000, 1000))
if interactive:
window.show(ren)
"""
We can extract the peaks from the ODF, and plot these as well
"""
sf_peaks = dpp.peaks_from_model(sf_model,
data_small,
sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
return_sh=False)
window.clear(ren)
fodf_peaks = actor.peak_slicer(sf_peaks.peak_dirs, sf_peaks.peak_values)
ren.add(fodf_peaks)
print('Saving illustration as sf_peaks.png')
window.record(ren, out_path='sf_peaks.png', size=(1000, 1000))
if interactive:
window.show(ren)
"""
Finally, we plot both the peaks and the ODFs, overlaid:
"""
fodf_spheres.GetProperty().SetOpacity(0.4)
ren.add(fodf_spheres)
print('Saving illustration as sf_both.png')
window.record(ren, out_path='sf_both.png', size=(1000, 1000))
if interactive:
window.show(ren)
"""
.. figure:: sf_both.png
:align: center
SFM Peaks and ODFs.
To see how to use this information in tracking, proceed to :ref:`sfm-track`.
References
----------
.. [Rokem2015] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
(2015). Evaluating the accuracy of diffusion MRI models in white
matter. PLoS ONE 10(4): e0123272. doi:10.1371/journal.pone.0123272
.. [Zou2005] Zou H, Hastie T (2005). Regularization and variable
selection via the elastic net. J R Stat Soc B:301-320
"""
|
thypad/brew
|
skensemble/selection/dynamic/dsknn.py
|
import numpy as np
from brew.base import Ensemble
from brew.metrics.diversity.paired import kuncheva_double_fault_measure
from .base import DCS
class DSKNN(DCS):
"""DS-KNN
The DS-KNN selects an ensemble of classifiers based on
their accuracy and diversity in the neighborhood of the
test sample.
Attributes
----------
`Xval` : array-like, shape = [indeterminate, n_features]
Validation set.
`yval` : array-like, shape = [indeterminate]
Labels of the validation set.
`knn` : sklearn KNeighborsClassifier,
Classifier used to find neighborhood.
Examples
--------
>>> from brew.selection.dynamic import DSKNN
>>> from brew.generation.bagging import Bagging
>>> from brew.base import EnsembleClassifier
>>>
>>> from sklearn.tree import DecisionTreeClassifier
>>> import numpy as np
>>>
>>> X = np.array([[-1, 0], [-0.8, 1], [-0.8, -1], [-0.5, 0],
[0.5, 0], [1, 0], [0.8, 1], [0.8, -1]])
>>> y = np.array([1, 1, 1, 2, 1, 2, 2, 2])
>>> tree = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
>>> bag = Bagging(base_classifier=tree, n_classifiers=10)
>>> bag.fit(X, y)
>>>
>>> sel = DSKNN(X, y, K=3)
>>>
>>> clf = EnsembleClassifier(bag.ensemble, selector=sel)
>>> clf.predict([-1.1,-0.5])
[1]
See also
--------
brew.selection.dynamic.lca.OLA: Overall Local Accuracy.
brew.selection.dynamic.lca.LCA: Local Class Accuracy.
References
----------
Santana, Alixandre, et al. "A dynamic classifier selection method
to build ensembles using accuracy and diversity." 2006 Ninth
Brazilian Symposium on Neural Networks (SBRN'06). IEEE, 2006.
"""
def __init__(self, Xval, yval, K=5, weighted=False, knn=None,
n_1=0.7, n_2=0.3):
if n_1 < 0 or n_2 < 0 or n_1 <= n_2:
raise ValueError('n_1 and n_2 must be non-negative, with n_1 > n_2')
self.n_1 = n_1
self.n_2 = n_2
super(DSKNN, self).__init__(
Xval, yval, K=K, weighted=weighted, knn=knn)
def select(self, ensemble, x):
if ensemble.in_agreement(x):
return Ensemble([ensemble.classifiers[0]]), None
n_sel_1, n_sel_2 = self.n_1, self.n_2
if isinstance(self.n_1, float):
n_sel_1 = int(n_sel_1 * len(ensemble))
if isinstance(self.n_2, float):
n_sel_2 = int(n_sel_2 * len(ensemble))
n_sel_1 = max(n_sel_1, 1)
n_sel_2 = max(n_sel_2, 1)
# initialize variables
# idx holds the indexes of the K nearest neighbors of x
classifiers = ensemble.classifiers
[idx] = self.knn.kneighbors(x, return_distance=False)
X, y = self.Xval[idx], self.yval[idx]
acc_scores = np.array([clf.score(X, y) for clf in classifiers])
out = ensemble.output(X, mode='labels')
oracle = np.equal(out, y[:, np.newaxis])
div_scores = np.zeros(len(ensemble), dtype=float)
for i in range(len(ensemble)):
tmp = []
for j in range(len(ensemble)):
if i != j:
d = kuncheva_double_fault_measure(oracle[:, [i, j]])
tmp.append(d)
div_scores[i] = np.mean(tmp)
z = zip(np.arange(len(ensemble)), acc_scores, div_scores)
z = sorted(z, key=lambda e: e[1], reverse=True)[:n_sel_1]
z = sorted(z, key=lambda e: e[2], reverse=False)[:n_sel_2]
z = list(zip(*z))[0]  # list() so indexing also works on Python 3
classifiers = [classifiers[i] for i in z]
return Ensemble(classifiers=classifiers), None
|
mosajjal/mitmproxy
|
mitmproxy/contrib/kaitaistruct/jpeg.py
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
# The source was jpeg.ksy from here - https://github.com/kaitai-io/kaitai_struct_formats/blob/24e2d00048b8084ceec30a187a79cb87a79a48ba/image/jpeg.ksy
import array
import struct
import zlib
from enum import Enum
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from .exif import Exif
class Jpeg(KaitaiStruct):
class ComponentId(Enum):
y = 1
cb = 2
cr = 3
i = 4
q = 5
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.segments = []
while not self._io.is_eof():
self.segments.append(self._root.Segment(self._io, self, self._root))
class Segment(KaitaiStruct):
class MarkerEnum(Enum):
tem = 1
sof0 = 192
sof1 = 193
sof2 = 194
sof3 = 195
dht = 196
sof5 = 197
sof6 = 198
sof7 = 199
soi = 216
eoi = 217
sos = 218
dqt = 219
dnl = 220
dri = 221
dhp = 222
app0 = 224
app1 = 225
app2 = 226
app3 = 227
app4 = 228
app5 = 229
app6 = 230
app7 = 231
app8 = 232
app9 = 233
app10 = 234
app11 = 235
app12 = 236
app13 = 237
app14 = 238
app15 = 239
com = 254
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.ensure_fixed_contents(struct.pack('1b', -1))
self.marker = self._root.Segment.MarkerEnum(self._io.read_u1())
if ((self.marker != self._root.Segment.MarkerEnum.soi) and (self.marker != self._root.Segment.MarkerEnum.eoi)) :
self.length = self._io.read_u2be()
if ((self.marker != self._root.Segment.MarkerEnum.soi) and (self.marker != self._root.Segment.MarkerEnum.eoi)) :
_on = self.marker
if _on == self._root.Segment.MarkerEnum.sos:
self._raw_data = self._io.read_bytes((self.length - 2))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = self._root.SegmentSos(io, self, self._root)
elif _on == self._root.Segment.MarkerEnum.app1:
self._raw_data = self._io.read_bytes((self.length - 2))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = self._root.SegmentApp1(io, self, self._root)
elif _on == self._root.Segment.MarkerEnum.sof0:
self._raw_data = self._io.read_bytes((self.length - 2))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = self._root.SegmentSof0(io, self, self._root)
elif _on == self._root.Segment.MarkerEnum.app0:
self._raw_data = self._io.read_bytes((self.length - 2))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = self._root.SegmentApp0(io, self, self._root)
else:
self.data = self._io.read_bytes((self.length - 2))
if self.marker == self._root.Segment.MarkerEnum.sos:
self.image_data = self._io.read_bytes_full()
class SegmentSos(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.num_components = self._io.read_u1()
self.components = [None] * (self.num_components)
for i in range(self.num_components):
self.components[i] = self._root.SegmentSos.Component(self._io, self, self._root)
self.start_spectral_selection = self._io.read_u1()
self.end_spectral = self._io.read_u1()
self.appr_bit_pos = self._io.read_u1()
class Component(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.id = self._root.ComponentId(self._io.read_u1())
self.huffman_table = self._io.read_u1()
class SegmentApp1(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.read_strz("ASCII", 0, False, True, True)
_on = self.magic
if _on == u"Exif":
self.body = self._root.ExifInJpeg(self._io, self, self._root)
class SegmentSof0(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.bits_per_sample = self._io.read_u1()
self.image_height = self._io.read_u2be()
self.image_width = self._io.read_u2be()
self.num_components = self._io.read_u1()
self.components = [None] * (self.num_components)
for i in range(self.num_components):
self.components[i] = self._root.SegmentSof0.Component(self._io, self, self._root)
class Component(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.id = self._root.ComponentId(self._io.read_u1())
self.sampling_factors = self._io.read_u1()
self.quantization_table_id = self._io.read_u1()
@property
def sampling_x(self):
if hasattr(self, '_m_sampling_x'):
return self._m_sampling_x if hasattr(self, '_m_sampling_x') else None
self._m_sampling_x = ((self.sampling_factors & 240) >> 4)
return self._m_sampling_x if hasattr(self, '_m_sampling_x') else None
@property
def sampling_y(self):
if hasattr(self, '_m_sampling_y'):
return self._m_sampling_y if hasattr(self, '_m_sampling_y') else None
self._m_sampling_y = (self.sampling_factors & 15)
return self._m_sampling_y if hasattr(self, '_m_sampling_y') else None
class ExifInJpeg(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.extra_zero = self._io.ensure_fixed_contents(struct.pack('1b', 0))
self._raw_data = self._io.read_bytes_full()
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Exif(io)
class SegmentApp0(KaitaiStruct):
class DensityUnit(Enum):
no_units = 0
pixels_per_inch = 1
pixels_per_cm = 2
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.read_str_byte_limit(5, "ASCII")
self.version_major = self._io.read_u1()
self.version_minor = self._io.read_u1()
self.density_units = self._root.SegmentApp0.DensityUnit(self._io.read_u1())
self.density_x = self._io.read_u2be()
self.density_y = self._io.read_u2be()
self.thumbnail_x = self._io.read_u1()
self.thumbnail_y = self._io.read_u1()
self.thumbnail = self._io.read_bytes(((self.thumbnail_x * self.thumbnail_y) * 3))
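# --- Usage sketch (hypothetical file name, not part of the generated
# parser) --- walk the top-level segments of a parsed JPEG:
# >>> with open('photo.jpg', 'rb') as f:
# ...     jpg = Jpeg(KaitaiStream(BytesIO(f.read())))
# >>> [seg.marker for seg in jpg.segments] # e.g. [soi, app0, ..., sos]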
|
KellyChan/python-examples
|
python/data_science/Titanic/complexHeuristic.py
|
import numpy
import pandas
import statsmodels.api as sm
def complex_heuristic(file_path):
'''
You are given a list of Titanic passengers and their associated
information. More information about the data can be seen at the link below:
http://www.kaggle.com/c/titanic-gettingStarted/data
For this exercise, you need to write a more sophisticated heuristic
that will use the passengers' gender, socio-economical class, and age
to predict if they survived the Titanic disaster.
Your prediction should be 79% accurate or higher.
If the passenger is female, or if his/her socio-economical status is high AND
the passenger is under 18, you should assume the passenger survived.
Otherwise, you should assume the passenger perished in the disaster.
Or more specifically in code terms: female or (high status and under 18)
You can access the gender of a passenger via passenger['Sex'].
If the passenger is male, passenger['Sex'] will return a string "male".
If the passenger is female, passenger['Sex'] will return a string "female".
You can access the socio-economical status of a passenger via passenger['Pclass']:
High socio-economical status -- passenger['Pclass'] is 1
Medium socio-economical status -- passenger['Pclass'] is 2
Low socio-economical status -- passenger['Pclass'] is 3
You can access the age of a passenger via passenger['Age'].
Write your prediction back into the "predictions" dictionary. The
key of the dictionary should be the passenger's id (which can be accessed
via passenger["PassengerId"]) and the associated value should be 1 if the
passenger survived or 0 otherwise.
For example, if a passenger survived:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 1
Or if a passenger perished in the disaster:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 0
You can also look at the Titanic data that you will be working with
at the link below:
https://www.dropbox.com/s/r5f9aos8p9ri9sa/titanic_data.csv
'''
predictions = {}
df = pandas.read_csv(file_path)
for passenger_index, passenger in df.iterrows():
#
# your code here
#
if (passenger['Sex']=='female') or (passenger['Pclass']==1 and passenger['Age']<18):
predictions[passenger['PassengerId']] = 1
else:
predictions[passenger['PassengerId']] = 0
return predictions
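# --- Usage sketch (hypothetical CSV path, not part of the original) ---
# predictions = complex_heuristic('titanic_data.csv')
# sum(predictions.values()) # number of passengers predicted to survive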
|
b1naryth1ef/rowboat
|
rowboat/types/guild.py
|
import os
from holster.enum import Enum
from rowboat.types import Model, SlottedModel, Field, DictField, text, raw, rule_matcher
CooldownMode = Enum(
'GUILD',
'CHANNEL',
'USER',
)
class PluginConfigObj(object):
client = None
class PluginsConfig(Model):
def __init__(self, inst, obj):
self.client = None
self.load_into(inst, obj)
@classmethod
def parse(cls, obj, *args, **kwargs):
inst = PluginConfigObj()
cls(inst, obj)
return inst
@classmethod
def force_load_plugin_configs(cls):
"""
This function can be called to ensure that this class will have all its
attributes properly loaded, as they are dynamically set when plugin configs
are defined.
"""
plugins = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'plugins')
for name in os.listdir(plugins):
__import__('rowboat.plugins.{}'.format(
name.rsplit('.', 1)[0]
))
class CommandOverrideConfig(SlottedModel):
disabled = Field(bool, default=False)
level = Field(int)
class CommandsConfig(SlottedModel):
prefix = Field(str, default='')
mention = Field(bool, default=False)
overrides = Field(raw)
def get_command_override(self, command):
return rule_matcher(command, self.overrides or [])
class GuildConfig(SlottedModel):
nickname = Field(text)
commands = Field(CommandsConfig, default=None, create=False)
levels = DictField(int, int)
plugins = Field(PluginsConfig.parse)
|
KnightOS/packages.knightos.org
|
packages/config.py
|
import logging
try:
from configparser import ConfigParser
except ImportError:
# Python 2 support
from ConfigParser import ConfigParser
logger = logging.getLogger("packges.knightos.org")
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
sh.setFormatter(formatter)
logger.addHandler(sh)
# scss logger
logging.getLogger("scss").addHandler(sh)
config = ConfigParser()
config.readfp(open('config.ini'))
env = 'dev'
_cfg = lambda k: config.get(env, k)
_cfgi = lambda k: int(_cfg(k))
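# Usage sketch (hypothetical key names, assuming config.ini has a [dev]
# section): _cfg('canonical') returns the raw string for that key, while
# _cfgi('port') coerces it to int.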
|
Spiderlover/Toontown
|
otp/friends/PlayerFriendsManager.py
|
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.otpbase import OTPGlobals
from otp.avatar.Avatar import teleportNotify
from otp.friends import FriendResponseCodes
class PlayerFriendsManager(DistributedObjectGlobal):
notify = directNotify.newCategory('PlayerFriendsManager')
def __init__(self, cr):
DistributedObjectGlobal.__init__(self, cr)
self.playerFriendsList = set()
self.playerId2Info = {}
self.playerAvId2avInfo = {}
self.accept('gotExtraFriendHandles', self.__handleFriendHandles)
def delete(self):
self.ignoreAll()
def sendRequestInvite(self, playerId):
print 'PFM sendRequestInvite id:%s' % playerId
self.sendUpdate('requestInvite', [0, playerId, True])
def sendRequestDecline(self, playerId):
self.sendUpdate('requestDecline', [0, playerId])
def sendRequestRemove(self, playerId):
self.sendUpdate('requestRemove', [0, playerId])
def sendRequestUnlimitedSecret(self):
self.sendUpdate('requestUnlimitedSecret', [0])
def sendRequestLimitedSecret(self, username, password):
self.sendUpdate('requestLimitedSecret', [0, username, password])
def sendRequestUseUnlimitedSecret(self, secret):
pass
def sendRequestUseLimitedSecret(self, secret, username, password):
pass
def sendSCWhisper(self, recipientId, msgId):
self.sendUpdate('whisperSCTo', [0, recipientId, msgId])
def sendSCCustomWhisper(self, recipientId, msgId):
self.sendUpdate('whisperSCCustomTo', [0, recipientId, msgId])
def sendSCEmoteWhisper(self, recipientId, msgId):
self.sendUpdate('whisperSCEmoteTo', [0, recipientId, msgId])
def setTalkAccount(self, toAc, fromAc, fromName, message, mods, flags):
localAvatar.displayTalkAccount(fromAc, fromName, message, mods)
toName = None
friendInfo = self.getFriendInfo(toAc)
if friendInfo:
toName = friendInfo.playerName
elif toAc == localAvatar.DISLid:
toName = localAvatar.getName()
base.talkAssistant.receiveAccountTalk(None, None, fromAc, fromName, toAc, toName, message)
return
def invitationFrom(self, playerId, avatarName):
messenger.send(OTPGlobals.PlayerFriendInvitationEvent, [playerId, avatarName])
def retractInvite(self, playerId):
messenger.send(OTPGlobals.PlayerFriendRetractInviteEvent, [playerId])
def rejectInvite(self, playerId, reason):
messenger.send(OTPGlobals.PlayerFriendRejectInviteEvent, [playerId, reason])
def rejectRemove(self, playerId, reason):
messenger.send(OTPGlobals.PlayerFriendRejectRemoveEvent, [playerId, reason])
def secretResponse(self, secret):
print 'secretResponse %s' % secret
messenger.send(OTPGlobals.PlayerFriendNewSecretEvent, [secret])
def rejectSecret(self, reason):
print 'rejectSecret %s' % reason
messenger.send(OTPGlobals.PlayerFriendRejectNewSecretEvent, [reason])
def rejectUseSecret(self, reason):
print 'rejectUseSecret %s' % reason
messenger.send(OTPGlobals.PlayerFriendRejectUseSecretEvent, [reason])
def invitationResponse(self, playerId, respCode, context):
if respCode == FriendResponseCodes.INVITATION_RESP_DECLINE:
messenger.send(OTPGlobals.PlayerFriendRejectInviteEvent, [playerId, respCode])
elif respCode == FriendResponseCodes.INVITATION_RESP_NEW_FRIENDS:
pass
def updatePlayerFriend(self, id, info, isNewFriend):
self.notify.warning('updatePlayerFriend: %s, %s, %s' % (id, info, isNewFriend))
info.calcUnderstandableYesNo()
if info.playerName[0:5] == 'Guest':
info.playerName = 'Guest ' + info.playerName[5:]
if id not in self.playerFriendsList:
self.playerFriendsList.add(id)
self.playerId2Info[id] = info
messenger.send(OTPGlobals.PlayerFriendAddEvent, [id, info, isNewFriend])
elif id in self.playerId2Info:
if not self.playerId2Info[id].onlineYesNo and info.onlineYesNo:
self.playerId2Info[id] = info
messenger.send('playerOnline', [id])
base.talkAssistant.receiveFriendAccountUpdate(id, info.playerName, info.onlineYesNo)
elif self.playerId2Info[id].onlineYesNo and not info.onlineYesNo:
self.playerId2Info[id] = info
messenger.send('playerOffline', [id])
base.talkAssistant.receiveFriendAccountUpdate(id, info.playerName, info.onlineYesNo)
if not self.askAvatarKnownHere(info.avatarId):
self.requestAvatarInfo(info.avatarId)
self.playerId2Info[id] = info
av = base.cr.doId2do.get(info.avatarId, None)
if av is not None:
av.considerUnderstandable()
messenger.send(OTPGlobals.PlayerFriendUpdateEvent, [id, info])
return
def removePlayerFriend(self, id):
if id not in self.playerFriendsList:
return
self.playerFriendsList.remove(id)
info = self.playerId2Info.pop(id, None)
if info is not None:
av = base.cr.doId2do.get(info.avatarId, None)
if av is not None:
av.considerUnderstandable()
messenger.send(OTPGlobals.PlayerFriendRemoveEvent, [id])
return
def whisperSCFrom(self, playerId, msg):
base.talkAssistant.receivePlayerWhisperSpeedChat(msg, playerId)
def isFriend(self, pId):
return self.isPlayerFriend(pId)
def isPlayerFriend(self, pId):
if not pId:
return 0
return pId in self.playerFriendsList
def isAvatarOwnerPlayerFriend(self, avId):
pId = self.findPlayerIdFromAvId(avId)
if pId and self.isPlayerFriend(pId):
return True
else:
return False
def getFriendInfo(self, pId):
return self.playerId2Info.get(pId)
def findPlayerIdFromAvId(self, avId):
for playerId in self.playerId2Info:
if self.playerId2Info[playerId].avatarId == avId:
if self.playerId2Info[playerId].onlineYesNo:
return playerId
return None
def findAvIdFromPlayerId(self, pId):
pInfo = self.playerId2Info.get(pId)
if pInfo:
return pInfo.avatarId
else:
return None
def findPlayerInfoFromAvId(self, avId):
playerId = self.findPlayerIdFromAvId(avId)
if playerId:
return self.getFriendInfo(playerId)
else:
return None
def askAvatarOnline(self, avId):
returnValue = 0
if avId in self.cr.doId2do:
returnValue = 1
if avId in self.playerAvId2avInfo:
playerId = self.findPlayerIdFromAvId(avId)
if playerId in self.playerId2Info:
playerInfo = self.playerId2Info[playerId]
if playerInfo.onlineYesNo:
returnValue = 1
return returnValue
def countTrueFriends(self):
count = 0
for id in self.playerId2Info:
if self.playerId2Info[id].openChatFriendshipYesNo:
count += 1
return count
def askTransientFriend(self, avId):
if (avId in self.playerAvId2avInfo) and (not base.cr.isAvatarFriend(avId)):
return 1
else:
return 0
def askAvatarKnown(self, avId):
if self.askAvatarKnownElseWhere(avId) or self.askAvatarKnownHere(avId):
return 1
else:
return 0
def askAvatarKnownElseWhere(self, avId):
if hasattr(base, 'cr'):
if base.cr.askAvatarKnown(avId):
return 1
return 0
def askAvatarKnownHere(self, avId):
if avId in self.playerAvId2avInfo:
return 1
else:
return 0
def requestAvatarInfo(self, avId):
if hasattr(base, 'cr'):
base.cr.queueRequestAvatarInfo(avId)
def __handleFriendHandles(self, handleList):
for handle in handleList:
self.playerAvId2avInfo[handle.getDoId()] = handle
messenger.send('friendsListChanged')
def getAvHandleFromId(self, avId):
if avId in self.playerAvId2avInfo:
return self.playerAvId2avInfo[avId]
def identifyFriend(self, avId):
handle = None
teleportNotify.debug('identifyFriend(%s)' % avId)
handle = base.cr.identifyFriend(avId)
if not handle:
teleportNotify.debug('getAvHandleFromId(%s)' % avId)
handle = self.getAvHandleFromId(avId)
return handle
def getAllOnlinePlayerAvatars(self):
returnList = []
for avatarId in self.playerAvId2avInfo:
playerId = self.findPlayerIdFromAvId(avatarId)
if playerId:
if self.playerId2Info[playerId].onlineYesNo:
returnList.append(avatarId)
return returnList
def identifyAvatar(self, doId):
if doId in base.cr.doId2do:
return base.cr.doId2do[doId]
else:
return self.identifyFriend(doId)
def friendsListFull(self):
return len(self.playerFriendsList) >= OTPGlobals.MaxPlayerFriends
|
JoshuaSkelly/TroubleInCloudLand
|
main.py
|
#!/usr/bin/python
import pygame
import enemies
from core import balloon, bullet, game, gem, particle, player, world
from scenes import credits, scene, splashscreen
from ui import menu, text
from utils import prettyprint, utility, vector
from utils.settings import *
pygame.init()
utility.read_settings()
if settings_list[SETTING_FULLSCREEN]:
screen = utility.set_fullscreen()
else:
screen = utility.set_fullscreen(False)
pygame.display.set_icon(utility.load_image('icon'))
pygame.display.set_caption('Trouble In CloudLand v1.1')
screen.fill((0, 0, 0))
tempText = text.Text(FONT_PATH, 36, (255, 255, 255))
tempText.set_text('Loading...')
tempText.position = vector.Vector2d((SCREEN_WIDTH / 2) - (tempText.image.get_width() / 2), (SCREEN_HEIGHT / 2) - (tempText.image.get_height() / 2))
tempText.update()
tempText.draw(screen)
pygame.display.flip()
try:
pygame.mixer.set_reserved(MUSIC_CHANNEL)
pygame.mixer.Channel(MUSIC_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(PLAYER_CHANNEL)
pygame.mixer.Channel(PLAYER_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(OW_CHANNEL)
pygame.mixer.Channel(OW_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(BAAKE_CHANNEL)
pygame.mixer.Channel(BAAKE_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(BOSS_CHANNEL)
pygame.mixer.Channel(BOSS_CHANNEL).set_volume(1)
pygame.mixer.set_reserved(PICKUP_CHANNEL)
pygame.mixer.Channel(PICKUP_CHANNEL).set_volume(1)
except pygame.error:
utility.sound_active = False
print('WARNING! - Sound not initialized.')
pygame.mouse.set_visible(False)
music_list = [
utility.load_sound('menuMusic'),
utility.load_sound('music0'),
utility.load_sound('music1'),
utility.load_sound('music2'),
utility.load_sound('bossMusic')
]
world.load_data()
player.load_data()
bullet.load_data()
pygame.event.pump()
enemies.baake.load_data()
balloon.load_data()
gem.load_data()
pygame.event.pump()
enemies.moono.load_data()
enemies.batto.load_data()
enemies.rokubi.load_data()
pygame.event.pump()
enemies.haoya.load_data()
enemies.yurei.load_data()
enemies.bokko.load_data()
pygame.event.pump()
enemies.hakta.load_data()
enemies.raayu.load_data()
enemies.paajo.load_data()
pygame.event.pump()
enemies.boss.load_data()
particle.load_data()
menu.load_data()
for event in pygame.event.get():
pass
splashscreen.SplashScreen(screen, 'pygamesplash')
utility.play_music(music_list[MENU_MUSIC])
splashscreen.SplashScreen(screen, 'gameSplash')
if settings_list[WORLD_UNLOCKED] == 0:
new_scene = scene.TutorialScene()
elif settings_list[WORLD_UNLOCKED] == 1:
new_scene = scene.ForestScene()
elif settings_list[WORLD_UNLOCKED] == 2:
new_scene = scene.RockyScene()
elif settings_list[WORLD_UNLOCKED] == 3:
new_scene = scene.PinkScene()
game_is_running = True
main_menu_dictionary = {
START_GAME: ('Play', 'Start a New Game'),
OPTION_MENU: ('Options', 'Change Sound and Video Options'),
CREDIT_MENU: ('Credits', 'Who We Are, What We Did'),
EXIT_GAME: ('Exit', 'Exit the Game')
}
world_menu_dictionary = {
TUTORIAL: ('Tutorial', 'Start the Tutorial [Learn]'),
WORLD1: ('Cloudopolis', 'Start Playing Cloudopolis [Apprentice]'),
WORLD2: ('Nightmaria', 'Start Playing Nightmaria [Journeyman]'),
WORLD3: ('Opulent Dream', 'Start Playing Opulent Dream [Master]'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
option_menu_dictionary = {
SOUND_MENU: ('Sound Options', 'Change Sound Options'),
DISPLAY_MENU: ('Video Options', 'Change Video Options'),
CHANGE_SENSITIVITY: ('Mouse Sensitivity: ' + prettyprint.mouse_sensitivity(settings_list[SENSITIVITY]), 'Change Mouse Sensitivity'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
sound_menu_dictionary = {
TOGGLE_SFX: ('Sound Effects: ' + prettyprint.on(settings_list[SFX]), 'Turn ' + prettyprint.on(not settings_list[SFX]) + ' Sound Effects'),
TOGGLE_MUSIC: ('Music: ' + prettyprint.on(settings_list[MUSIC]), 'Turn ' + prettyprint.on(not settings_list[MUSIC]) + ' Music'),
EXIT_OPTIONS: ('Back', 'Go Back to the Option Menu')
}
display_menu_dictionary = {
TOGGLE_PARTICLES: ('Particles: ' + prettyprint.able(settings_list[PARTICLES]), 'Turn ' + prettyprint.on(not settings_list[PARTICLES]) + ' Particle Effects'),
TOGGLE_FULLSCREEN: ('Video Mode: ' + prettyprint.screen_mode(settings_list[SETTING_FULLSCREEN]), 'Switch To ' + prettyprint.screen_mode(not settings_list[SETTING_FULLSCREEN]) + ' Mode'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
sensitivity_menu_dictionary = {
0: ('Very Low', 'Change Sensitivity to Very Low'),
1: ('Low', 'Change Sensitivity to Low'),
2: ('Normal', 'Change Sensitivity to Normal'),
3: ('High', 'Change Sensitivity to High'),
4: ('Very High', 'Change Sensitivity to Very High')
}
menu_bounds = (0, SCREEN_HEIGHT / 3, SCREEN_WIDTH, SCREEN_HEIGHT)
while game_is_running:
menu_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Trouble in Cloudland', 80, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
main_menu_dictionary).show()
if menu_result == START_GAME:
last_highlighted = settings_list[WORLD_UNLOCKED]
world_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Choose a World', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
world_menu_dictionary,
last_highlighted).show()
if world_result == TUTORIAL:
game.Game(screen, 0, music_list).run()
elif world_result == EXIT_OPTIONS:
world_result = False
elif world_result is not False:
utility.fade_music()
utility.play_music(music_list[world_result - 1], True)
game.Game(screen, world_result - 1, music_list).run()
elif menu_result == OPTION_MENU:
option_result = True
last_highlighted = 0
while option_result:
option_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Options', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
option_menu_dictionary,
last_highlighted).show()
if option_result == SOUND_MENU:
sound_result = True
last_highlighted = 0
while sound_result:
sound_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Sound Options', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
sound_menu_dictionary,
last_highlighted).show()
if sound_result == TOGGLE_SFX:
settings_list[SFX] = not settings_list[SFX]
last_highlighted = 0
elif sound_result == TOGGLE_MUSIC:
settings_list[MUSIC] = not settings_list[MUSIC]
if not settings_list[MUSIC]:
pygame.mixer.Channel(MUSIC_CHANNEL).stop()
last_highlighted = 1
elif sound_result == EXIT_OPTIONS:
sound_result = False
sound_menu_dictionary = {
TOGGLE_SFX: ('Sound Effects: ' + prettyprint.on(settings_list[SFX]), 'Turn ' + prettyprint.on(not settings_list[SFX]) + ' Sound Effects'),
TOGGLE_MUSIC: ('Music: ' + prettyprint.on(settings_list[MUSIC]), 'Turn ' + prettyprint.on(not settings_list[MUSIC]) + ' Music'),
EXIT_OPTIONS: ('Back','Go Back to the Option Menu')
}
if option_result == DISPLAY_MENU:
display_result = True
last_highlighted = 0
while display_result:
display_result = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Video Options', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
display_menu_dictionary,
last_highlighted).show()
if display_result == TOGGLE_PARTICLES:
settings_list[PARTICLES] = not settings_list[PARTICLES]
last_highlighted = 0
elif display_result == TOGGLE_FULLSCREEN:
settings_list[SETTING_FULLSCREEN] = not settings_list[SETTING_FULLSCREEN]
last_highlighted = 1
if settings_list[SETTING_FULLSCREEN]:
screen = utility.set_fullscreen()
else:
screen = utility.set_fullscreen(False)
pygame.mouse.set_visible(False)
elif display_result == EXIT_OPTIONS:
display_result = False
display_menu_dictionary = {
TOGGLE_PARTICLES: ('Particles: ' + prettyprint.able(settings_list[PARTICLES]), 'Turn ' + prettyprint.on(not settings_list[PARTICLES]) + ' Particle Effects'),
TOGGLE_FULLSCREEN: ('Video Mode: ' + prettyprint.screen_mode(settings_list[SETTING_FULLSCREEN]), 'Switch To ' + prettyprint.screen_mode(not settings_list[SETTING_FULLSCREEN]) + ' Mode'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
elif option_result == EXIT_OPTIONS:
option_result = False
elif option_result == CHANGE_SENSITIVITY:
sensitivity_result = True
last_highlighted = 0
while sensitivity_result:
sensitivity_menu = menu.Menu(screen,
music_list[MENU_MUSIC],
new_scene,
menu_bounds,
('Mouse Sensitivity', 96, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4),
sensitivity_menu_dictionary,
last_highlighted)
sensitivity_result = sensitivity_menu.show()
mouse_sensitivities = [0.5, 0.75, 1, 1.25, 1.5]
settings_list[SENSITIVITY] = mouse_sensitivities[sensitivity_result]
if sensitivity_result > 0:
sensitivity_result = False
option_menu_dictionary = {
SOUND_MENU: ('Sound Options', 'Change Sound Options'),
DISPLAY_MENU: ('Video Options', 'Change Video Options'),
CHANGE_SENSITIVITY: ('Mouse Sensitivity: ' + prettyprint.mouse_sensitivity(settings_list[SENSITIVITY]), 'Change Mouse Sensitivity'),
EXIT_OPTIONS: ('Back', 'Go Back to the Main Menu')
}
elif menu_result == CREDIT_MENU:
credits.Credits(screen, music_list[MENU_MUSIC])
elif menu_result == EXIT_GAME:
game_is_running = False
utility.write_settings()
splashscreen.SplashScreen(screen, 'outroSplash')
quit()
|
SohoTechLabs/django-ajax-changelist
|
ajax_changelist/admin.py
|
from django import http
from django.conf.urls import patterns
from django.contrib import admin
from django.db import models
from django.forms.models import modelform_factory
from django.shortcuts import get_object_or_404
from django.template import loader, Context
from django.views.generic import View
def get_printable_field_value(instance, fieldname):
""" Get the display value of a model field, showing a comma-delimited
list for M2M fields.
"""
field = instance._meta.get_field(fieldname)
field_value = getattr(instance, fieldname)
if isinstance(field, models.ManyToManyField):
field_value = ', '.join([unicode(f) for f in
field_value.all()])
return field_value
class AjaxModelFormView(View):
""" Handles AJAX updates of a single field on an object
(You likely don't need to use this directly as the admin
registers a URL for it itself.)
"""
model = None
valid_fields = None
def __init__(self, model, valid_fields, **kwargs):
self.model = model
self.valid_fields = valid_fields
def post(self, request, object_id, *args, **kwargs):
if not request.user or not request.user.is_staff:
return http.HttpResponseForbidden()
post_data = request.POST.copy()
fieldname = post_data.pop('field', [None])[0]
form_prefix = post_data.pop('prefix', [None])[0]
# prevent setting fields that weren't made AJAX-editable
if fieldname not in self.valid_fields:
return http.HttpResponseBadRequest()
ItemForm = modelform_factory(self.model, fields=(fieldname,))
instance = get_object_or_404(self.model, pk=object_id)
form = ItemForm(post_data, instance=instance, prefix=form_prefix)
if not form or not form.is_valid():
return http.HttpResponseBadRequest()
form.save()
new_value = get_printable_field_value(instance, fieldname)
return http.HttpResponse(new_value)
class AjaxModelAdmin(admin.ModelAdmin):
""" Admin class providing support for inline forms in
listview that are submitted through AJAX.
"""
def __init__(self, *args, **kwargs):
HANDLER_NAME_TPL = "_%s_ajax_handler"
if not hasattr(self, 'ajax_list_display'):
self.ajax_list_display = []
self.list_display = list(self.list_display)
self.list_display = self.list_display + map(lambda name: HANDLER_NAME_TPL % name,
self.ajax_list_display)
super(AjaxModelAdmin, self).__init__(*args, **kwargs)
for name in self.ajax_list_display:
setattr(self, HANDLER_NAME_TPL % name,
self._get_field_handler(name))
self.ajax_item_template = loader.get_template('ajax_changelist/'
'field_form.html')
def get_urls(self):
""" Add endpoint for saving a new field value. """
urls = super(AjaxModelAdmin, self).get_urls()
list_urls = patterns('',
(r'^(?P<object_id>\d+)$',
AjaxModelFormView.as_view(model=self.model,
valid_fields=self.ajax_list_display)))
return list_urls + urls
def _get_field_handler(self, fieldname):
""" Handle rendering of AJAX-editable fields for the changelist, by
dynamically building a callable for each field.
"""
def handler_function(obj, *args, **kwargs):
ItemForm = modelform_factory(self.model, fields=(fieldname,))
form = ItemForm(instance=obj, prefix="c" + unicode(obj.id))
field_value = get_printable_field_value(obj, fieldname)
# Render the field value and edit form
return self.ajax_item_template.render(Context({
'object_id': obj.id,
'field_name': fieldname,
'form': form.as_p(),
'field_value': field_value
}))
handler_function.allow_tags = True
handler_function.short_description = fieldname
return handler_function
class Media:
#FIXME: dripping jQueries is straight-up wack.
js = ('//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
'ajax_changelist/js/lib/jquery.django_csrf.js',
'ajax_changelist/js/admin.js',)
css = {
'all': ('ajax_changelist/css/admin.css',)
}
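# Editor's note: a minimal, hypothetical registration sketch (not part of
# the original package). `Book` and the `title` field are illustrative:
#
#   from django.contrib import admin
#   from myapp.models import Book
#   from ajax_changelist.admin import AjaxModelAdmin
#
#   class BookAdmin(AjaxModelAdmin):
#       list_display = ('id',)
#       ajax_list_display = ('title',)  # rendered as inline AJAX-editable forms
#
#   admin.site.register(Book, BookAdmin)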
|
a113n/bcbio-nextgen
|
scripts/bcbio_setup_genome.py
|
#!/usr/bin/env python -Es
"""
Script to set up a custom genome for bcbio-nextgen
"""
from __future__ import print_function
from argparse import ArgumentParser
import collections
import gzip
import os
from Bio import SeqIO
import toolz as tz
from bcbio.utils import safe_makedir, file_exists, chdir, is_gzipped
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.install import (REMOTES, get_cloudbiolinux, SUPPORTED_INDEXES,
_get_data_dir)
from bcbio.pipeline.run_info import ALLOWED_CONTIG_NAME_CHARS
from bcbio.galaxy import loc
from bcbio.log import logger
import subprocess
import sys
import shutil
import yaml
import gffutils
from gffutils.iterators import DataIterator
import tempfile
SEQ_DIR = "seq"
RNASEQ_DIR = "rnaseq"
SRNASEQ_DIR = "srnaseq"
ERCC_BUCKET = "bcbio-data.s3.amazonaws.com/"
def extract_if_gzipped(filename):
stem, ext = os.path.splitext(filename)
if ext == ".gz":
subprocess.check_call("gzip -cd %s > %s" % (filename, stem), shell=True)
return stem
else:
return filename
def gff3_to_gtf(gff3_file):
dialect = {'field separator': '; ',
'fmt': 'gtf',
'keyval separator': ' ',
'leading semicolon': False,
'multival separator': ',',
'quoted GFF2 values': True,
'order': ['gene_id', 'transcript_id'],
'repeated keys': False,
'trailing semicolon': True}
out_file = os.path.splitext(gff3_file)[0] + ".gtf"
if file_exists(out_file):
return out_file
logger.info("Converting %s to %s." % (gff3_file, out_file))
if _is_from_ncbi(gff3_file):
logger.info("NCBI format detected by the presence of the %s key."
% _is_from_ncbi(gff3_file))
_output_ncbi_gff3(gff3_file, out_file, dialect)
else:
_output_gff3(gff3_file, out_file, dialect)
return out_file
def _output_gff3(gff3_file, out_file, dialect):
db = gffutils.create_db(gff3_file, ":memory:")
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
transcript_id = feature["Parent"][0]
gene_id = db[transcript_id]["Parent"][0]
attr = {"transcript_id": transcript_id, "gene_id": gene_id}
attributes = gffutils.attributes.Attributes(attr)
feature.attributes = attributes
print(feature, file=out_handle, end="")
def _output_ncbi_gff3(gff3_file, out_file, dialect):
gene_key = "gene"
id_spec = {"gene": gene_key}
db = gffutils.create_db(gff3_file, ":memory:", id_spec=id_spec)
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
# Gnomon features are often missing a transcript id
# some malformed features are also missing the gene key
try:
transcript_id = feature["transcript_id"]
except KeyError:
try:
transcript_id = feature[gene_key]
except KeyError:
continue
gene_id = feature[gene_key]
try:
biotype = feature["gene_biotype"]
except KeyError:
biotype = "unknown"
attr = {"transcript_id": transcript_id, "gene_id": gene_id,
"gene_biotype": biotype}
attributes = gffutils.attributes.Attributes(attr)
feature.attributes = attributes
print(feature, file=out_handle, end="")
def _is_from_ncbi(gff3_file):
with open(gff3_file) as in_handle:
for line in tz.take(10000, in_handle):
if "Dbxref" in line:
return "Dbxref"
if "db_xref" in line:
return "db_xref"
return None
def _index_w_command(env, dir_name, command, ref_file, pre=None, post=None, ext=None):
index_name = os.path.splitext(os.path.basename(ref_file))[0]
if ext is not None: index_name += ext
build_path = os.path.join(os.path.dirname(ref_file), os.pardir)
out_dir = os.path.join(build_path, dir_name)
index_path = os.path.join(out_dir, index_name)
safe_makedir(out_dir)
subprocess.check_call(command.format(ref_file=ref_file,
index_name=index_path), shell=True)
return index_path
def setup_base_directories(genome_dir, name, build, gtf=None):
name_dir = os.path.join(genome_dir, name)
safe_makedir(name_dir)
build_dir = os.path.join(name_dir, build)
safe_makedir(build_dir)
seq_dir = os.path.join(build_dir, SEQ_DIR)
safe_makedir(seq_dir)
if gtf:
gtf_dir = os.path.join(build_dir, RNASEQ_DIR)
safe_makedir(gtf_dir)
return build_dir
def install_fasta_file(build_dir, fasta, build):
out_file = os.path.join(build_dir, SEQ_DIR, build + ".fa")
if not file_exists(out_file):
recs = SeqIO.parse(fasta, "fasta")
with open(out_file, "w") as out_handle:
SeqIO.write((_clean_rec_name(rec) for rec in recs), out_handle, "fasta")
return out_file
def _clean_rec_name(rec):
"""Clean illegal characters in input fasta file which cause problems downstream.
"""
out_id = []
for char in list(rec.id):
if char in ALLOWED_CONTIG_NAME_CHARS:
out_id.append(char)
else:
out_id.append("_")
rec.id = "".join(out_id)
rec.description = ""
return rec
def install_gtf_file(build_dir, gtf, build):
out_file = os.path.join(build_dir, RNASEQ_DIR, "ref-transcripts.gtf")
if not file_exists(out_file):
if is_gzipped(gtf):
with gzip.open(gtf, 'rb') as in_handle:
with open(out_file, 'wb') as out_handle:
shutil.copyfileobj(in_handle, out_handle)
else:
shutil.copyfile(gtf, out_file)
return out_file
def install_srna(species, gtf):
out_file = os.path.join(SRNASEQ_DIR, "srna-transcripts.gtf")
safe_makedir(SRNASEQ_DIR)
if gtf:
if not file_exists(out_file):
shutil.copyfile(gtf, out_file)
try:
from seqcluster import install
except ImportError:
raise ImportError("install seqcluster first, please.")
with chdir(SRNASEQ_DIR):
hairpin, miRNA = install._install_mirbase()
cmd = ("cat %s | awk '{if ($0~/>%s/){name=$0; print name} else if ($0~/^>/){name=0};if (name!=0 && $0!~/^>/){print $0;}}' | sed 's/U/T/g' > hairpin.fa")
do.run(cmd % (hairpin, species), "set precursor.")
cmd = ("grep -A 1 {species} {miRNA} > miRNA.str")
do.run(cmd.format(**locals()), "set miRNA.")
shutil.rmtree("mirbase")
return out_file
def append_ercc(gtf_file, fasta_file):
ercc_fa = ERCC_BUCKET + "ERCC92.fasta.gz"
tmp_fa = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_fa_cmd = "wget {ercc_fa} -O {tmp_fa}; gzip -cd {tmp_fa} >> {fasta_file}"
print(append_fa_cmd.format(**locals()))
subprocess.check_call(append_fa_cmd.format(**locals()), shell=True)
ercc_gtf = ERCC_BUCKET + "ERCC92.gtf.gz"
tmp_gtf = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_gtf_cmd = "wget {ercc_gtf} -O {tmp_gtf}; gzip -cd {tmp_gtf} >> {gtf_file}"
print(append_gtf_cmd.format(**locals()))
subprocess.check_call(append_gtf_cmd.format(**locals()), shell=True)
class MyParser(ArgumentParser):
def error(self, message):
self.print_help()
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
print("\nCurrent genomes\n")
print(open(loc.get_loc_file(galaxy_base, "samtools")).read())
sys.exit(0)
if __name__ == "__main__":
description = ("Set up a custom genome for bcbio-nextgen. This will "
"place the genome under name/build in the genomes "
"directory in your bcbio-nextgen installation.")
parser = MyParser(description=description)
parser.add_argument("-c", "--cores", default=1,
help="number of cores to use")
parser.add_argument("-f", "--fasta", required=True,
help="FASTA file of the genome.")
parser.add_argument("--gff3", default=False, action='store_true',
help="File is a GFF3 file.")
parser.add_argument("-g", "--gtf", default=None,
help="GTF file of the transcriptome")
parser.add_argument("-n", "--name", required=True,
help="Name of organism, for example Hsapiens.")
parser.add_argument("-b", "--build", required=True,
help="Build of genome, for example hg19.")
parser.add_argument("-i", "--indexes", choices=SUPPORTED_INDEXES, nargs="*",
default=["seq"], help="Space separated list of indexes to make")
parser.add_argument("--ercc", action='store_true', default=False,
help="Add ERCC spike-ins.")
parser.add_argument("--mirbase", help="species in mirbase for smallRNAseq data.")
parser.add_argument("--srna_gtf", help="gtf to use for smallRNAseq data.")
parser.add_argument("--buildversion", required=True,
help=("String describing build of genome used. Examples: "
"Ensembl_94, EnsemblMetazoa_94, Flybase_21, etc"))
args = parser.parse_args()
# if not all([args.mirbase, args.srna_gtf]) and any([args.mirbase, args.srna_gtf]):
# raise ValueError("--mirbase and --srna_gtf both need a value.")
os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
cbl = get_cloudbiolinux(REMOTES)
sys.path.insert(0, cbl["dir"])
genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
# monkey patch cloudbiolinux to use this indexing command instead
genomes = getattr(genomemod, 'genomes')
genomes._index_w_command = _index_w_command
genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
args.fasta = os.path.abspath(args.fasta)
if not file_exists(args.fasta):
print("%s does not exist, exiting." % args.fasta)
sys.exit(1)
args.gtf = os.path.abspath(args.gtf) if args.gtf else None
if args.gtf and not file_exists(args.gtf):
print("%s does not exist, exiting." % args.gtf)
sys.exit(1)
args.srna_gtf = os.path.abspath(args.srna_gtf) if args.srna_gtf else None
gtf_file = args.gtf
if args.gff3:
gtf_file = extract_if_gzipped(gtf_file)
gtf_file = gff3_to_gtf(gtf_file)
# always make a sequence dictionary
if "seq" not in args.indexes:
args.indexes.append("seq")
prepare_tx = os.path.join(cbl["dir"], "utils", "prepare_tx_gff.py")
print("Creating directories using %s as the base." % (genome_dir))
build_dir = setup_base_directories(genome_dir, args.name, args.build, args.gtf)
os.chdir(build_dir)
print("Genomes will be installed into %s." % (build_dir))
fasta_file = extract_if_gzipped(args.fasta)
fasta_file = install_fasta_file(build_dir, fasta_file, args.build)
print("Installed genome as %s." % (fasta_file))
if args.gtf:
if "bowtie2" not in args.indexes:
args.indexes.append("bowtie2")
gtf_file = install_gtf_file(build_dir, gtf_file, args.build)
print("Installed GTF as %s." % (gtf_file))
if args.ercc:
print("Appending ERCC sequences to %s and %s." % (gtf_file, fasta_file))
append_ercc(gtf_file, fasta_file)
indexed = {}
Env = collections.namedtuple("Env", "system_install, cores")
env = Env(genome_dir, args.cores)
for index in args.indexes:
print("Creating the %s index." % (index))
index_fn = genomes.get_index_fn(index)
if not index_fn:
print("Do not know how to make the index %s, skipping." % (index))
continue
indexed[index] = index_fn(env, fasta_file)
indexed["samtools"] = fasta_file
if args.gtf:
"Preparing transcriptome."
with chdir(os.path.join(build_dir, os.pardir)):
cmd = ("{sys.executable} {prepare_tx} --buildversion {args.buildversion} --cores {args.cores} --genome-dir {genome_dir} "
"--gtf {gtf_file} {args.name} {args.build}")
subprocess.check_call(cmd.format(**locals()), shell=True)
if args.mirbase:
"Preparing smallRNA data."
with chdir(os.path.join(build_dir)):
install_srna(args.mirbase, args.srna_gtf)
base_dir = os.path.normpath(os.path.dirname(fasta_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % args.build)
print("Dumping genome resources to %s." % resource_file)
resource_dict = {"version": 1}
if args.gtf:
transcripts = ["rnaseq", "transcripts"]
mask = ["rnaseq", "transcripts_mask"]
index = ["rnaseq", "transcriptome_index", "tophat"]
dexseq = ["rnaseq", "dexseq"]
refflat = ["rnaseq", "refflat"]
rRNA_fa = ["rnaseq", "rRNA_fa"]
resource_dict = tz.update_in(resource_dict, transcripts,
lambda x: "../rnaseq/ref-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, mask,
lambda x: "../rnaseq/ref-transcripts-mask.gtf")
resource_dict = tz.update_in(resource_dict, index,
lambda x: "../rnaseq/tophat/%s_transcriptome.ver" % args.build)
resource_dict = tz.update_in(resource_dict, refflat,
lambda x: "../rnaseq/ref-transcripts.refFlat")
resource_dict = tz.update_in(resource_dict, dexseq,
lambda x: "../rnaseq/ref-transcripts.dexseq.gff3")
resource_dict = tz.update_in(resource_dict, rRNA_fa,
lambda x: "../rnaseq/rRNA.fa")
if args.mirbase:
srna_gtf = ["srnaseq", "srna_transcripts"]
srna_mirbase = ["srnaseq", "mirbase_hairpin"]
resource_dict = tz.update_in(resource_dict, srna_gtf,
lambda x: "../srnaseq/srna-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, srna_mirbase,
lambda x: "../srnaseq/hairpin.fa")
# write out resource dictionary
with file_transaction(resource_file) as tx_resource_file:
with open(tx_resource_file, "w") as out_handle:
out_handle.write(yaml.dump(resource_dict, default_flow_style=False))
print("Updating Galaxy .loc files.")
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
for index, index_file in indexed.items():
if index_file:
loc.update_loc_file(galaxy_base, index, args.build, index_file)
print("Genome installation complete.")
|
sserrot/champion_relationships
|
venv/Lib/site-packages/jedi/api/file_name.py
|
import os
from jedi._compatibility import FileNotFoundError, force_unicode, scandir
from jedi.api import classes
from jedi.api.strings import StringName, get_quote_ending
from jedi.api.helpers import match
from jedi.inference.helpers import get_str_or_none
class PathName(StringName):
api_type = u'path'
def complete_file_name(inference_state, module_context, start_leaf, quote, string,
like_name, signatures_callback, code_lines, position, fuzzy):
# First we want to find out what can actually be changed as a name.
like_name_length = len(os.path.basename(string))
addition = _get_string_additions(module_context, start_leaf)
if string.startswith('~'):
string = os.path.expanduser(string)
if addition is None:
return
string = addition + string
# Here we use basename again, because if strings are added like
# `'foo' + 'bar`, it should complete to `foobar/`.
must_start_with = os.path.basename(string)
string = os.path.dirname(string)
sigs = signatures_callback(*position)
is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs)
if is_in_os_path_join:
to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start)
if to_be_added is None:
is_in_os_path_join = False
else:
string = to_be_added + string
base_path = os.path.join(inference_state.project.path, string)
try:
listed = sorted(scandir(base_path), key=lambda e: e.name)
# OSError: [Errno 36] File name too long: '...'
except (FileNotFoundError, OSError):
return
quote_ending = get_quote_ending(quote, code_lines, position)
for entry in listed:
name = entry.name
if match(name, must_start_with, fuzzy=fuzzy):
if is_in_os_path_join or not entry.is_dir():
name += quote_ending
else:
name += os.path.sep
yield classes.Completion(
inference_state,
PathName(inference_state, name[len(must_start_with) - like_name_length:]),
stack=None,
like_name_length=like_name_length,
is_fuzzy=fuzzy,
)
def _get_string_additions(module_context, start_leaf):
def iterate_nodes():
node = addition.parent
was_addition = True
for child_node in reversed(node.children[:node.children.index(addition)]):
if was_addition:
was_addition = False
yield child_node
continue
if child_node != '+':
break
was_addition = True
addition = start_leaf.get_previous_leaf()
if addition != '+':
return ''
context = module_context.create_context(start_leaf)
return _add_strings(context, reversed(list(iterate_nodes())))
def _add_strings(context, nodes, add_slash=False):
string = ''
first = True
for child_node in nodes:
values = context.infer_node(child_node)
if len(values) != 1:
return None
c, = values
s = get_str_or_none(c)
if s is None:
return None
if not first and add_slash:
string += os.path.sep
string += force_unicode(s)
first = False
return string
def _add_os_path_join(module_context, start_leaf, bracket_start):
def check(maybe_bracket, nodes):
if maybe_bracket.start_pos != bracket_start:
return None
if not nodes:
return ''
context = module_context.create_context(nodes[0])
return _add_strings(context, nodes, add_slash=True) or ''
if start_leaf.type == 'error_leaf':
# Unfinished string literal, like `join('`
value_node = start_leaf.parent
index = value_node.children.index(start_leaf)
if index > 0:
error_node = value_node.children[index - 1]
if error_node.type == 'error_node' and len(error_node.children) >= 2:
index = -2
if error_node.children[-1].type == 'arglist':
arglist_nodes = error_node.children[-1].children
index -= 1
else:
arglist_nodes = []
return check(error_node.children[index + 1], arglist_nodes[::2])
return None
# Maybe an arglist or some weird error case. Therefore checked below.
searched_node_child = start_leaf
while searched_node_child.parent is not None \
and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'):
searched_node_child = searched_node_child.parent
if searched_node_child.get_first_leaf() is not start_leaf:
return None
searched_node = searched_node_child.parent
if searched_node is None:
return None
index = searched_node.children.index(searched_node_child)
arglist_nodes = searched_node.children[:index]
if searched_node.type == 'arglist':
trailer = searched_node.parent
if trailer.type == 'error_node':
trailer_index = trailer.children.index(searched_node)
assert trailer_index >= 2
assert trailer.children[trailer_index - 1] == '('
return check(trailer.children[trailer_index - 1], arglist_nodes[::2])
elif trailer.type == 'trailer':
return check(trailer.children[0], arglist_nodes[::2])
elif searched_node.type == 'trailer':
return check(searched_node.children[0], [])
elif searched_node.type == 'error_node':
# Stuff like `join(""`
return check(arglist_nodes[-1], [])
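# Editor's note: a hypothetical sketch of driving this module through
# jedi's public API (the `complete(line, column)` spelling is from newer
# jedi releases; older ones used `Script(...).completions()`):
#
#   import jedi
#   source = "open('./"
#   for c in jedi.Script(source).complete(1, len(source)):
#       print(c.name)  # file and directory names under the project path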
|
EmanueleCannizzaro/scons
|
src/engine/SCons/Tool/mssdk.py
|
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mssdk.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""engine.SCons.Tool.mssdk
Tool-specific initialization for Microsoft SDKs, both Platform
SDKs and Windows SDKs.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from MSCommon import mssdk_exists, \
mssdk_setup_env
def generate(env):
"""Add construction variables for an MS SDK to an Environment."""
mssdk_setup_env(env)
def exists(env):
return mssdk_exists()
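# Editor's note: a minimal, hypothetical SConstruct sketch (assumes a
# Windows host with a Microsoft SDK installed):
#
#   env = Environment(tools=['mssdk', 'msvc'])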
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
indro/t2c
|
apps/external_apps/django_openid/admin.py
|
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered
from models import UserOpenidAssociation
class OpenIDInline(admin.StackedInline):
model = UserOpenidAssociation
class UserAdminWithOpenIDs(UserAdmin):
inlines = [OpenIDInline]
# Add OpenIDs to the user admin, but only if User has been registered
try:
admin.site.unregister(User)
admin.site.register(User, UserAdminWithOpenIDs)
except NotRegistered:
pass
#from models import Nonce, Association
#admin.site.register(Nonce)
#admin.site.register(Association)
|
Toilal/mailinabox
|
tools/mail.py
|
#!/usr/bin/python3
import sys, getpass, urllib.request, urllib.parse, urllib.error, json
def mgmt(cmd, data=None, is_json=False):
# The base URL for the management daemon. (Listens on IPv4 only.)
mgmt_uri = 'http://127.0.0.1:10222'
setup_key_auth(mgmt_uri)
req = urllib.request.Request(mgmt_uri + cmd, urllib.parse.urlencode(data).encode("utf8") if data else None)
try:
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
if e.code == 401:
try:
print(e.read().decode("utf8"))
except:
pass
print("The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.", file=sys.stderr)
elif hasattr(e, 'read'):
print(e.read().decode('utf8'), file=sys.stderr)
else:
print(e, file=sys.stderr)
sys.exit(1)
resp = response.read().decode('utf8')
if is_json: resp = json.loads(resp)
return resp
def read_password():
first = getpass.getpass('password: ')
second = getpass.getpass(' (again): ')
while first != second:
print('Passwords not the same. Try again.')
first = getpass.getpass('password: ')
second = getpass.getpass(' (again): ')
return first
def setup_key_auth(mgmt_uri):
key = open('/var/lib/mailinabox/api.key').read().strip()
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(
realm='Mail-in-a-Box Management Server',
uri=mgmt_uri,
user=key,
passwd='')
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
if len(sys.argv) < 2:
print("Usage: ")
print(" tools/mail.py user (lists users)")
print(" tools/mail.py user add user@domain.com [password]")
print(" tools/mail.py user password user@domain.com [password]")
print(" tools/mail.py user remove user@domain.com")
print(" tools/mail.py user make-admin user@domain.com")
print(" tools/mail.py user remove-admin user@domain.com")
print(" tools/mail.py user admins (lists admins)")
print(" tools/mail.py alias (lists aliases)")
print(" tools/mail.py alias add incoming.name@domain.com sent.to@other.domain.com")
print(" tools/mail.py alias add incoming.name@domain.com 'sent.to@other.domain.com, multiple.people@other.domain.com'")
print(" tools/mail.py alias remove incoming.name@domain.com")
print()
print("Removing a mail user does not delete their mail folders on disk. It only prevents IMAP/SMTP login.")
print()
elif sys.argv[1] == "user" and len(sys.argv) == 2:
# Dump a list of users, one per line. Mark admins with an asterisk.
users = mgmt("/mail/users?format=json", is_json=True)
for domain in users:
for user in domain["users"]:
if user['status'] == 'inactive': continue
print(user['email'], end='')
if "admin" in user['privileges']:
print("*", end='')
print()
elif sys.argv[1] == "user" and sys.argv[2] in ("add", "password"):
if len(sys.argv) < 5:
if len(sys.argv) < 4:
email = input("email: ")
else:
email = sys.argv[3]
pw = read_password()
else:
email, pw = sys.argv[3:5]
if sys.argv[2] == "add":
print(mgmt("/mail/users/add", { "email": email, "password": pw }))
elif sys.argv[2] == "password":
print(mgmt("/mail/users/password", { "email": email, "password": pw }))
elif sys.argv[1] == "user" and sys.argv[2] == "remove" and len(sys.argv) == 4:
print(mgmt("/mail/users/remove", { "email": sys.argv[3] }))
elif sys.argv[1] == "user" and sys.argv[2] in ("make-admin", "remove-admin") and len(sys.argv) == 4:
if sys.argv[2] == "make-admin":
action = "add"
else:
action = "remove"
print(mgmt("/mail/users/privileges/" + action, { "email": sys.argv[3], "privilege": "admin" }))
elif sys.argv[1] == "user" and sys.argv[2] == "admins":
# Dump a list of admin users.
users = mgmt("/mail/users?format=json", is_json=True)
for domain in users:
for user in domain["users"]:
if "admin" in user['privileges']:
print(user['email'])
elif sys.argv[1] == "alias" and len(sys.argv) == 2:
print(mgmt("/mail/aliases"))
elif sys.argv[1] == "alias" and sys.argv[2] == "add" and len(sys.argv) == 5:
print(mgmt("/mail/aliases/add", { "source": sys.argv[3], "destination": sys.argv[4] }))
elif sys.argv[1] == "alias" and sys.argv[2] == "remove" and len(sys.argv) == 4:
print(mgmt("/mail/aliases/remove", { "source": sys.argv[3] }))
else:
print("Invalid command-line arguments.")
sys.exit(1)
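# Editor's note: hypothetical invocations matching the usage text above
# (addresses and the password are illustrative):
#
#   tools/mail.py user add alice@example.com hunter2
#   tools/mail.py user make-admin alice@example.com
#   tools/mail.py alias add info@example.com alice@example.com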
|
kellinm/blivet
|
blivet/flags.py
|
# flags.py
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
import shlex
import selinux
from .util import open # pylint: disable=redefined-builtin
class Flags(object):
def __init__(self):
#
# mode of operation
#
self.testing = False
self.installer_mode = False
#
# minor modes (installer-specific)
#
self.automated_install = False
self.live_install = False
self.image_install = False
#
# enable/disable functionality
#
self.selinux = selinux.is_selinux_enabled()
self.multipath = True
self.dmraid = True
self.ibft = True
self.noiswmd = False
self.gfs2 = True
self.jfs = True
self.reiserfs = True
self.arm_platform = None
self.gpt = False
self.multipath_friendly_names = True
# set to False to suppress the default LVM behavior of saving
# backup metadata in /etc/lvm/{archive,backup}
self.lvm_metadata_backup = True
# whether to include nodev filesystems in the devicetree (only
# meaningful when flags.installer_mode is False)
self.include_nodev = False
self.boot_cmdline = {}
self.update_from_boot_cmdline()
self.allow_imperfect_devices = True
def get_boot_cmdline(self):
buf = open("/proc/cmdline").read().strip()
args = shlex.split(buf)
for arg in args:
(opt, _equals, val) = arg.partition("=")
if val:
self.boot_cmdline[opt] = val
def update_from_boot_cmdline(self):
self.get_boot_cmdline()
if "nompath" in self.boot_cmdline:
self.multipath = False
if "nodmraid" in self.boot_cmdline:
self.dmraid = False
if "noiswmd" in self.boot_cmdline:
self.noiswmd = True
def update_from_anaconda_flags(self, anaconda_flags):
self.installer_mode = True
self.testing = anaconda_flags.testing
self.automated_install = anaconda_flags.automatedInstall
self.live_install = anaconda_flags.livecdInstall
self.image_install = anaconda_flags.imageInstall
self.selinux = anaconda_flags.selinux
self.gfs2 = "gfs2" in self.boot_cmdline
self.jfs = "jfs" in self.boot_cmdline
self.reiserfs = "reiserfs" in self.boot_cmdline
self.arm_platform = anaconda_flags.armPlatform
self.gpt = anaconda_flags.gpt
self.multipath_friendly_names = anaconda_flags.mpathFriendlyNames
self.allow_imperfect_devices = anaconda_flags.rescue_mode
self.ibft = anaconda_flags.ibft
self.dmraid = anaconda_flags.dmraid
# We don't want image installs writing backups of the *image* metadata
# into the *host's* /etc/lvm. This can get real messy on build systems.
if self.image_install:
self.lvm_metadata_backup = False
flags = Flags()
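# Editor's note: the module exposes a single shared instance; a hypothetical
# consumer mutates it before doing any device scanning:
#
#   from blivet.flags import flags
#   flags.testing = True        # exercise code paths without touching devices
#   flags.multipath = False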
|
lmotta/Roam
|
src/configmanager/editorwidgets/listwidget.py
|
import os
from functools import partial
from PyQt4.QtGui import QWidget
from PyQt4.QtCore import Qt
from qgis.core import QgsMapLayer
from qgis.gui import QgsExpressionBuilderDialog
from roam.api.utils import layer_by_name
from configmanager.models import QgsLayerModel, QgsFieldModel
from configmanager.editorwidgets.core import ConfigWidget
from configmanager.editorwidgets.uifiles.ui_listwidget_config import Ui_Form
class ListWidgetConfig(Ui_Form, ConfigWidget):
description = 'Select an item from a predefined list'
def __init__(self, parent=None):
super(ListWidgetConfig, self).__init__(parent)
self.setupUi(self)
self.allownull = False
self.orderby = False
self.orderbyCheck.hide()
self.layerRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 0))
self.listRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 1))
self.layermodel = QgsLayerModel(watchregistry=False)
self.layermodel.layerfilter = [QgsMapLayer.VectorLayer]
self.fieldmodel = QgsFieldModel()
self.blockSignals(True)
self.layerCombo.setModel(self.layermodel)
self.keyCombo.setModel(self.fieldmodel)
self.valueCombo.setModel(self.fieldmodel)
self.filterButton.pressed.connect(self.define_filter)
self.fieldmodel.setLayerFilter(self.layerCombo.view().selectionModel())
self.reset()
self.blockSignals(False)
def define_filter(self):
layer = self.layerCombo.currentText()
if not layer:
return
layer = layer_by_name(layer)
dlg = QgsExpressionBuilderDialog(layer, "List filter", self)
text = self.filterText.toPlainText()
dlg.setExpressionText(text)
if dlg.exec_():
self.filterText.setPlainText(dlg.expressionText())
def reset(self):
self.listtype = 'layer'
self.listText.setPlainText('')
self.orderby = False
self.allownull = False
self.filterText.setPlainText('')
self.layerCombo.setCurrentIndex(-1)
self.keyCombo.setCurrentIndex(-1)
self.valueCombo.setCurrentIndex(-1)
def widgetchanged(self):
self.widgetdirty.emit(self.getconfig())
@property
def allownull(self):
return self.allownullCheck.isChecked()
@allownull.setter
def allownull(self, value):
self.allownullCheck.setChecked(value)
@property
def orderby(self):
return self.orderbyCheck.isChecked()
@orderby.setter
def orderby(self, value):
self.orderbyCheck.setChecked(value)
@property
def list(self):
return self.listText.toPlainText().split('\n')
@property
def filter(self):
return self.filterText.toPlainText()
@property
def layer(self):
return self.layerCombo.currentText()
@property
def key(self):
index_key = self.fieldmodel.index(self.keyCombo.currentIndex(), 0)
fieldname_key = self.fieldmodel.data(index_key, QgsFieldModel.FieldNameRole)
return fieldname_key
@property
def value(self):
index_value = self.fieldmodel.index(self.valueCombo.currentIndex(), 0)
return self.fieldmodel.data(index_value, QgsFieldModel.FieldNameRole)
def getconfig(self):
config = {}
config['allownull'] = self.allownull
config['orderbyvalue'] = self.orderby
if self.layerRadio.isChecked():
subconfig = {}
# TODO Grab the data here and not just the text
subconfig['layer'] = self.layer
subconfig['key'] = self.key
subconfig['value'] = self.value
subconfig['filter'] = self.filter
config['layer'] = subconfig
else:
config['list'] = {}
config['list']['items'] = self.list
return config
def blockSignals(self, block):
for child in self.findChildren(QWidget):
child.blockSignals(block)
super(ListWidgetConfig, self).blockSignals(block)
def setconfig(self, config):
self.blockSignals(True)
self.allownull = config.get('allownull', True)
self.orderby = config.get('orderbyvalue', False)
#Clear the widgets
self.listText.setPlainText('')
self.keyCombo.clear()
self.valueCombo.clear()
self.filterText.clear()
self.layermodel.refresh()
# Rebind all the values
if 'list' in config:
subconfig = config.get('list', {})
self.listRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(1)
listitems = subconfig.get('items', [])
itemtext = '\n'.join(listitems)
self.listText.setPlainText(itemtext)
else:
self.layerRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(0)
subconfig = config.get('layer', {})
layer = subconfig.get('layer', '') or ''
key = subconfig.get('key', '') or ''
value = subconfig.get('value', '') or ''
filter_text = subconfig.get('filter', '') or ''
index = self.layerCombo.findData(layer, Qt.DisplayRole)
if index > -1:
self.layerCombo.setCurrentIndex(index)
index = self.layermodel.index(index, 0)
self.fieldmodel.updateLayer(index, None)
keyindex = self.keyCombo.findData(key.lower(), QgsFieldModel.FieldNameRole)
if keyindex > -1:
self.keyCombo.setCurrentIndex(keyindex)
valueindex = self.valueCombo.findData(value.lower(), QgsFieldModel.FieldNameRole)
if valueindex > -1:
self.valueCombo.setCurrentIndex(valueindex)
self.filterText.setPlainText(filter_text)
self.allownullCheck.setChecked(self.allownull)
self.orderbyCheck.setChecked(self.orderby)
self.blockSignals(False)
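# Editor's note: illustrative shape of the dict produced by getconfig() when
# the "layer" source is selected (all values are hypothetical):
#
#   {'allownull': True,
#    'orderbyvalue': False,
#    'layer': {'layer': 'roads', 'key': 'id', 'value': 'name', 'filter': ''}}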
|
hanyassasa87/ns3-802.11ad
|
doc/manual/source/conf.py
|
# -*- coding: utf-8 -*-
#
# test documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 26 00:00:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sys, os
# To change default code-block format in Latex to footnotesize (8pt)
# Tip from https://stackoverflow.com/questions/9899283/how-do-you-change-the-code-example-font-size-in-latex-pdf-output-with-sphinx/9955928
# Note: sizes are \footnotesize (8pt), \small (9pt), and \normalsize (10pt).
#from sphinx.highlighting import PygmentsBridge
#from pygments.formatters.latex import LatexFormatter
#
#class CustomLatexFormatter(LatexFormatter):
# def __init__(self, **options):
# super(CustomLatexFormatter, self).__init__(**options)
# self.verboptions = r"formatcom=\footnotesize"
#
#PygmentsBridge.latex_formatter = CustomLatexFormatter
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.imgmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3 project'
copyright = u'2006-2019'
#author = u'test'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = u'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..']
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'test vtest'
html_title = 'Manual'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# VerbatimBorderColor: make the box around code samples blend into the background
# Tip from https://stackoverflow.com/questions/29403100/how-to-remove-the-box-around-the-code-block-in-restructuredtext-with-sphinx
#
# sphinxcode is the wrapper around \texttt that sphinx.sty provides.
# Redefine it here as needed to change the inline literal font size
# (double backquotes) to either \footnotesize (8pt) or \small (9pt)
#
# See above to change the font size of verbatim code blocks
#
# 'preamble': '',
'preamble': u'''\\usepackage{amssymb}
\\definecolor{VerbatimBorderColor}{rgb}{1,1,1}
\\renewcommand{\\sphinxcode}[1]{\\texttt{\\small{#1}}}
'''
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ns-3-manual.tex', u'ns-3 Manual',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
latex_logo = '../../ns3_html_theme/static/ns-3.png'
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... to help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-manual', u'ns-3 Manual',
[u'ns-3 project'], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for texinfo output ---------------------------------------
#texinfo_documents = [
# (master_doc, 'test', u'test Documentation',
# author, 'test', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
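# Editor's note: a typical (hypothetical) build invocation from doc/manual,
# using the standard sphinx-build entry point:
#
#   sphinx-build -b html source build/html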
|
AlbertoPeon/invenio
|
modules/bibupload/lib/batchuploader_engine.py
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader core functions. Uploading metadata and documents.
"""
import os
import pwd
import grp
import sys
import time
import tempfile
import cgi
import re
from invenio.dbquery import run_sql, Error
from invenio.access_control_engine import acc_authorize_action
from invenio.webuser import collect_user_info, page_not_authorized
from invenio.config import CFG_BINDIR, CFG_TMPSHAREDDIR, CFG_LOGDIR, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \
CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS, \
CFG_PREFIX, CFG_SITE_LANG
from invenio.textutils import encode_for_xml
from invenio.bibtask import task_low_level_submission
from invenio.messages import gettext_set_language
from invenio.textmarc2xmlmarc import transform_file
from invenio.shellutils import run_shell_command
from invenio.bibupload import xml_marc_to_records, bibupload
import invenio.bibupload as bibupload_module
from invenio.bibrecord import create_records, \
record_strip_empty_volatile_subfields, \
record_strip_empty_fields
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',
'--insert', '--replace', '--correct', '--append']
_CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE = re.compile(CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS)
def cli_allocate_record(req):
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = '[ERROR] Sorry, the "%s" useragent cannot use the service.' % _get_useragent(req)
_log(msg)
return _write(req, msg)
recid = run_sql("insert into bibrec (creation_date,modification_date) values(NOW(),NOW())")
return recid
def cli_upload(req, file_content=None, mode=None, callback_url=None, nonce=None, special_treatment=None):
""" Robot interface for uploading MARC files
"""
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, the %s useragent cannot use the service." % _get_useragent(req)
_log(msg)
return _write(req, msg)
arg_mode = mode
if not arg_mode:
msg = "[ERROR] Please specify upload mode to use."
_log(msg)
return _write(req, msg)
if not arg_mode in PERMITTED_MODES:
msg = "[ERROR] Invalid upload mode."
_log(msg)
return _write(req, msg)
arg_file = file_content
if hasattr(arg_file, 'read'):
## We've been passed a readable file, e.g. req
arg_file = arg_file.read()
if not arg_file:
msg = "[ERROR] Please provide a body to your request."
_log(msg)
return _write(req, msg)
else:
if not arg_file:
msg = "[ERROR] Please specify file body to input."
_log(msg)
return _write(req, msg)
if hasattr(arg_file, "filename"):
arg_file = arg_file.value
else:
msg = "[ERROR] 'file' parameter must be a (single) file"
_log(msg)
return _write(req, msg)
# write temporary file:
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(arg_file)
filedesc.close()
# check if this client can run this file:
client_ip = _get_client_ip(req)
permitted_dbcollids = CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]
if permitted_dbcollids != ['*']: # wildcard
allow = _check_client_can_submit_file(client_ip, filename, req, 0)
if not allow:
msg = "[ERROR] Cannot submit such a file from this IP. (Wrong collection.)"
_log(msg)
return _write(req, msg)
# check validity of marcxml
xmlmarclint_path = CFG_BINDIR + '/xmlmarclint'
xmlmarclint_output, dummy1, dummy2 = run_shell_command('%s %s' % (xmlmarclint_path, filename))
if xmlmarclint_output != 0:
msg = "[ERROR] MARCXML is not valid."
_log(msg)
return _write(req, msg)
args = ['bibupload', "batchupload", arg_mode, filename]
# run upload command
if callback_url:
args += ["--callback-url", callback_url]
if nonce:
args += ["--nonce", nonce]
if special_treatment:
args += ["--special-treatment", special_treatment]
task_low_level_submission(*args)
msg = "[INFO] %s" % ' '.join(args)
_log(msg)
return _write(req, msg)
def metadata_upload(req, metafile=None, filetype=None, mode=None, exec_date=None,
exec_time=None, metafilename=None, ln=CFG_SITE_LANG,
priority="1", email_logs_to=None):
"""
Metadata web upload service. Get upload parameters and exec bibupload for the given file.
Finally, write upload history.
@return: tuple (error code, message)
error code: code that indicates if an error occurred
message: message describing the error
"""
# start output:
req.content_type = "text/html"
req.send_http_header()
error_codes = {'not_authorized': 1}
# write temporary file:
if filetype != 'marcxml':
metafile = _transform_input_to_marcxml(file_input=metafile)
user_info = collect_user_info(req)
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
user_info['nickname'] + "_" + time.strftime("%Y%m%d%H%M%S",
time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(metafile)
filedesc.close()
# check if this client can run this file:
if req is not None:
allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)
if allow[0] != 0:
return (error_codes['not_authorized'], allow[1])
# run upload command:
task_arguments = ('bibupload', user_info['nickname'], mode, "--name=" + metafilename, "--priority=" + priority)
if exec_date:
date = exec_date
if exec_time:
date += ' ' + exec_time
task_arguments += ("-t", date)
if email_logs_to:
task_arguments += ('--email-logs-to', email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "metadata")""",
(user_info['nickname'], metafilename,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid), ))
return (0, "Task %s queued" % str(jobid))
def document_upload(req=None, folder="", matching="", mode="", exec_date="", exec_time="", ln=CFG_SITE_LANG, priority="1", email_logs_to=None):
""" Take files from the given directory and upload them with the appropiate mode.
@parameters:
+ folder: Folder where the files to upload are stored
+ matching: How to match file names with record fields (report number, barcode,...)
+ mode: Upload mode (append, revise, replace)
@return: tuple (file, error code)
file: file name causing the error to notify the user
error code:
1 - More than one possible recID, ambiguous behaviour
2 - No records match that file name
3 - File already exists
"""
import sys
if sys.hexversion < 0x2060000:
from md5 import md5
else:
from hashlib import md5
from invenio.bibdocfile import BibRecDocs, file_strip_ext
import shutil
from invenio.search_engine import perform_request_search, \
search_pattern, \
guess_collection_of_a_record
_ = gettext_set_language(ln)
errors = []
info = [0, []] # Number of files read, name of the files
try:
files = os.listdir(folder)
except OSError, error:
errors.append(("", error))
return errors, info
err_desc = {1: _("More than one possible recID, ambiguous behaviour"), 2: _("No records match that file name"),
3: _("File already exists"), 4: _("A file with the same name and format already exists"),
5: _("No rights to upload to collection '%s'")}
# Create directory DONE/ if doesn't exist
folder = (folder[-1] == "/") and folder or (folder + "/")
files_done_dir = folder + "DONE/"
try:
os.mkdir(files_done_dir)
except OSError:
# Directory exists or no write permission
pass
for docfile in files:
if os.path.isfile(os.path.join(folder, docfile)):
info[0] += 1
identifier = file_strip_ext(docfile)
extension = docfile[len(identifier):]
rec_id = None
if identifier:
rec_id = search_pattern(p=identifier, f=matching, m='e')
if not rec_id:
errors.append((docfile, err_desc[2]))
continue
elif len(rec_id) > 1:
errors.append((docfile, err_desc[1]))
continue
else:
rec_id = str(list(rec_id)[0])
rec_info = BibRecDocs(rec_id)
if rec_info.bibdocs:
for bibdoc in rec_info.bibdocs:
attached_files = bibdoc.list_all_files()
file_md5 = md5(open(os.path.join(folder, docfile), "rb").read()).hexdigest()
num_errors = len(errors)
for attached_file in attached_files:
if attached_file.checksum == file_md5:
errors.append((docfile, err_desc[3]))
break
elif attached_file.get_full_name() == docfile:
errors.append((docfile, err_desc[4]))
break
if len(errors) > num_errors:
continue
# Check if user has rights to upload file
if req is not None:
file_collection = guess_collection_of_a_record(int(rec_id))
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=file_collection)
if auth_code != 0:
error_msg = err_desc[5] % file_collection
errors.append((docfile, error_msg))
continue
tempfile.tempdir = CFG_TMPSHAREDDIR
# Move document to be uploaded to temporary folder
tmp_file = tempfile.mktemp(prefix=identifier + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_", suffix=extension)
shutil.copy(os.path.join(folder, docfile), tmp_file)
# Create MARC temporary file with FFT tag and call bibupload
filename = tempfile.mktemp(prefix=identifier + '_')
filedesc = open(filename, 'w')
marc_content = """ <record>
<controlfield tag="001">%(rec_id)s</controlfield>
<datafield tag="FFT" ind1=" " ind2=" ">
<subfield code="n">%(name)s</subfield>
<subfield code="a">%(path)s</subfield>
</datafield>
</record> """ % {'rec_id': rec_id,
'name': encode_for_xml(identifier),
'path': encode_for_xml(tmp_file),
}
filedesc.write(marc_content)
filedesc.close()
info[1].append(docfile)
user = ""
if req is not None:
user_info = collect_user_info(req)
user = user_info['nickname']
if not user:
user = "batchupload"
# Execute bibupload with the appropriate mode
task_arguments = ('bibupload', user, "--" + mode, "--name=" + docfile, "--priority=" + priority)
if exec_date:
date = '--runtime=' + "\'" + exec_date + ' ' + exec_time + "\'"
task_arguments += (date, )
if email_logs_to:
task_arguments += ("--email-logs-to", email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "document")""",
(user_info['nickname'], docfile,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid)))
# Move file to DONE folder
done_filename = docfile + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_" + str(jobid)
try:
os.rename(os.path.join(folder, docfile), os.path.join(files_done_dir, done_filename))
except OSError:
errors.append((docfile, 'MoveError'))
return errors, info
def get_user_metadata_uploads(req):
"""Retrieve all metadata upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="metadata"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_user_document_uploads(req):
"""Retrieve all document upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="document"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_daemon_doc_files():
""" Return all files found in batchuploader document folders """
files = {}
for folder in ['/revise', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/documents' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def get_daemon_meta_files():
""" Return all files found in batchuploader metadata folders """
files = {}
for folder in ['/correct', '/replace', '/insert', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/metadata' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def user_authorization(req, ln):
""" Check user authorization to visit page """
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader')
if auth_code != 0:
referer = '/batchuploader/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="batchuploader")
else:
return None
def perform_basic_upload_checks(xml_record):
""" Performs tests that would provoke the bibupload task to fail with
an exit status 1, to prevent batchupload from crashing while alarming
the user wabout the issue
"""
from invenio.bibupload import writing_rights_p
errors = []
if not writing_rights_p():
errors.append("Error: BibUpload does not have rights to write fulltext files.")
recs = create_records(xml_record, 1, 1)
if recs == []:
errors.append("Error: Cannot parse MARCXML file.")
elif recs[0][0] is None:
errors.append("Error: MARCXML file has wrong format: %s" % recs)
return errors
def perform_upload_check(xml_record, mode):
""" Performs a upload simulation with the given record and mode
@return: string describing errors
@rtype: string
"""
error_cache = []
def my_writer(msg, stream=sys.stdout, verbose=1):
if verbose == 1:
if 'DONE' not in msg:
error_cache.append(msg.strip())
orig_writer = bibupload_module.write_message
bibupload_module.write_message = my_writer
error_cache.extend(perform_basic_upload_checks(xml_record))
if error_cache:
# There has been some critical error
return '\n'.join(error_cache)
recs = xml_marc_to_records(xml_record)
try:
upload_mode = mode[2:]
# Adapt input data for bibupload function
if upload_mode == "r insert-or-replace":
upload_mode = "replace_or_insert"
for record in recs:
if record:
record_strip_empty_volatile_subfields(record)
record_strip_empty_fields(record)
bibupload(record, opt_mode=upload_mode, pretend=True)
finally:
bibupload_module.write_message = orig_writer
return '\n'.join(error_cache)
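# Illustrative call sketch (not part of the original module; the file path
# and mode string are hypothetical):
#     marcxml = open('/tmp/batch.xml').read()
#     report = perform_upload_check(marcxml, '--insert')
#     if report:
#         print "Simulated upload reported problems:\n" + report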
def _get_useragent(req):
"""Return client user agent from req object."""
user_info = collect_user_info(req)
return user_info['agent']
def _get_client_ip(req):
"""Return client IP address from req object."""
return str(req.remote_ip)
def _check_client_ip(req):
"""
Is this client permitted to use the service?
"""
client_ip = _get_client_ip(req)
if client_ip in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.keys():
return True
return False
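# For example, with CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS mapping client IPs
# to permitted collections, e.g. {'127.0.0.1': ['ARTICLE']}, only requests
# originating from one of the listed IPs pass this check.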
def _check_client_useragent(req):
"""
Is this user agent permitted to use the service?
"""
client_useragent = _get_useragent(req)
if _CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE.match(client_useragent):
return True
return False
def _check_client_can_submit_file(client_ip="", metafile="", req=None, webupload=0, ln=CFG_SITE_LANG):
"""
Is this client able to upload such a FILENAME?
Check 980 $a values and collection tags in the file to see if they are among the
permitted ones as specified by CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS and ACC_AUTHORIZE_ACTION.
Useful to make sure that the client does not override other records by
mistake.
"""
_ = gettext_set_language(ln)
recs = create_records(metafile, 0, 0)
user_info = collect_user_info(req)
filename_tag980_values = _detect_980_values_from_marcxml_file(recs)
for filename_tag980_value in filename_tag980_values:
if not filename_tag980_value:
if not webupload:
return False
else:
return(1, "Invalid collection in tag 980")
if not webupload:
if not filename_tag980_value in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_tag980_value)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_tag980_value}
return (auth_code, error_msg)
filename_rec_id_collections = _detect_collections_from_marcxml_file(recs)
for filename_rec_id_collection in filename_rec_id_collections:
if not webupload:
if not filename_rec_id_collection in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_rec_id_collection)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_rec_id_collection}
return (auth_code, error_msg)
if not webupload:
return True
else:
return (0, " ")
def _detect_980_values_from_marcxml_file(recs):
"""
Read MARCXML file and return list of 980 $a values found in that file.
Useful for checking rights.
"""
from invenio.bibrecord import record_get_field_values
collection_tag = run_sql("SELECT value FROM tag, field_tag, field \
WHERE tag.id=field_tag.id_tag AND \
field_tag.id_field=field.id AND \
field.code='collection'")
collection_tag = collection_tag[0][0]
dbcollids = {}
for rec, dummy1, dummy2 in recs:
if rec:
for tag980 in record_get_field_values(rec,
tag=collection_tag[:3],
ind1=collection_tag[3],
ind2=collection_tag[4],
code=collection_tag[5]):
dbcollids[tag980] = 1
return dbcollids.keys()
def _detect_collections_from_marcxml_file(recs):
"""
Extract all possible recIDs from MARCXML file and guess collections
for these recIDs.
"""
from invenio.bibrecord import record_get_field_values
from invenio.search_engine import guess_collection_of_a_record
from invenio.bibupload import find_record_from_sysno, \
find_records_from_extoaiid, \
find_record_from_oaiid
dbcollids = {}
sysno_tag = CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG
oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
oai_tag = CFG_OAI_ID_FIELD
for rec, dummy1, dummy2 in recs:
if rec:
for tag001 in record_get_field_values(rec, '001'):
collection = guess_collection_of_a_record(int(tag001))
dbcollids[collection] = 1
for tag_sysno in record_get_field_values(rec, tag=sysno_tag[:3],
ind1=sysno_tag[3],
ind2=sysno_tag[4],
code=sysno_tag[5]):
record = find_record_from_sysno(tag_sysno)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oaiid in record_get_field_values(rec, tag=oaiid_tag[:3],
ind1=oaiid_tag[3],
ind2=oaiid_tag[4],
code=oaiid_tag[5]):
try:
records = find_records_from_extoaiid(tag_oaiid)
except Error:
records = []
if records:
record = records.pop()
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oai in record_get_field_values(rec, tag=oai_tag[0:3],
ind1=oai_tag[3],
ind2=oai_tag[4],
code=oai_tag[5]):
record = find_record_from_oaiid(tag_oai)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
return dbcollids.keys()
def _transform_input_to_marcxml(file_input=""):
"""
Takes text-marc as input and transforms it
to MARCXML.
"""
# Create temporary file to read from
tmp_fd, filename = tempfile.mkstemp(dir=CFG_TMPSHAREDDIR)
os.write(tmp_fd, file_input)
os.close(tmp_fd)
try:
# Redirect output, transform, restore old references
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
transform_file(filename)
finally:
sys.stdout = old_stdout
return new_stdout.getvalue()
def _log(msg, logfile="webupload.log"):
"""
Log MSG into LOGFILE with timestamp.
"""
filedesc = open(CFG_LOGDIR + "/" + logfile, "a")
filedesc.write(time.strftime("%Y-%m-%d %H:%M:%S") + " --> " + msg + "\n")
filedesc.close()
return
def _write(req, msg):
"""
Write MSG to the output stream for the end user.
"""
req.write(msg + "\n")
return
|
codewarrior0/Shiboken
|
tests/py3kcompat.py
|
# -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
#
# Contact: PySide team <contact@pyside.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
import sys
IS_PY3K = sys.version_info[0] == 3
if IS_PY3K:
def unicode(s):
return s
def b(s):
return bytes(s, "UTF8")
def l(n):
return n
long = int
else:
def b(s):
return s
def l(n):
return long(n)
unicode = unicode
long = long
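# Illustrative usage sketch (not part of the original file): these helpers
# let test code spell version-dependent literals uniformly, e.g.
#     b("data")   # bytes on Python 3, str on Python 2
#     l(42)       # int on Python 3, long on Python 2
#     unicode(s)  # identity on Python 3, the unicode builtin on Python 2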
|
Mellthas/quodlibet
|
quodlibet/qltk/tagsfrompath.py
|
# Copyright 2004-2005 Joe Wreschnig, Michael Urman, Iñigo Serna
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import re
import os
from gi.repository import Gtk
from senf import fsn2text
import quodlibet
from quodlibet import _
from quodlibet import config
from quodlibet import qltk
from quodlibet import util
from quodlibet.formats import AudioFileError
from quodlibet.plugins import PluginManager
from quodlibet.qltk._editutils import FilterPluginBox, FilterCheckButton
from quodlibet.qltk._editutils import EditingPluginHandler, OverwriteWarning
from quodlibet.qltk._editutils import WriteFailedError
from quodlibet.qltk.wlw import WritingWindow
from quodlibet.qltk.views import TreeViewColumn
from quodlibet.qltk.cbes import ComboBoxEntrySave
from quodlibet.qltk.models import ObjectStore
from quodlibet.qltk import Icons
from quodlibet.util.tagsfrompath import TagsFromPattern
from quodlibet.util.string.splitters import split_value
from quodlibet.util import connect_obj
from quodlibet.plugins.editing import TagsFromPathPlugin
TBP = os.path.join(quodlibet.get_user_dir(), "lists", "tagpatterns")
TBP_EXAMPLES = """\
<tracknumber>. <title>
<tracknumber> - <title>
<tracknumber> - <artist> - <title>
<artist> - <album>/<tracknumber>. <title>
<artist>/<album>/<tracknumber> - <title>"""
class UnderscoresToSpaces(FilterCheckButton):
_label = _("Replace _underscores with spaces")
_section = "tagsfrompath"
_key = "underscores"
_order = 1.0
def filter(self, tag, value):
return value.replace("_", " ")
class TitleCase(FilterCheckButton):
_label = _("_Title-case tags")
_section = "tagsfrompath"
_key = "titlecase"
_order = 1.1
def filter(self, tag, value):
return util.title(value)
class SplitTag(FilterCheckButton):
_label = _("Split into multiple _values")
_section = "tagsfrompath"
_key = "split"
_order = 1.2
def filter(self, tag, value):
spls = config.gettext("editing", "split_on")
spls = spls.split()
return "\n".join(split_value(value, spls))
class TagsFromPathPluginHandler(EditingPluginHandler):
Kind = TagsFromPathPlugin
class ListEntry:
def __init__(self, song):
self.song = song
self.matches = {}
def get_match(self, key):
return self.matches.get(key, u"")
def replace_match(self, key, value):
self.matches[key] = value
@property
def name(self):
return fsn2text(self.song("~basename"))
class TagsFromPath(Gtk.VBox):
title = _("Tags From Path")
FILTERS = [UnderscoresToSpaces, TitleCase, SplitTag]
handler = TagsFromPathPluginHandler()
@classmethod
def init_plugins(cls):
PluginManager.instance.register_handler(cls.handler)
def __init__(self, parent, library):
super().__init__(spacing=6)
self.set_border_width(12)
hbox = Gtk.HBox(spacing=6)
cbes_defaults = TBP_EXAMPLES.split("\n")
self.combo = ComboBoxEntrySave(TBP, cbes_defaults,
title=_("Path Patterns"),
edit_title=_(u"Edit saved patterns…"))
self.combo.show_all()
hbox.pack_start(self.combo, True, True, 0)
self.preview = qltk.Button(_("_Preview"), Icons.VIEW_REFRESH)
self.preview.show()
hbox.pack_start(self.preview, False, True, 0)
self.pack_start(hbox, False, True, 0)
self.combo.get_child().connect('changed', self._changed)
model = ObjectStore()
self.view = Gtk.TreeView(model=model)
self.view.show()
sw = Gtk.ScrolledWindow()
sw.set_shadow_type(Gtk.ShadowType.IN)
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
sw.add(self.view)
self.pack_start(sw, True, True, 0)
vbox = Gtk.VBox()
addreplace = Gtk.ComboBoxText()
addreplace.append_text(_("Tags replace existing ones"))
addreplace.append_text(_("Tags are added to existing ones"))
addreplace.set_active(config.getboolean("tagsfrompath", "add"))
addreplace.connect('changed', self.__add_changed)
vbox.pack_start(addreplace, True, True, 0)
addreplace.show()
self.pack_start(vbox, False, True, 0)
filter_box = FilterPluginBox(self.handler, self.FILTERS)
filter_box.connect("preview", self.__filter_preview)
filter_box.connect("changed", self.__filter_changed)
self.filter_box = filter_box
self.pack_start(filter_box, False, True, 0)
# Save button
self.save = qltk.Button(_("_Save"), Icons.DOCUMENT_SAVE)
self.save.show()
bbox = Gtk.HButtonBox()
bbox.set_layout(Gtk.ButtonBoxStyle.END)
bbox.pack_start(self.save, True, True, 0)
self.pack_start(bbox, False, True, 0)
connect_obj(self.preview, 'clicked', self.__preview, None)
connect_obj(parent, 'changed', self.__class__.__preview, self)
# Save changes
connect_obj(self.save, 'clicked', self.__save, addreplace, library)
for child in self.get_children():
child.show()
def __filter_preview(self, *args):
Gtk.Button.clicked(self.preview)
def __filter_changed(self, *args):
self._changed(self.combo.get_child())
def _changed(self, entry):
self.save.set_sensitive(False)
self.preview.set_sensitive(bool(entry.get_text()))
def __add_changed(self, combo):
config.set("tagsfrompath", "add", str(bool(combo.get_active())))
def __preview(self, songs):
if songs is None:
songs = [row[0].song for row in (self.view.get_model() or [])]
if songs:
pattern_text = self.combo.get_child().get_text()
else:
pattern_text = ""
try:
pattern = TagsFromPattern(pattern_text)
except re.error:
qltk.ErrorMessage(
self, _("Invalid pattern"),
_("The pattern\n\t<b>%s</b>\nis invalid. "
"Possibly it contains the same tag twice or "
"it has unbalanced brackets (< / >).") % (
util.escape(pattern_text))).run()
return
else:
if pattern_text:
self.combo.prepend_text(pattern_text)
self.combo.write(TBP)
invalid = []
for header in pattern.headers:
if not min([song.can_change(header) for song in songs]):
invalid.append(header)
if len(invalid) and songs:
if len(invalid) == 1:
title = _("Invalid tag")
msg = _("Invalid tag <b>%s</b>\n\nThe files currently"
" selected do not support editing this tag.")
else:
title = _("Invalid tags")
msg = _("Invalid tags <b>%s</b>\n\nThe files currently"
" selected do not support editing these tags.")
qltk.ErrorMessage(
self, title, msg % ", ".join(invalid)).run()
pattern = TagsFromPattern("")
self.view.set_model(None)
model = ObjectStore()
for col in self.view.get_columns():
self.view.remove_column(col)
render = Gtk.CellRendererText()
col = TreeViewColumn(title=_('File'))
col.pack_start(render, True)
col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
def cell_data_file(column, cell, model, iter_, data):
entry = model.get_value(iter_)
cell.set_property("text", entry.name)
col.set_cell_data_func(render, cell_data_file)
def cell_data_header(column, cell, model, iter_, header):
entry = model.get_value(iter_)
cell.set_property("text", entry.get_match(header))
self.view.append_column(col)
for i, header in enumerate(pattern.headers):
render = Gtk.CellRendererText()
render.set_property('editable', True)
render.connect('edited', self.__row_edited, model, header)
escaped_title = header.replace("_", "__")
col = Gtk.TreeViewColumn(escaped_title, render)
col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col.set_cell_data_func(render, cell_data_header, header)
self.view.append_column(col)
for song in songs:
entry = ListEntry(song)
match = pattern.match(song)
for h in pattern.headers:
text = match.get(h, '')
for f in self.filter_box.filters:
if f.active:
text = f.filter(h, text)
if not song.can_multiple_values(h):
text = u", ".join(text.split("\n"))
entry.matches[h] = text
model.append([entry])
# save for last to potentially save time
if songs:
self.view.set_model(model)
self.preview.set_sensitive(False)
self.save.set_sensitive(len(pattern.headers) > 0)
def __save(self, addreplace, library):
pattern_text = self.combo.get_child().get_text()
pattern = TagsFromPattern(pattern_text)
model = self.view.get_model()
add = bool(addreplace.get_active())
win = WritingWindow(self, len(model))
win.show()
was_changed = set()
all_done = False
for entry in ((model and model.values()) or []):
song = entry.song
changed = False
if not song.valid():
win.hide()
dialog = OverwriteWarning(self, song)
resp = dialog.run()
win.show()
if resp != OverwriteWarning.RESPONSE_SAVE:
break
for i, h in enumerate(pattern.headers):
text = entry.get_match(h)
if text:
can_multiple = song.can_multiple_values(h)
if not add or h not in song or not can_multiple:
song[h] = text
changed = True
else:
for val in text.split("\n"):
if val not in song.list(h):
song.add(h, val)
changed = True
if changed:
try:
song.write()
except AudioFileError:
util.print_exc()
WriteFailedError(self, song).run()
library.reload(song, changed=was_changed)
break
was_changed.add(song)
if win.step():
break
else:
all_done = True
win.destroy()
library.changed(was_changed)
self.save.set_sensitive(not all_done)
def __row_edited(self, renderer, path, new, model, header):
entry = model[path][0]
if entry.get_match(header) != new:
entry.replace_match(header, new)
self.preview.set_sensitive(True)
self.save.set_sensitive(True)
|
jazzmes/pyroute2
|
pyroute2/netlink/rtnl/ifaddrmsg.py
|
import socket
from pyroute2.common import map_namespace
from pyroute2.netlink import nlmsg
from pyroute2.netlink import nla
# address attributes
#
# Important comment:
# For IPv4, IFA_ADDRESS is a prefix address, not a local interface
# address. It makes no difference for normal interfaces, but
# for point-to-point ones IFA_ADDRESS means DESTINATION address,
# and the local address is supplied in IFA_LOCAL attribute.
#
IFA_F_SECONDARY = 0x01
# IFA_F_TEMPORARY IFA_F_SECONDARY
IFA_F_NODAD = 0x02
IFA_F_OPTIMISTIC = 0x04
IFA_F_DADFAILED = 0x08
IFA_F_HOMEADDRESS = 0x10
IFA_F_DEPRECATED = 0x20
IFA_F_TENTATIVE = 0x40
IFA_F_PERMANENT = 0x80
IFA_F_MANAGETEMPADDR = 0x100
IFA_F_NOPREFIXROUTE = 0x200
(IFA_F_NAMES, IFA_F_VALUES) = map_namespace('IFA_F', globals())
# 8<----------------------------------------------
IFA_F_TEMPORARY = IFA_F_SECONDARY
IFA_F_NAMES['IFA_F_TEMPORARY'] = IFA_F_TEMPORARY
IFA_F_VALUES6 = IFA_F_VALUES.copy()
IFA_F_VALUES6[IFA_F_TEMPORARY] = 'IFA_F_TEMPORARY'
# 8<----------------------------------------------
class ifaddrmsg(nlmsg):
'''
IP address information
struct ifaddrmsg {
unsigned char ifa_family; /* Address type */
unsigned char ifa_prefixlen; /* Prefixlength of address */
unsigned char ifa_flags; /* Address flags */
unsigned char ifa_scope; /* Address scope */
int ifa_index; /* Interface index */
};
'''
prefix = 'IFA_'
fields = (('family', 'B'),
('prefixlen', 'B'),
('flags', 'B'),
('scope', 'B'),
('index', 'I'))
nla_map = (('IFA_UNSPEC', 'hex'),
('IFA_ADDRESS', 'ipaddr'),
('IFA_LOCAL', 'ipaddr'),
('IFA_LABEL', 'asciiz'),
('IFA_BROADCAST', 'ipaddr'),
('IFA_ANYCAST', 'ipaddr'),
('IFA_CACHEINFO', 'cacheinfo'),
('IFA_MULTICAST', 'ipaddr'),
('IFA_FLAGS', 'uint32'))
class cacheinfo(nla):
fields = (('ifa_prefered', 'I'),
('ifa_valid', 'I'),
('cstamp', 'I'),
('tstamp', 'I'))
@staticmethod
def flags2names(flags, family=socket.AF_INET):
if family == socket.AF_INET6:
ifa_f_values = IFA_F_VALUES6
else:
ifa_f_values = IFA_F_VALUES
ret = []
for f in ifa_f_values:
if f & flags:
ret.append(ifa_f_values[f])
return ret
@staticmethod
def names2flags(flags):
ret = 0
for f in flags:
if f[0] == '!':
f = f[1:]
else:
ret |= IFA_F_NAMES[f]
return ret
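# Illustrative usage sketch (not part of the original module):
#     >>> ifaddrmsg.flags2names(0x80)
#     ['IFA_F_PERMANENT']
#     >>> ifaddrmsg.names2flags(['IFA_F_PERMANENT', 'IFA_F_NODAD'])
#     130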
|
pragmatux/systemd
|
src/python-systemd/journal.py
|
# -*- Mode: python; coding:utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2012 David Strauss <david@davidstrauss.net>
# Copyright 2012 Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>
# Copyright 2012 Marti Raudsepp <marti@juffo.org>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import sys as _sys
import datetime as _datetime
import uuid as _uuid
import traceback as _traceback
import os as _os
import logging as _logging
if _sys.version_info >= (3,3):
from collections import ChainMap as _ChainMap
from syslog import (LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR,
LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG)
from ._journal import __version__, sendv, stream_fd
from ._reader import (_Reader, NOP, APPEND, INVALIDATE,
LOCAL_ONLY, RUNTIME_ONLY, SYSTEM_ONLY,
_get_catalog)
from . import id128 as _id128
if _sys.version_info >= (3,):
from ._reader import Monotonic
else:
Monotonic = tuple
def _convert_monotonic(m):
return Monotonic((_datetime.timedelta(microseconds=m[0]),
_uuid.UUID(bytes=m[1])))
def _convert_source_monotonic(s):
return _datetime.timedelta(microseconds=int(s))
def _convert_realtime(t):
return _datetime.datetime.fromtimestamp(t / 1000000)
def _convert_timestamp(s):
return _datetime.datetime.fromtimestamp(int(s) / 1000000)
if _sys.version_info >= (3,):
def _convert_uuid(s):
return _uuid.UUID(s.decode())
else:
_convert_uuid = _uuid.UUID
DEFAULT_CONVERTERS = {
'MESSAGE_ID': _convert_uuid,
'_MACHINE_ID': _convert_uuid,
'_BOOT_ID': _convert_uuid,
'PRIORITY': int,
'LEADER': int,
'SESSION_ID': int,
'USERSPACE_USEC': int,
'INITRD_USEC': int,
'KERNEL_USEC': int,
'_UID': int,
'_GID': int,
'_PID': int,
'SYSLOG_FACILITY': int,
'SYSLOG_PID': int,
'_AUDIT_SESSION': int,
'_AUDIT_LOGINUID': int,
'_SYSTEMD_SESSION': int,
'_SYSTEMD_OWNER_UID': int,
'CODE_LINE': int,
'ERRNO': int,
'EXIT_STATUS': int,
'_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
'__REALTIME_TIMESTAMP': _convert_realtime,
'_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
'__MONOTONIC_TIMESTAMP': _convert_monotonic,
'COREDUMP': bytes,
'COREDUMP_PID': int,
'COREDUMP_UID': int,
'COREDUMP_GID': int,
'COREDUMP_SESSION': int,
'COREDUMP_SIGNAL': int,
'COREDUMP_TIMESTAMP': _convert_timestamp,
}
_IDENT_LETTER = set('ABCDEFGHIJKLMNOPQRTSUVWXYZ_')
def _valid_field_name(s):
return not (set(s) - _IDENT_LETTER)
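# For example, _valid_field_name('CODE_LINE') is True, while
# _valid_field_name('Code-Line') is False (lowercase letters and '-'
# fall outside _IDENT_LETTER).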
class Reader(_Reader):
"""Reader allows the access and filtering of systemd journal
entries. Note that in order to access the system journal, a
non-root user must be in the `systemd-journal` group.
Example usage to print out all informational or higher level
messages for systemd-udevd for this boot:
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j:
... print(entry['MESSAGE'])
See systemd.journal-fields(7) for more info on typical fields
found in the journal.
"""
def __init__(self, flags=0, path=None, converters=None):
"""Create an instance of Reader, which allows filtering and
return of journal entries.
Argument `flags` sets open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens
journal on local machine only; RUNTIME_ONLY opens only
volatile journal files; and SYSTEM_ONLY opens only
journal files of system services and the kernel.
Argument `path` is the directory of journal files. Note that
`flags` and `path` are exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field
names are used as keys into this dictionary. The values must
be single argument functions, which take a `bytes` object and
return a converted value. When there's no entry for a field
name, then the default UTF-8 decoding will be attempted. If
the conversion fails with a ValueError, unconverted bytes
object will be returned. (Note that ValueError is a superclass
of UnicodeDecodeError).
Reader implements the context manager protocol: the journal
will be closed when exiting the block.
"""
super(Reader, self).__init__(flags, path)
if _sys.version_info >= (3,3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key]
If `key` is not present in self.converters, a standard unicode
decoding will be attempted. If the conversion (either
key-specific or the default one) fails with a ValueError, the
original bytes object will be returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _covert_field"""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
def __iter__(self):
"""Part of iterator protocol.
Returns self.
"""
return self
if _sys.version_info >= (3,):
def __next__(self):
"""Part of iterator protocol.
Returns self.get_next().
"""
return self.get_next()
else:
def next(self):
"""Part of iterator protocol.
Returns self.get_next().
"""
return self.get_next()
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined in a logical AND,
and matches of the same field are automatically combined in a
logical OR.
Matches can be passed as strings of form "FIELD=value", or
keyword arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
"""Return the next log entry as a mapping type, currently
a standard dictionary of fields.
Optional skip value will return the `skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
entry['__CURSOR'] = self._get_cursor()
return self._convert_entry(entry)
return dict()
def get_previous(self, skip=1):
"""Return the previous log entry as a mapping type,
currently a standard dictionary of fields.
Optional skip value will return the -`skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
Equivalent to get_next(-skip).
"""
return self.get_next(-skip)
def query_unique(self, field):
"""Return unique values appearing in the journal for given `field`.
Note this does not respect any journal matches.
Entries will be processed with converters specified during
Reader creation.
"""
return set(self._convert_field(field, value)
for value in super(Reader, self).query_unique(field))
def wait(self, timeout=None):
"""Wait for a change in the journal. `timeout` is the maximum
time in seconds to wait, or None, to wait forever.
Returns one of NOP (no change), APPEND (new entries have been
added to the end of the journal), or INVALIDATE (journal files
have been added or removed).
"""
us = -1 if timeout is None else int(timeout * 1000000)
return super(Reader, self).wait(us)
def seek_realtime(self, realtime):
"""Seek to a matching journal entry nearest to `realtime` time.
Argument `realtime` must be either an integer unix timestamp
or datetime.datetime instance.
"""
if isinstance(realtime, _datetime.datetime):
realtime = float(realtime.strftime("%s.%f")) * 1000000
return super(Reader, self).seek_realtime(int(realtime))
def seek_monotonic(self, monotonic, bootid=None):
"""Seek to a matching journal entry nearest to `monotonic` time.
Argument `monotonic` is a timestamp from boot in either
seconds or a datetime.timedelta instance. Argument `bootid`
is a string or UUID representing which boot the monotonic time
refers to. Defaults to the current bootid.
"""
if isinstance(monotonic, _datetime.timedelta):
monotonic = monotonic.total_seconds()
monotonic = int(monotonic * 1000000)
if isinstance(bootid, _uuid.UUID):
bootid = bootid.hex
return super(Reader, self).seek_monotonic(monotonic, bootid)
def log_level(self, level):
"""Set maximum log `level` by setting matches for PRIORITY.
"""
if 0 <= level <= 7:
for i in range(level+1):
self.add_match(PRIORITY="%d" % i)
else:
raise ValueError("Log level must be 0 <= level <= 7")
def messageid_match(self, messageid):
"""Add match for log entries with specified `messageid`.
`messageid` can be a string of hexadecimal digits or a UUID
instance. Standard message IDs can be found in systemd.id128.
Equivalent to add_match(MESSAGE_ID=`messageid`).
"""
if isinstance(messageid, _uuid.UUID):
messageid = messageid.hex
self.add_match(MESSAGE_ID=messageid)
def this_boot(self, bootid=None):
"""Add match for _BOOT_ID equal to current boot ID or the specified boot ID.
If specified, bootid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_BOOT_ID='bootid').
"""
if bootid is None:
bootid = _id128.get_boot().hex
else:
bootid = getattr(bootid, 'hex', bootid)
self.add_match(_BOOT_ID=bootid)
def this_machine(self, machineid=None):
"""Add match for _MACHINE_ID equal to the ID of this machine.
If specified, machineid should be either a UUID or a 32 digit hex number.
Equivalent to add_match(_MACHINE_ID='machineid').
"""
if machineid is None:
machineid = _id128.get_machine().hex
else:
machineid = getattr(machineid, 'hex', machineid)
self.add_match(_MACHINE_ID=machineid)
def get_catalog(mid):
if isinstance(mid, _uuid.UUID):
mid = mid.hex
return _get_catalog(mid)
def _make_line(field, value):
if isinstance(value, bytes):
return field.encode('utf-8') + b'=' + value
else:
return field + '=' + value
def send(MESSAGE, MESSAGE_ID=None,
CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,
**kwargs):
r"""Send a message to the journal.
>>> journal.send('Hello world')
>>> journal.send('Hello, again, world', FIELD2='Greetings!')
>>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef')
Value of the MESSAGE argument will be used for the MESSAGE=
field. MESSAGE must be a string and will be sent as UTF-8 to
the journal.
MESSAGE_ID can be given to uniquely identify the type of
message. It must be a string or a uuid.UUID object.
CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to
identify the caller. Unless at least one of the three is given,
values are extracted from the stack frame of the caller of
send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE
must be an integer.
Additional fields for the journal entry can only be specified
as keyword arguments. The payload can be either a string or
bytes. A string will be sent as UTF-8, and bytes will be sent
as-is to the journal.
Other useful fields include PRIORITY, SYSLOG_FACILITY,
SYSLOG_IDENTIFIER, SYSLOG_PID.
"""
args = ['MESSAGE=' + MESSAGE]
if MESSAGE_ID is not None:
id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
args.append('MESSAGE_ID=' + id)
if CODE_LINE is None and CODE_FILE is None and CODE_FUNC is None:
CODE_FILE, CODE_LINE, CODE_FUNC = \
_traceback.extract_stack(limit=2)[0][:3]
if CODE_FILE is not None:
args.append('CODE_FILE=' + CODE_FILE)
if CODE_LINE is not None:
args.append('CODE_LINE={:d}'.format(CODE_LINE))
if CODE_FUNC is not None:
args.append('CODE_FUNC=' + CODE_FUNC)
args.extend(_make_line(key, val) for key, val in kwargs.items())
return sendv(*args)
def stream(identifier, priority=LOG_DEBUG, level_prefix=False):
r"""Return a file object wrapping a stream to journal.
Log messages written to this file as simple newline-separated
text strings are written to the journal.
The file will be line buffered, so messages are actually sent
after a newline character is written.
>>> stream = journal.stream('myapp')
>>> stream
<open file '<fdopen>', mode 'w' at 0x...>
>>> stream.write('message...\n')
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
Using the interface with print might be more convenient:
>>> from __future__ import print_function
>>> print('message...', file=stream)
priority is the syslog priority, one of `LOG_EMERG`,
`LOG_ALERT`, `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`,
`LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority
level prefixes (such as '<1>') are interpreted. See
sd-daemon(3) for more information.
"""
fd = stream_fd(identifier, priority, level_prefix)
return _os.fdopen(fd, 'w', 1)
class JournalHandler(_logging.Handler):
"""Journal handler class for the Python logging framework.
Please see the Python logging module documentation for an
overview: http://docs.python.org/library/logging.html.
To create a custom logger whose messages go only to journal:
>>> log = logging.getLogger('custom_logger_name')
>>> log.propagate = False
>>> log.addHandler(journal.JournalHandler())
>>> log.warn("Some message: %s", detail)
Note that by default, message levels `INFO` and `DEBUG` are
ignored by the logging framework. To enable those log levels:
>>> log.setLevel(logging.DEBUG)
To redirect all logging messages to journal regardless of where
they come from, attach it to the root logger:
>>> logging.root.addHandler(journal.JournalHandler())
For more complex configurations when using `dictConfig` or
`fileConfig`, specify `systemd.journal.JournalHandler` as the
handler class. Only standard handler configuration options
are supported: `level`, `formatter`, `filters`.
To attach journal MESSAGE_ID, an extra field is supported:
>>> import uuid
>>> mid = uuid.UUID('0123456789ABCDEF0123456789ABCDEF')
>>> log.warn("Message with ID", extra={'MESSAGE_ID': mid})
Fields to be attached to all messages sent through this
handler can be specified as keyword arguments. This probably
makes sense only for SYSLOG_IDENTIFIER and similar fields
which are constant for the whole program:
>>> journal.JournalHandler(SYSLOG_IDENTIFIER='my-cool-app')
The following journal fields will be sent:
`MESSAGE`, `PRIORITY`, `THREAD_NAME`, `CODE_FILE`, `CODE_LINE`,
`CODE_FUNC`, `LOGGER` (name as supplied to getLogger call),
`MESSAGE_ID` (optional, see above), `SYSLOG_IDENTIFIER` (defaults
to sys.argv[0]).
"""
def __init__(self, level=_logging.NOTSET, **kwargs):
super(JournalHandler, self).__init__(level)
for name in kwargs:
if not _valid_field_name(name):
raise ValueError('Invalid field name: ' + name)
if 'SYSLOG_IDENTIFIER' not in kwargs:
kwargs['SYSLOG_IDENTIFIER'] = _sys.argv[0]
self._extra = kwargs
def emit(self, record):
"""Write record as journal event.
MESSAGE is taken from the message provided by the
user, and PRIORITY, LOGGER, THREAD_NAME,
CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be
used if present.
"""
try:
msg = self.format(record)
pri = self.mapPriority(record.levelno)
mid = getattr(record, 'MESSAGE_ID', None)
send(msg,
MESSAGE_ID=mid,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**self._extra)
except Exception:
self.handleError(record)
@staticmethod
def mapPriority(levelno):
"""Map logging levels to journald priorities.
Since Python log level numbers are "sparse", we have
to map numbers in between the standard levels too.
"""
if levelno <= _logging.DEBUG:
return LOG_DEBUG
elif levelno <= _logging.INFO:
return LOG_INFO
elif levelno <= _logging.WARNING:
return LOG_WARNING
elif levelno <= _logging.ERROR:
return LOG_ERR
elif levelno <= _logging.CRITICAL:
return LOG_CRIT
else:
return LOG_ALERT
|
tardyp/buildbot
|
master/buildbot/db/steps.py
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import base
from buildbot.util import epoch2datetime
class StepsConnectorComponent(base.DBConnectorComponent):
# Documentation is in developer/db.rst
url_lock = None
@defer.inlineCallbacks
def getStep(self, stepid=None, buildid=None, number=None, name=None):
tbl = self.db.model.steps
if stepid is not None:
wc = (tbl.c.id == stepid)
else:
if buildid is None:
raise RuntimeError('must supply either stepid or buildid')
if number is not None:
wc = (tbl.c.number == number)
elif name is not None:
wc = (tbl.c.name == name)
else:
raise RuntimeError('must supply either number or name')
wc = wc & (tbl.c.buildid == buildid)
def thd(conn):
q = self.db.model.steps.select(whereclause=wc)
res = conn.execute(q)
row = res.fetchone()
rv = None
if row:
rv = self._stepdictFromRow(row)
res.close()
return rv
return (yield self.db.pool.do(thd))
# returns a Deferred that returns a value
def getSteps(self, buildid):
def thd(conn):
tbl = self.db.model.steps
q = tbl.select()
q = q.where(tbl.c.buildid == buildid)
q = q.order_by(tbl.c.number)
res = conn.execute(q)
return [self._stepdictFromRow(row) for row in res.fetchall()]
return self.db.pool.do(thd)
# returns a Deferred that returns a value
def addStep(self, buildid, name, state_string):
def thd(conn):
tbl = self.db.model.steps
# get the highest current number
r = conn.execute(sa.select([sa.func.max(tbl.c.number)],
whereclause=(tbl.c.buildid == buildid)))
number = r.scalar()
number = 0 if number is None else number + 1
# note that there is no chance for a race condition here,
# since only one master is inserting steps. If there is a
# conflict, then the name is likely already taken.
insert_row = dict(buildid=buildid, number=number,
started_at=None, complete_at=None,
state_string=state_string,
urls_json='[]', name=name)
try:
r = conn.execute(self.db.model.steps.insert(), insert_row)
got_id = r.inserted_primary_key[0]
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
got_id = None
if got_id:
return (got_id, number, name)
# we didn't get an id, so calculate a unique name and use that
# instead. Because names are truncated at the right to fit in a
# 50-character identifier, this isn't a simple query.
res = conn.execute(sa.select([tbl.c.name],
whereclause=((tbl.c.buildid == buildid))))
names = {row[0] for row in res}
num = 1
while True:
numstr = '_%d' % num
newname = name[:50 - len(numstr)] + numstr
if newname not in names:
break
num += 1
insert_row['name'] = newname
r = conn.execute(self.db.model.steps.insert(), insert_row)
got_id = r.inserted_primary_key[0]
return (got_id, number, newname)
return self.db.pool.do(thd)
@defer.inlineCallbacks
def startStep(self, stepid):
started_at = int(self.master.reactor.seconds())
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q, started_at=started_at)
yield self.db.pool.do(thd)
# returns a Deferred that returns None
def setStepStateString(self, stepid, state_string):
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q, state_string=state_string)
return self.db.pool.do(thd)
def addURL(self, stepid, name, url, _racehook=None):
# This method adds a URL to the db.
# This is a read-modify-write, so there is a possibility that several
# urls are added at the same time (e.g. with a DeferredList at the end
# of a step).
# This race condition exists only within the same master, as only one
# master is supposed to add urls to a buildstep, so a DeferredLock is
# used to serialize the read-modify-write cycles.
if self.url_lock is None:
# this runs in reactor thread, so no race here..
self.url_lock = defer.DeferredLock()
def thd(conn):
tbl = self.db.model.steps
wc = (tbl.c.id == stepid)
q = sa.select([tbl.c.urls_json],
whereclause=wc)
res = conn.execute(q)
row = res.fetchone()
if _racehook is not None:
_racehook()
urls = json.loads(row.urls_json)
url_item = dict(name=name, url=url)
if url_item not in urls:
urls.append(url_item)
q = tbl.update(whereclause=wc)
conn.execute(q, urls_json=json.dumps(urls))
return self.url_lock.run(lambda: self.db.pool.do(thd))
# returns a Deferred that returns None
def finishStep(self, stepid, results, hidden):
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q,
complete_at=int(self.master.reactor.seconds()),
results=results,
hidden=1 if hidden else 0)
return self.db.pool.do(thd)
def _stepdictFromRow(self, row):
return dict(
id=row.id,
number=row.number,
name=row.name,
buildid=row.buildid,
started_at=epoch2datetime(row.started_at),
complete_at=epoch2datetime(row.complete_at),
state_string=row.state_string,
results=row.results,
urls=json.loads(row.urls_json),
hidden=bool(row.hidden))
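# Illustrative call sketch (not part of the original module; assumes a
# configured master with a db pool, inside an inlineCallbacks method):
#     stepid, number, name = yield self.master.db.steps.addStep(
#         buildid=buildid, name='compile', state_string=u'pending')
#     yield self.master.db.steps.startStep(stepid)
#     yield self.master.db.steps.finishStep(stepid, results=0, hidden=False)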
|
ElectroCode/lurklib
|
lurklib/connection.py
|
# This file is part of Lurklib.
# Copyright (C) 2011 LK-
#
# Lurklib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lurklib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lurklib. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
class _Connection(object):
def _connect(self, server, port, tls=True, tls_verify=True, proxy=False,
proxy_type='SOCKS5', proxy_server=None,
proxy_port=None, proxy_username=None, proxy_password=None):
"""
Connects the socket to an IRC server.
Required arguments:
* server - Server to connect to.
* port - Port to use.
Optional arguments:
* tls=True - Should we use TLS/SSL?
* tls_verify=True - Verify the TLS certificate?
Only works with Python 3.
* proxy=False - Should we use a proxy?
* proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
* proxy_server=None - Proxy server's address
* proxy_port=None - Proxy server's port
* proxy_username=None - If SOCKS5 is used,
a proxy username/password can be specified.
* proxy_password=None - If SOCKS5 is used,
a proxy username/password can be specified.
"""
with self.lock:
if proxy:
if proxy_type == 'SOCKS5':
proxy_type = self._m_proxy.PROXY_TYPE_SOCKS5
elif proxy_type == 'SOCKS4':
proxy_type = self._m_proxy.PROXY_TYPE_SOCKS4
elif proxy_type == 'HTTP':
proxy_type = self._m_proxy.PROXY_TYPE_HTTP
self._socket = self._m_proxy.socksocket()
self._socket.setproxy(proxytype=proxy_type, \
addr=proxy_server, \
port=proxy_port, \
username=proxy_username, \
password=proxy_password)
if tls:
if tls_verify:
ca_bundle = self._m_tempfile.NamedTemporaryFile().name
with open(ca_bundle, 'w') as bundle_file:
bundle_file.write(self._ca_bundle)
cert_required = self._m_tls.CERT_REQUIRED
self._socket = \
self._m_tls.wrap_socket(self._socket, \
cert_reqs=cert_required, \
ca_certs=ca_bundle)
self._socket.connect((server, port))
self._m_tls.match_hostname(self._socket.getpeercert(), \
server)
return None
else:
self._socket = self._m_tls.wrap_socket(self._socket)
self._socket.connect((server, port))
def _register(self, nick, user, real_name, password=None):
"""
Register the connection with the IRC server.
Required arguments:
* nick - Nick to use. If a tuple/list is specified -
it will try to use the first,
and if the first is already used -
it will try to use the second and so on.
* user - Username to use.
* real_name - Real name to use.
Optional arguments:
* password=None - IRC server password.
"""
with self.lock:
if password:
self._password(password)
self.nick(nick)
self._user(user, real_name)
def _init(self, server, nick, user, real_name, password, port=None,
tls=True, tls_verify=True,
proxy=False, proxy_type='SOCKS5', proxy_server=None,
proxy_port=None, proxy_username=None, proxy_password=None):
"""
Connect and register with the IRC server and -
set server-related information variables.
Required arguments:
* server - Server to connect to.
* nick - Nick to use.
If a tuple/list is specified it will try to use the first,
and if the first is already used -
it will try to use the second and so on.
* user - Username to use.
* real_name - Real name to use.
* password=None - IRC server password.
Optional arguments:
* port - Port to use.
* tls=True - Should we use TLS/SSL?
* tls_verify=True - Verify the TLS certificate?
Only works with Python 3.
* proxy=False - Should we use a proxy?
* proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
* proxy_server=None - Proxy server's address
* proxy_port=None - Proxy server's port
* proxy_username=None - If SOCKS5 is used,
a proxy username/password can be specified.
* proxy_password=None - If SOCKS5 is used,
a proxy username/password can be specified.
"""
with self.lock:
self.current_nick = nick
if tls:
if not port:
port = 6697
self._connect(server, port, tls, tls_verify, proxy, \
proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
else:
if not port:
port = 6667
self._connect(server, port, tls, tls_verify, proxy, \
proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
while self.readable(2):
data = self.recv()
if data[0] == 'NOTICE':
self.server = data[1][0]
self.con_msg.append(data)
self._register(nick, user, real_name, password)
while self.readable(timeout=4):
rdata = self.recv()
if rdata[0] == 'UNKNOWN':
data = rdata[1][3].replace(':', '', 1)
ncode = rdata[1][1]
if ncode == '004':
info = data.split()
self.server = info[0]
self.ircd = info[1]
self.umodes = info[2]
self.cmodes = info[3]
elif ncode == '005':
version = rdata[1][3].replace(':are supported ' + \
'by this server', '')
version = version.split()
for info in version:
try:
info = info.split('=')
name = info[0]
value = info[1]
self.version[name] = value
if name == 'CHARSET':
self.encoding = value
except IndexError:
self.version[info[0]] = True
elif ncode == '376':
self.con_msg.append(rdata)
break
elif ncode == '422':
self.con_msg.append(rdata)
break
else:
if rdata[0] == 'NOTICE':
self.server = rdata[1][0]
self.con_msg.append(rdata[1])
self.motd = tuple(self.motd)
self.con_msg = tuple(self.con_msg)
self.connected = True
self.keep_going = True
def _password(self, password):
"""
Authenticates with the IRC server.
NOTE: This method will not raise an exception
if the password is wrong. It will just fail.
Required arguments:
* password - Password to send.
"""
with self.lock:
self.send('PASS :%s' % password, error_check=True)
def _nick(self, nick):
"""
Sets your nick.
Required arguments:
* nick - New nick.
"""
with self.lock:
self.send('NICK :%s' % nick)
if self.readable():
msg = self._recv(expected_replies='NICK')
if msg[0] == 'NICK':
if not self.hide_called_events:
self.stepback()
for channel in self.channels:
if 'USERS' in self.channels[channel]:
priv_level = \
self.channels[channel]['USERS'][self.current_nick]
del self.channels[channel]['USERS'][self.current_nick]
self.channels[channel]['USERS'][nick] = priv_level
self.current_nick = nick
def nick(self, nick):
"""
Sets your nick.
Required arguments:
* nick - New nick or a tuple of possible new nicks.
"""
nick_set_successfully = False
try:
self._nick(nick)
nick_set_successfully = True
except TypeError:
for nick_ in nick:
try:
self._nick(nick_)
nick_set_successfully = True
break
except self.NicknameInUse:
pass
if not nick_set_successfully:
self.exception('433')
def _user(self, user, real_name):
"""
Sends the USER message.
Required arguments:
* user - Username to send.
* real_name - Real name to send.
"""
with self.lock:
self.send('USER %s 0 * :%s' % (user, real_name))
if self.readable():
self._recv()
self.stepback()
def oper(self, name, password):
"""
Opers up.
Required arguments:
* name - Oper name.
* password - Oper password.
"""
with self.lock:
self.send('OPER %s %s' % (name, password))
snomasks = ''
new_umodes = ''
if self.readable():
msg = self._recv(expected_replies=( \
'MODE', '381', '008'))
if msg[0] == 'MODE':
new_umodes = msg[2].replace(':', '', 1)
elif msg[0] == '381':
return new_umodes, snomasks
elif msg[0] == '008':
snomasks = msg[2].split('(')[1].split(')')[0]
def umode(self, nick, modes=''):
"""
Sets/gets user modes.
Required arguments:
* nick - Nick to set/get user modes for.
Optional arguments:
* modes='' - Sets these user modes on a nick.
"""
with self.lock:
if not modes:
self.send('MODE %s' % nick)
if self.readable():
msg = self._recv(expected_replies=('221',))
if msg[0] == '221':
modes = msg[2].replace('+', '').replace(':', '', 1)
return modes
self.send('MODE %s %s' % (nick, modes))
if self.readable():
msg = self._recv(expected_replies=('MODE',))
if msg[0] == 'MODE':
if not self.hide_called_events:
self.stepback()
return msg[2].replace(':', '', 1)
def service(self):
""" Not implemented. """
raise self.NotImplemented('LurklibError: NotImplemented')
def _quit(self, reason=''):
"""
Sends a QUIT message to the server.
Optional arguments:
* reason='' - Reason for quitting.
"""
with self.lock:
self.send('QUIT :%s' % reason)
def quit(self, reason=''):
"""
Sends a QUIT message, closes the connection and -
ends Lurklib's main loop.
Optional arguments:
* reason='' - Reason for quitting.
"""
with self.lock:
self.keep_going = False
self._quit(reason)
self._socket.shutdown(self._m_socket.SHUT_RDWR)
self._socket.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
""" For use with the Python 'with' statement. """
with self.lock:
self.quit()
def squit(self, server, reason=''):
"""
Quits a server.
Required arguments:
* server - Server to quit.
Optional arguments:
* reason='' - Reason for the server quitting.
"""
with self.lock:
self.send('SQUIT %s :%s' % (server, reason))
while self.readable():
msg = self._recv(expected_replies=('SQUIT',))
if msg[0] == 'SQUIT':
if not self.hide_called_events:
self.stepback()
def latency(self):
""" Checks the connection latency. """
with self.lock:
self.send('PING %s' % self.server)
ctime = self._m_time.time()
msg = self._recv(expected_replies=('PONG',))
if msg[0] == 'PONG':
latency = self._m_time.time() - ctime
return latency
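# Illustrative usage sketch (not part of the original module; the concrete
# client class and its constructor signature are hypothetical, _Connection
# is a mixin):
#     with SomeLurklibClient('irc.example.org', 6697, tls=True) as irc:
#         print irc.latency()
# The 'with' support comes from __enter__/__exit__ above, which send QUIT
# and close the socket on exit.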
|
Gustry/inasafe
|
safe/metadata/generic_layer_metadata.py
|
# -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**metadata module.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '27/05/2015'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from xml.etree import ElementTree
from safe.metadata import BaseMetadata
from safe.metadata.utilities import reading_ancillary_files, prettify_xml
class GenericLayerMetadata(BaseMetadata):
"""
Base class for generic layers such as hazard, exposure and aggregation.
This class can be subclassed so you can create only a minimal
concrete class that implements only _standard_properties to add specific
properties. You can also add a standard XML property that applies to all
subclasses here. In both cases do it as explained below. @property and
@propname.setter will be generated automatically
_standard_properties = {
'TESTprop': (
'gmd:identificationInfo/'
'gmd:MD_DataIdentification/'
'gmd:supplementalInformation/'
'gco:CharacterString')
}
from safe.metadata.utils import merge_dictionaries
_standard_properties = merge_dictionaries(
# change BaseMetadata to GenericLayerMetadata in subclasses
BaseMetadata._standard_properties, _standard_properties)
.. versionadded:: 3.2
"""
def __init__(self, layer_uri, xml_uri=None, json_uri=None):
"""
Constructor
:param layer_uri: uri of the layer for which the metadata are
:type layer_uri: str
:param xml_uri: uri of an xml file to use
:type xml_uri: str
:param json_uri: uri of a json file to use
:type json_uri: str
"""
# initialize base class
super(GenericLayerMetadata, self).__init__(
layer_uri, xml_uri, json_uri)
@property
def dict(self):
"""
calls the overridden method
:return: dictionary representation of the metadata
:rtype: dict
"""
return super(GenericLayerMetadata, self).dict
@property
def json(self):
"""
calls the overridden method
:return: json representation of the metadata
:rtype: str
"""
return super(GenericLayerMetadata, self).json
@property
def xml(self):
"""
calls the overridden method
:return: xml representation of the metadata
:rtype: str
"""
root = super(GenericLayerMetadata, self).xml
return prettify_xml(ElementTree.tostring(root))
def read_json(self):
"""
calls the overridden method
:return: the read metadata
:rtype: dict
"""
with reading_ancillary_files(self):
metadata = super(GenericLayerMetadata, self).read_json()
return metadata
def read_xml(self):
"""
calls the overridden method
:return: the read metadata
:rtype: ElementTree.Element
"""
with reading_ancillary_files(self):
root = super(GenericLayerMetadata, self).read_xml()
return root
def update_report(self):
"""
update the report.
"""
# TODO (MB): implement this by reading the kw and definitions
self.report = self.report
raise NotImplementedError()
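# Illustrative subclassing sketch (not part of the original module;
# HazardLayerMetadata and the XML path are hypothetical), following the
# recipe in the class docstring:
#     from safe.metadata.utils import merge_dictionaries
#     class HazardLayerMetadata(GenericLayerMetadata):
#         _standard_properties = {
#             'hazard': ('gmd:identificationInfo/'
#                        'gmd:MD_DataIdentification/'
#                        'gmd:supplementalInformation/'
#                        'gco:CharacterString')}
#         _standard_properties = merge_dictionaries(
#             GenericLayerMetadata._standard_properties,
#             _standard_properties)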
|
ron1818/Singaboat_RobotX2016
|
robotx_nav/nodes/move_base_force_cancel.py
|
#! /usr/bin/env python
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseActionGoal
from actionlib_msgs.msg import GoalID
class ForceCancel(object):
def __init__(self, nodename="force_cancel", is_newnode=True, repetition=10):
self.repetition = rospy.get_param("~repetition", repetition)
if is_newnode:
rospy.init_node(name=nodename, anonymous=False)
rospy.on_shutdown(self.shutdown)
pub = rospy.Publisher("move_base/cancel", GoalID, queue_size=1)
sub = rospy.Subscriber("move_base/goal", MoveBaseActionGoal, self.callback, queue_size=1)
rospy.wait_for_message("move_base/goal", MoveBaseActionGoal, 60)
r = rospy.Rate(1)
counter = 0
while not rospy.is_shutdown() and (counter < self.repetition):
msg = GoalID()
msg.id = self.id
pub.publish(msg)
r.sleep()
counter += 1
def callback(self, msg):
self.id = msg.goal_id.id
def shutdown(self):
rospy.loginfo("cancel job finished")
rospy.sleep(1)
pass
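# Usage sketch (illustrative): from another, already-initialized node, cancel
# the active move_base goal a few times without spinning up a new node:
#
#     ForceCancel(nodename="force_cancel", is_newnode=False, repetition=5)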
if __name__ == "__main__":
# when run as a standalone script the node must be initialized here
fc = ForceCancel('force_cancel', True, 5)
|
Baloc/TouSIX-Manager
|
tousix_manager/Administration/adminsite.py
|
# Copyright 2015 Rémy Lapeyrade <remy at lapeyrade dot net>
# Copyright 2015 LAAS-CNRS
#
#
# This file is part of TouSIX-Manager.
#
# TouSIX-Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TouSIX-Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TouSIX-Manager. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.admin import AdminSite
class TouSIXAdmin(AdminSite):
"""
Special admin site, created for display widgets in the main panel.
"""
site_header = "TouIX - Administration de TouSIX"
site_title = "TouIX"
index_template = "index_touSIX.html"
admin_tousix = TouSIXAdmin(name='Administration')
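# Usage sketch (illustrative; 'MyModel' and 'MyModelAdmin' are hypothetical):
# register models on this site instead of the default django.contrib.admin
# site, e.g. in an application's admin module:
#
#     from tousix_manager.Administration.adminsite import admin_tousix
#     admin_tousix.register(MyModel, MyModelAdmin)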
|
stephane-caron/pymanoid
|
pymanoid/ik.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 Stephane Caron <stephane.caron@normalesup.org>
#
# This file is part of pymanoid <https://github.com/stephane-caron/pymanoid>.
#
# pymanoid is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
from numpy import dot, eye, hstack, maximum, minimum, ones, sqrt, vstack, zeros
from threading import Lock
from .misc import norm
from .qpsolvers import solve_qp
from .sim import Process
from .tasks import AxisAngleContactTask, ContactTask, DOFTask, PoseTask
RANK_DEFICIENCY_MSG = "rank deficiency in IK problem, " \
"did you add a regularization task?"
class IKSolver(Process):
"""
Compute velocities bringing the system closer to fulfilling a set of tasks.
Parameters
----------
robot : Robot
Robot to be updated.
active_dofs : list of integers, optional
List of DOFs updated by the IK solver.
doflim_gain : scalar, optional
DOF-limit gain as described in [Kanoun12]_. In `this implementation
<https://scaron.info/teaching/inverse-kinematics.html>`_, it should be
between zero and one.
Attributes
----------
doflim_gain : scalar, optional
DOF-limit gain as described in [Kanoun12]_. In `this implementation
<https://scaron.info/teaching/inverse-kinematics.html>`_, it should be
between zero and one.
lm_damping : scalar
Add Levenberg-Marquardt damping as described in [Sugihara11]_. This
damping significantly improves numerical stability, but convergence
gets slower when its value is too high.
slack_dof_limits : bool
Add slack variables to maximize DOF range? This method is used in
[Nozawa16]_ to keep joint angles as far away from their limits as
possible. It slows down computations as there are twice as many
optimization variables, but is more numerically stable and won't
produce inconsistent constraints. Defaults to False.
slack_maximize : scalar
Linear cost weight applied when ``slack_dof_limits`` is True.
slack_regularize : scalar
Regularization weight applied when ``slack_dof_limits`` is True.
qd : array
Velocity returned by last solver call.
robot : pymanoid.Robot
Robot model.
tasks : dict
Dictionary of active IK tasks, indexed by task name.
Notes
-----
One unsatisfactory aspect of the DOF-limit gain is that it slows down the
robot when approaching DOF limits. For instance, it may slow down a foot
motion when approaching the knee singularity, despite the robot being able
to move faster with a fully extended knee.
"""
DEFAULT_GAINS = {
'COM': 0.85,
'CONTACT': 0.85,
'DOF': 0.85,
'MIN_ACCEL': 0.85,
'MIN_CAM': 0.85,
'MIN_VEL': 0.85,
'PENDULUM': 0.85,
'POSE': 0.85,
'POSTURE': 0.85,
}
DEFAULT_WEIGHTS = {
'CONTACT': 1.,
'COM': 1e-2,
'POSE': 1e-3,
'MIN_ACCEL': 1e-4,
'MIN_CAM': 1e-4,
'DOF': 1e-5,
'POSTURE': 1e-6,
'MIN_VEL': 1e-6,
}
def __init__(self, robot, active_dofs=None, doflim_gain=0.5):
super(IKSolver, self).__init__()
if active_dofs is None:
active_dofs = range(robot.nb_dofs)
assert 0. <= doflim_gain <= 1.
self.__lock = Lock()
self.doflim_gain = doflim_gain
self.interaction_dist = 0.1 # [rad]
self.lm_damping = 1e-3
self.qd = zeros(robot.nb_dofs)
self.robot = robot
self.safety_dist = 0.01 # [rad]
self.slack_dof_limits = False
self.slack_maximize = 1e-3
self.slack_regularize = 1e-5
self.tasks = {}
self.verbosity = 0
#
self.set_active_dofs(active_dofs)
def clear(self):
"""
Clear all tasks in the IK solver.
"""
self.tasks = {}
def set_active_dofs(self, active_dofs):
"""
Set DOF indices modified by the IK.
Parameters
----------
active_dofs : list of integers
List of DOF indices.
"""
self.active_dofs = active_dofs
self.nb_active_dofs = len(active_dofs)
self.__reset_dof_limits()
def __reset_dof_limits(self):
"""
Read DOF position, velocity and acceleration limits from robot model.
"""
self.q_max = self.robot.q_max[self.active_dofs]
self.q_min = self.robot.q_min[self.active_dofs]
self.qd_lim = self.robot.qd_lim[self.active_dofs]
if self.robot.qdd_lim is not None:
self.qdd_lim = self.robot.qdd_lim[self.active_dofs]
else: # robot model has no joint acceleration limit
self.qdd_lim = None
def set_gains(self, gains):
"""
Set task gains from a dictionary.
Parameters
----------
gains : string -> double dictionary
Dictionary mapping task labels to default gain values.
"""
for (name, gain) in gains.iteritems():
self.tasks[name].gain = gain
def set_weights(self, weights):
"""
Set task weights from a dictionary.
Parameters
----------
weights : string -> double dictionary
Dictionary mapping task labels to default weight values.
"""
for (name, weight) in weights.iteritems():
self.tasks[name].weight = weight
def __fill_gain(self, task):
if task.name in self.DEFAULT_GAINS:
task.gain = self.DEFAULT_GAINS[task.name]
elif type(task) in [AxisAngleContactTask, ContactTask]:
task.gain = self.DEFAULT_GAINS['CONTACT']
elif type(task) is DOFTask:
task.gain = self.DEFAULT_GAINS['DOF']
elif type(task) is PoseTask:
task.gain = self.DEFAULT_GAINS['POSE']
else: # task type is not accounted for
raise Exception("no gain provided for task '%s'" % task.name)
def __fill_weight(self, task):
if task.name in self.DEFAULT_WEIGHTS:
task.weight = self.DEFAULT_WEIGHTS[task.name]
elif type(task) in [AxisAngleContactTask, ContactTask]:
task.weight = self.DEFAULT_WEIGHTS['CONTACT']
elif type(task) is DOFTask:
task.weight = self.DEFAULT_WEIGHTS['DOF']
elif type(task) is PoseTask:
task.weight = self.DEFAULT_WEIGHTS['POSE']
else: # task type is not accounted for
raise Exception("no weight provided for task '%s'" % task.name)
def add(self, task):
"""
Add a new task to the IK solver.
Parameters
----------
task : Task
New task to add to the list.
"""
if task.name in self.tasks:
raise Exception("Task '%s' already present in IK" % task.name)
if task.gain is None:
self.__fill_gain(task)
if task.weight is None:
self.__fill_weight(task)
with self.__lock:
self.tasks[task.name] = task
def print_costs(self, qd, dt):
"""
Print task costs for the current IK step.
Parameters
----------
qd : array
Robot DOF velocities.
dt : scalar
Timestep for the IK.
"""
print("\n TASK COST")
print("------------------------------")
for task in self.tasks.itervalues():
J = task.jacobian()
r = task.residual(dt)
print("%20s %.2e" % (task.name, norm(dot(J, qd) - r)))
print("")
def remove(self, ident):
"""
Remove a task.
Parameters
----------
ident : string or object
Name or object with a ``name`` field identifying the task.
"""
name = ident if type(ident) is str else ident.name
with self.__lock:
if name not in self.tasks:
return
del self.tasks[name]
def compute_cost(self, dt):
"""
Compute the IK cost of the present system state for a time step of dt.
Parameters
----------
dt : scalar
Time step in [s].
"""
return sum(task.cost(dt) for task in self.tasks.itervalues())
def build_qp_matrices(self, dt):
"""
Build matrices of the quadratic program.
Parameters
----------
dt : scalar
Time step in [s].
Returns
-------
P : (n, n) array
Positive semi-definite cost matrix.
v : array
Cost vector.
qd_max : array
Maximum joint velocity vector.
qd_min : array
Minimum joint velocity vector.
Notes
-----
When the robot model has joint acceleration limits, special care should
be taken when computing the corresponding velocity bounds for the IK.
In short, the robot now needs to avoid the velocity range where it (1)
is not going to collide with a DOF limit in one iteration but (2)
cannot brake fast enough to avoid a collision in the future due to
acceleration limits. This function implements the solution to this
problem described in Equation (14) of [Flacco15]_.
"""
n = self.nb_active_dofs
P = zeros((n, n))
v = zeros(n)
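# each task contributes weight * (J^T J + mu * I) to P and
# -weight * r^T J to v, i.e. the damped least-squares form of
# minimizing sum_i weight_i * |J_i qd - r_i|^2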
with self.__lock:
for task in self.tasks.itervalues():
J = task.jacobian()[:, self.active_dofs]
r = task.residual(dt)
mu = self.lm_damping * max(1e-3, dot(r, r))
P += task.weight * (dot(J.T, J) + mu * eye(n))
v += task.weight * dot(-r.T, J)
q = self.robot.q[self.active_dofs]
qd_max_doflim = (self.q_max - q) / dt
qd_min_doflim = (self.q_min - q) / dt
qd_max = minimum(+self.qd_lim, self.doflim_gain * qd_max_doflim)
qd_min = maximum(-self.qd_lim, self.doflim_gain * qd_min_doflim)
if self.qdd_lim is not None: # straightforward acceleration bounds
qd = self.robot.qd[self.active_dofs]
qd_max_acc = qd + self.qdd_lim * dt
qd_min_acc = qd - self.qdd_lim * dt
qd_max = minimum(qd_max, qd_max_acc)
qd_min = maximum(qd_min, qd_min_acc)
if self.qdd_lim is not None: # DOF-limit acceleration bounds
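# a joint at position q moving toward q_max can still brake in time iff
# qd <= sqrt(2 * qdd_lim * (q_max - q)); these are the velocity bounds
# from Equation (14) of [Flacco15]_ mentioned in the docstring above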
Delta_q_max = maximum(self.q_max - q, 1e-32)
Delta_q_min = maximum(q - self.q_min, 1e-32)
qd_max_doflim_acc = +sqrt(2 * self.qdd_lim * Delta_q_max)
qd_min_doflim_acc = -sqrt(2 * self.qdd_lim * Delta_q_min)
qd_max = minimum(qd_max, self.doflim_gain * qd_max_doflim_acc)
qd_min = maximum(qd_min, self.doflim_gain * qd_min_doflim_acc)
return (P, v, qd_max, qd_min)
def compute_velocity(self, dt):
"""
Compute a new velocity satisfying all tasks at best.
Parameters
----------
dt : scalar
Time step in [s].
Returns
-------
qd : array
Vector of active joint velocities.
Note
----
This QP formulation is the default for
:func:`pymanoid.ik.IKSolver.solve` (posture generation) as it converges
faster.
Notes
-----
The method implemented in this function is reasonably fast but may
become unstable when some tasks are widely infeasible. In such
situations, you can either increase the Levenberg-Marquardt bias
``self.lm_damping`` or set ``slack_dof_limits=True`` which will call
:func:`pymanoid.ik.IKSolver.compute_velocity_with_slack`.
The returned velocity minimizes squared residuals as in the weighted
cost function, which corresponds to the Gauss-Newton algorithm. Indeed,
expanding the square expression in ``cost(task, qd)`` yields
.. math::
\\mathrm{minimize} \\ \\dot{q}^T J^T J \\dot{q} - 2 r^T J \\dot{q}
Differentiating with respect to :math:`\\dot{q}` shows that the minimum
is attained for :math:`J^T J \\dot{q} = J^T r`, where we recognize the
Gauss-Newton update rule.
"""
n = self.nb_active_dofs
P, v, qd_max, qd_min = self.build_qp_matrices(dt)
G = vstack([+eye(n), -eye(n)])
h = hstack([qd_max, -qd_min])
try:
x = solve_qp(P, v, G, h)
self.qd[self.active_dofs] = x
except ValueError as e:
if "matrix G is not positive definite" in e:
raise Exception(RANK_DEFICIENCY_MSG)
raise
return self.qd
def compute_velocity_with_slack(self, dt):
"""
Compute a new velocity satisfying all tasks at best, while trying to
stay away from kinematic constraints.
Parameters
----------
dt : scalar
Time step in [s].
Returns
-------
qd : array
Vector of active joint velocities.
Note
----
This QP formulation is the default for
:func:`pymanoid.ik.IKSolver.step` as it has a more numerically-stable
behavior.
Notes
-----
Check out the discussion of this method around Equation (10) of
[Nozawa16]_. DOF limits are better taken care of by slack variables,
but the variable count doubles and the QP takes roughly 50% more time
to solve.
"""
n = self.nb_active_dofs
E, Z = eye(n), zeros((n, n))
P0, v0, qd_max, qd_min = self.build_qp_matrices(dt)
P = vstack([hstack([P0, Z]), hstack([Z, self.slack_regularize * E])])
v = hstack([v0, -self.slack_maximize * ones(n)])
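# with x = [qd, s], the rows of G and h below encode qd + s / dt <= qd_max,
# -qd + s / dt <= -qd_min and -s <= 0: the nonnegative slack s is a margin
# kept between qd and both DOF bounds, rewarded by -slack_maximize in v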
G = vstack([
hstack([+E, +E / dt]), hstack([-E, +E / dt]), hstack([Z, -E])])
h = hstack([qd_max, -qd_min, zeros(n)])
try:
x = solve_qp(P, v, G, h)
self.qd[self.active_dofs] = x[:n]
except ValueError as e:
if "matrix G is not positive definite" in e:
raise Exception(RANK_DEFICIENCY_MSG)
raise
return self.qd
def step(self, dt):
"""
Apply velocities computed by inverse kinematics.
Parameters
----------
dt : scalar
Time step in [s].
"""
q = self.robot.q
if self.slack_dof_limits:
qd = self.compute_velocity_with_slack(dt)
else: # default QP formulation
qd = self.compute_velocity(dt)
if self.verbosity >= 2:
self.print_costs(qd, dt)
self.robot.set_dof_values(q + qd * dt, clamp=True)
self.robot.set_dof_velocities(qd)
def solve(self, max_it=1000, cost_stop=1e-10, impr_stop=1e-5, dt=1e-2,
warm_start=False, debug=False):
"""
Compute joint-angles that satisfy all kinematic constraints at best.
Parameters
----------
max_it : integer
Maximum number of solver iterations.
cost_stop : scalar
Stop when cost value is below this threshold.
impr_stop : scalar, optional
Stop when cost improvement (relative variation from one iteration
to the next) is less than this threshold.
dt : scalar, optional
Time step in [s].
warm_start : bool, optional
Set to True if the current robot posture is a good guess for IK.
Otherwise, the solver will start by an exploration phase with DOF
velocity limits relaxed and no Levenberg-Marquardt damping.
debug : bool, optional
Set to True for additional debug messages.
Returns
-------
nb_it : int
Number of solver iterations.
cost : scalar
Final value of the cost function.
Notes
-----
Good values of `dt` depend on the weights of the IK tasks. Small values
make convergence slower, while big values make the optimization
unstable (in which case there may be no convergence at all).
"""
cost = 100000.
init_lm_damping = self.lm_damping
init_slack_dof_limits = self.slack_dof_limits
exploration_phase = not warm_start
if exploration_phase:
self.lm_damping = 0
self.slack_dof_limits = False
self.qd_lim = 10. * self.robot.qd_lim[self.active_dofs]
self.qdd_lim = None
for itnum in range(max_it):
prev_cost = cost
cost = self.compute_cost(dt)
impr = abs(cost - prev_cost) / prev_cost
if debug or self.verbosity >= 1:
print("%2d: %.3e (impr: %+.2e)" % (itnum, cost, impr))
if abs(cost) < cost_stop or impr < impr_stop:
break
if exploration_phase and (itnum >= max_it / 2 or impr < 1e-2):
exploration_phase = False
self.lm_damping = init_lm_damping
self.slack_dof_limits = init_slack_dof_limits
self.qd_lim = self.robot.qd_lim[self.active_dofs]
self.step(dt)
self.lm_damping = init_lm_damping
self.slack_dof_limits = init_slack_dof_limits
self.__reset_dof_limits()
self.robot.set_dof_velocities(zeros(self.robot.qd.shape))
return 1 + itnum, cost
def on_tick(self, sim):
"""
Step the IK at each simulation tick.
Parameters
----------
sim : Simulation
Simulation instance.
"""
self.step(sim.dt)
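# Usage sketch (illustrative; the task constructors and targets below are
# assumptions following the pymanoid examples, not prescriptive API):
#
#     ik = IKSolver(robot)
#     ik.add(ContactTask(robot, robot.left_foot, left_foot_target))
#     ik.add(PostureTask(robot, robot.q))  # regularization task
#     nb_it, cost = ik.solve(max_it=100, impr_stop=1e-4)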
|
iCarto/siga
|
extScripting/scripts/jython/Lib/xml/dom/html/HTMLStyleElement.py
|
########################################################################
#
# File Name: HTMLStyleElement
#
# Documentation: http://docs.4suite.com/4DOM/HTMLStyleElement.html
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLStyleElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="STYLE"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_disabled(self):
return self.hasAttribute("DISABLED")
def _set_disabled(self, value):
if value:
self.setAttribute("DISABLED", "DISABLED")
else:
self.removeAttribute("DISABLED")
def _get_media(self):
return self.getAttribute("MEDIA")
def _set_media(self, value):
self.setAttribute("MEDIA", value)
def _get_type(self):
return self.getAttribute("TYPE")
def _set_type(self, value):
self.setAttribute("TYPE", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"disabled" : _get_disabled,
"media" : _get_media,
"type" : _get_type
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"disabled" : _set_disabled,
"media" : _set_media,
"type" : _set_type
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
|
356255531/SpikingDeepRLControl
|
code/EnvBo/Q-Learning/Testing_Arm_4points/q_networks.py
|
#!/usr/bin/python
import numpy as np
import os
import sys
from keras.layers import Activation, Dense, Input
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras.optimizers import RMSprop
NUM_OF_HIDDEN_NEURONS = 100
QNETWORK_NAME = 'online_network'
TARGETNET_NAME = 'target_network'
TAU = 0.0001 # soft update / low pass filter
class QNetworks:
def __init__(self, num_of_actions, num_of_states, num_of_hidden_neurons=NUM_OF_HIDDEN_NEURONS, tau=TAU):
self.NUM_OF_ACTIONS = num_of_actions
self.NUM_OF_HIDDEN_NEURONS = num_of_hidden_neurons
self.NUM_OF_STATES = num_of_states
self.TAU = tau
self.online_net = self.init_model(QNETWORK_NAME)
# load/save the target network under its own name (matches save_models)
self.target_net = self.init_model(TARGETNET_NAME)
def do_soft_update(self):
weights = self.online_net.get_weights()
target_weights = self.target_net.get_weights()
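# low-pass filter (Polyak averaging) of the online weights:
# theta_target <- TAU * theta_online + (1 - TAU) * theta_target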
for i in xrange(len(weights)):
target_weights[i] = self.TAU*weights[i] + (1.0-self.TAU)*target_weights[i]
self.target_net.set_weights(target_weights)
return
def do_hard_update(self):
weights = self.online_net.get_weights()
target_weights = self.target_net.get_weights()
for i in xrange(len(weights)):
target_weights[i] = weights[i]
self.target_net.set_weights(target_weights)
return
def get_weights(self):
# get weights of the online Q network
return self.online_net.get_weights()
def init_model(self, net_name):
model = Sequential()
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS, input_shape=(self.NUM_OF_STATES,)))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_ACTIONS))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='rmsprop')
filename = net_name+'/'+net_name
if os.path.isfile(filename+str(0)+'.txt'):
weights = model.get_weights()
for i in xrange(len(weights)):
loaded_weights = np.loadtxt(filename+str(i)+'.txt')
weights[i] = loaded_weights
model.set_weights(weights)
else:
print 'No model', filename, 'found. Creating a new model.'
return model
def save_models(self):
weights = self.online_net.get_weights()
for i in xrange(len(weights)):
np.savetxt(QNETWORK_NAME+'/'+QNETWORK_NAME+str(i)+'.txt', weights[i])
weights = self.target_net.get_weights()
for i in xrange(len(weights)):
np.savetxt(TARGETNET_NAME+'/'+TARGETNET_NAME+str(i)+'.txt', weights[i])
print("Saved models to disk.")
|
spbguru/repo1
|
nupic/support/__init__.py
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
Internal package.
Package containing modules that are used internally by Numenta Python
tools and plugins to extend standard library functionality.
These modules should NOT be used by client applications.
The following modules are included:
nupic.support.paths
Module containing filesystem path manipulation utilities.
nupic.support.serialization
Module containing Python object serialization (pickling and unpickling) and
versioning utilities.
nupic.support.compress
Module containing Python object encoding and compression utilities.
nupic.support.processes
Module containing operating system process management utilities and wrappers.
nupic.support.output
Module containing operating system interprocess communication utilities and
wrappers.
nupic.support.diff
Module containing file difference calculation wrappers.
nupic.support.vision
Temporary location for vision framework before the move to nupic.vision.
nupic.support.deprecate
Contains the deprecate decorator used for automatic handling of deprecated
methods.
nupic.support.memchecker
Contains the MemChecker class, for checking physical memory and monitoring
memory usage.
nupic.support.imagesearch
Contains functions for searching for images on the web and downloading them.
"""
from __future__ import with_statement
# Standard imports
import os
import sys
import inspect
import logging
import logging.config
import logging.handlers
from platform import python_version
import struct
from StringIO import StringIO
import time
import traceback
from configuration import Configuration
from nupic.support.fshelpers import makeDirectoryFromAbsolutePath
# Local imports
#############################################################################
def getCallerInfo(depth=2):
"""Utility function to get information about function callers
The information is the tuple (function/method name, filename, class)
The class will be None if the caller is just a function and not an object
method.
depth: how far back in the callstack to go to extract the caller info
"""
f = sys._getframe(depth)
method_name = f.f_code.co_name
filename = f.f_code.co_filename
arg_class = None
args = inspect.getargvalues(f)
if len(args[0]) > 0:
arg_name = args[0][0] # potentially the 'self' arg if it's a method
arg_class = args[3][arg_name].__class__.__name__
return (method_name, filename, arg_class)
#############################################################################
def title(s=None, additional='', stream=sys.stdout, frame='-'):
"""Utility function to display nice titles
It automatically extracts the name of the function/method it is called from
and you can add additional text. title() will then print the name
of the function/method and the additional text surrounded by two lines
of dashes. If you don't want the name of the function, you can provide
alternative text (regardless of the additional text).
@param s - text to display, uses the function name and arguments by default
@param additional - extra text to display (not needed if s is not None)
@param stream - the stream to print to. By default goes to standard output
@param frame - the character used for the over and under line. Default is '-'
Examples:
def foo():
title()
will display:
---
foo
---
def foo():
title(additional='(), this is cool!!!')
will display:
----------------------
foo(), this is cool!!!
----------------------
def foo():
title('No function name here!')
will display:
----------------------
No function name here!
----------------------
"""
if s is None:
callable_name, file_name, class_name = getCallerInfo(2)
s = callable_name
if class_name is not None:
s = class_name + '.' + callable_name
lines = (s + additional).split('\n')
length = max(len(line) for line in lines)
print >> stream, frame * length
print >> stream, s + additional
print >> stream, frame * length
#############################################################################
def bringToFront(title):
"""Bring a top-level window with a given title
to the front on Windows"""
if sys.platform != 'win32':
return
import ctypes
find_window = ctypes.windll.user32.FindWindowA
set_foreground_window = ctypes.windll.user32.SetForegroundWindow
hwnd = find_window(None, title)
if hwnd == 0:
raise Exception('There is no window titled: "%s"' % title)
set_foreground_window(hwnd)
#############################################################################
def getUserDocumentsPath():
"""
Find the user's "Documents" directory (OS X), "My Documents" directory
(Windows), or home directory (Unix).
"""
# OS X and Windows code from:
# http://www.blueskyonmars.com/2005/08/05
# /finding-a-users-my-documents-folder-on-windows/
# Alternate Windows code from:
# http://bugs.python.org/issue1763
if sys.platform.startswith('win'):
if sys.platform.startswith('win32'):
# Try the primary method on 32-bit windows
try:
from win32com.shell import shell
alt = False
except ImportError:
try:
import ctypes
dll = ctypes.windll.shell32
alt = True
except:
raise Exception("Could not find 'My Documents'")
else:
# Use the alternate method on 64-bit Windows
alt = True
if not alt:
# Primary method using win32com
df = shell.SHGetDesktopFolder()
pidl = df.ParseDisplayName(0, None,
"::{450d8fba-ad25-11d0-98a8-0800361b1103}")[1]
path = shell.SHGetPathFromIDList(pidl)
else:
# Alternate method using ctypes rather than win32com
buf = ctypes.create_string_buffer(300)
dll.SHGetSpecialFolderPathA(None, buf, 0x0005, False)
path = buf.value
elif sys.platform.startswith('darwin'):
from Carbon import Folder, Folders
folderref = Folder.FSFindFolder(Folders.kUserDomain,
Folders.kDocumentsFolderType,
False)
path = folderref.as_pathname()
else:
path = os.getenv('HOME')
return path
#############################################################################
def getArgumentDescriptions(f):
"""
Get the arguments, default values, and argument descriptions for a function.
Returns a list of tuples: (argName, argDescription, defaultValue). If an
argument has no default value, the tuple is only two elements long (as None
cannot be used, since it could be a default value itself).
Parses the argument descriptions out of the function docstring, using a
format something like this:
[junk]
argument_name: description...
description...
description...
[junk]
[more arguments]
It will find an argument as long as the exact argument name starts the line.
It will then strip a trailing colon, if present, then strip the rest of the
line and use it to start the description. It will then strip and append any
subsequent lines with a greater indent level than the original argument name.
"""
# Get the argument names and default values
argspec = inspect.getargspec(f)
# Scan through the docstring to extract documentation for each argument as
# follows:
# Check the first word of the line, stripping a colon if one is present.
# If it matches an argument name:
# Take the rest of the line, stripping leading whitespace
# Take each subsequent line if its indentation level is greater than the
# initial indentation level
# Once the indentation level is back to the original level, look for
# another argument
docstring = f.__doc__
descriptions = {}
if docstring:
lines = docstring.split('\n')
i = 0
while i < len(lines):
stripped = lines[i].lstrip()
if not stripped:
i += 1
continue
# Indentation level is index of the first character
indentLevel = lines[i].index(stripped[0])
# Get the first word and remove the colon, if present
firstWord = stripped.split()[0]
if firstWord.endswith(':'):
firstWord = firstWord[:-1]
if firstWord in argspec.args:
# Found an argument
argName = firstWord
restOfLine = stripped[len(firstWord)+1:].strip()
argLines = [restOfLine]
# Take the next lines as long as they are indented more
i += 1
while i < len(lines):
stripped = lines[i].lstrip()
if not stripped:
# Empty line - stop
break
if lines[i].index(stripped[0]) <= indentLevel:
# No longer indented far enough - stop
break
# This line counts too
argLines.append(lines[i].strip())
i += 1
# Store this description
descriptions[argName] = ' '.join(argLines)
else:
# Not an argument
i += 1
# Build the list of (argName, description, defaultValue)
args = []
if argspec.defaults:
defaultCount = len(argspec.defaults)
else:
defaultCount = 0
nonDefaultArgCount = len(argspec.args) - defaultCount
for i, argName in enumerate(argspec.args):
if i >= nonDefaultArgCount:
defaultValue = argspec.defaults[i - nonDefaultArgCount]
args.append((argName, descriptions.get(argName, ""), defaultValue))
else:
args.append((argName, descriptions.get(argName, "")))
return args
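# Example (illustrative):
#
#     def foo(a, b=1):
#         """
#         a: first operand
#         b: second operand
#         """
#     getArgumentDescriptions(foo)
#     # -> [('a', 'first operand'), ('b', 'second operand', 1)]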
#############################################################################
# TODO queryNumInwardIters appears to be unused and should probably be deleted
# from here altogether; it's likely an artifact of the legacy vision support.
#def queryNumInwardIters(configPath, radialLength, numRepetitions=1):
# """
# Public utility API that accepts a config path and
# radial length, and determines the proper number of
# training iterations with which to invoke net.run()
# when running a PictureSensor in 'inward' mode.
# """
# numCats = queryNumCategories(configPath)
# sequenceLen = radialLength + 1
# numItersPerCat = (8 * radialLength) * sequenceLen
# numTrainingItersTP = numItersPerCat * numCats
# return numTrainingItersTP * numRepetitions
#############################################################################
gLoggingInitialized = False
def initLogging(verbose=False, console='stdout', consoleLevel='DEBUG'):
"""
Initialize NuPic logging by reading in from the logging configuration file. The
logging configuration file is named 'nupic-logging.conf' and is expected to be
in the format defined by the python logging module.
If the environment variable 'NTA_CONF_PATH' is defined, then the logging
configuration file is expected to be in the NTA_CONF_PATH directory. If
NTA_CONF_PATH is not defined, then it is found in the 'conf/default'
subdirectory of the NuPic installation directory (typically
~/nta/current/conf/default)
The logging configuration file can use the environment variable 'NTA_LOG_DIR'
to set the locations of log files. If this variable is not defined already in
the environment, this method will set it to the 'logs' subdirectory of the
NuPic install directory (typically ~/nta/eng/logs) before loading in the
configuration file.
console: Defines console output for the default "root" logging
configuration; this may be one of 'stdout', 'stderr', or None;
Use None to suppress console logging output
consoleLevel:
Logging-level filter string for console output corresponding to
logging levels in the logging module; may be one of:
'DEBUG', 'INFO', 'WARNING', 'ERROR', or 'CRITICAL'.
E.g., a value of 'WARNING' suppresses DEBUG and INFO level output
to console, but allows WARNING, ERROR, and CRITICAL
"""
# NOTE: If you call this twice from the same process there seems to be a
# bug - logged messages don't show up for loggers that you do another
# logging.getLogger() on.
global gLoggingInitialized
if gLoggingInitialized:
if verbose:
print >> sys.stderr, "Logging already initialized, doing nothing."
return
consoleStreamMappings = {
'stdout' : 'stdoutConsoleHandler',
'stderr' : 'stderrConsoleHandler',
}
consoleLogLevels = ['DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR', 'CRITICAL',
'FATAL']
assert console is None or console in consoleStreamMappings.keys(), (
'Unexpected console arg value: %r') % (console,)
assert consoleLevel in consoleLogLevels, (
'Unexpected consoleLevel arg value: %r') % (consoleLevel)
# -----------------------------------------------------------------------
# Setup logging. Look for the nupic-logging.conf file, first in the
# NTA_CONFIG_DIR path (if defined), then in a subdirectory of the nupic
# module
# TODO: move into nupic.support
configFilename = 'nupic-logging.conf'
try:
configFilePath = Configuration.findConfigFile(configFilename)
except:
configFilePath = None
# If NTA_LOG_DIR is not defined, set it now. This is used by the logging
# config file to set the path for the log files
if 'NTA_LOG_DIR' not in os.environ:
os.environ['NTA_LOG_DIR'] = os.path.join(os.environ['NUPIC'], 'logs')
if not os.path.exists(os.environ['NTA_LOG_DIR']):
makeDirectoryFromAbsolutePath(os.path.abspath(os.environ['NTA_LOG_DIR']))
# Load in the logging configuration file
if configFilePath is None:
print >> sys.stderr, (
"WARNING: Could not find the logging configuration file " \
"(filename: '%s', expected to be in search path: %s). Logging is " \
" disabled.") % (configFilename, Configuration.getConfigPaths())
else:
if verbose:
print >> sys.stderr, (
"Using logging configuration file: %s") % (configFilePath)
# This dict will hold our replacement strings for logging configuration
replacements = dict()
def makeKey(name):
""" Makes replacement key """
return "$$%s$$" % (name)
platform = sys.platform.lower()
if platform.startswith('java'):
# Jython
import java.lang
platform = java.lang.System.getProperty("os.name").lower()
if platform.startswith('mac os x'):
platform = 'darwin'
if platform.startswith('darwin'):
replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/var/run/syslog"'
elif platform.startswith('linux'):
replacements[makeKey('SYSLOG_HANDLER_ADDRESS')] = '"/dev/log"'
else:
raise RuntimeError("This platform is neither darwin nor linux: %s" % (
sys.platform,))
if False: #os.path.isdir('/var/log/numenta/nupic'):
# NOTE: Not using syslogHandler for now because it either truncates or
# drops messages over ~1,400 bytes (depending on platform)
# Nupic logs go to syslog. Also, SysLogHandler raises an exception
# on jython (at least on 2.5.2): "AttributeError: 'module' object has no
attribute 'AF_UNIX'" (jython is used by a sub-module of
# ClientJobManager)
replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'syslogHandler'
else:
# Nupic logs go to file
replacements[makeKey('PERSISTENT_LOG_HANDLER')] = 'fileHandler'
# Set up log file path for the default file handler
logFilePath = _genLoggingFilePath()
makeDirectoryFromAbsolutePath(os.path.dirname(logFilePath))
replacements[makeKey('FILE_HANDLER_LOG_FILENAME')] = repr(logFilePath)
# Set up root logger
replacements[makeKey('ROOT_LOGGER_HANDLERS')] = (
replacements[makeKey('PERSISTENT_LOG_HANDLER')])
if console is not None:
replacements[makeKey('ROOT_LOGGER_HANDLERS')] += (
',' + consoleStreamMappings[console])
# Set up log level for console handlers
replacements[makeKey('CONSOLE_LOG_LEVEL')] = consoleLevel
customConfig = StringIO()
with open(configFilePath) as src:
for lineNum, line in enumerate(src):
if "$$" in line:
for (key, value) in replacements.items():
line = line.replace(key, value)
# If there is still a replacement string in the line, we're missing it
# from our replacements dict
if "$$" in line and "$$<key>$$" not in line:
raise RuntimeError(("The text %r, found at line #%d of file %r, "
"contains a string not found in our replacement "
"dict.") % (line, lineNum, configFilePath))
customConfig.write(line)
customConfig.seek(0)
if python_version()[:3] >= '2.6':
# NOTE: the disable_existing_loggers arg is new as of Python 2.6, so it's
# not supported on our jython interpreter, which was v2.5.x as of this
# writing
logging.config.fileConfig(customConfig, disable_existing_loggers=False)
else:
logging.config.fileConfig(customConfig)
gLoggingInitialized = True
#############################################################################
def reinitLoggingDir():
""" (Re-)Initialize the loging directory for the calling application that
uses initLogging() for logging configuration
NOTE: It's typially unnecessary to call this function directly since
initLogging takes care of it for you. This function is exposed primarily for
the benefit of nupic-services.py to allow it to restore its logging directory
after the hard-reset operation.
"""
if gLoggingInitialized:
makeDirectoryFromAbsolutePath(os.path.dirname(_genLoggingFilePath()))
#############################################################################
def _genLoggingFilePath():
""" Generate a filepath for the calling app """
appName = os.path.splitext(os.path.basename(sys.argv[0]))[0] or 'UnknownApp'
appLogDir = os.path.abspath(os.path.join(
os.environ['NTA_LOG_DIR'],
'numenta-logs-%s' % (os.environ['USER'],),
appName))
appLogFileName = '%s-%s-%s.log' % (
appName, long(time.mktime(time.gmtime())), os.getpid())
return os.path.join(appLogDir, appLogFileName)
#############################################################################
def enableLoggingErrorDebugging():
""" Overrides the python logging facility's Handler.handleError function to
raise an exception instead of print and suppressing it. This allows a deeper
stacktrace to be emitted that is very helpful for quickly finding the
file/line that initiated the invalidly-formatted logging operation.
NOTE: This is for debugging only - be sure to remove the call to this function
*before* checking in your changes to the source code repository, as it will
cause the application to fail if some invalidly-formatted logging statement
still exists in your code.
Example usage: enableLoggingErrorDebugging must be called *after*
initLogging()
import nupic.support
nupic.support.initLogging()
nupic.support.enableLoggingErrorDebugging()
"TypeError: not all arguments converted during string formatting" is an
example exception that might be output by the built-in handlers with the
following very shallow traceback that doesn't go deep enough to show the
source of the problem:
File ".../python2.6/logging/__init__.py", line 776, in emit
msg = self.format(record)
File ".../python2.6/logging/__init__.py", line 654, in format
return fmt.format(record)
File ".../python2.6/logging/__init__.py", line 436, in format
record.message = record.getMessage()
File ".../python2.6/logging/__init__.py", line 306, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
"""
print >> sys.stderr, ("WARNING")
print >> sys.stderr, ("WARNING: "
"nupic.support.enableLoggingErrorDebugging() was "
"called to install a debugging patch into all logging handlers that "
"will cause the program to fail if a logging exception occurrs; this "
"call is for debugging only and MUST be removed before checking in code "
"into production system. Caller: %s") % (
traceback.format_stack(),)
print >> sys.stderr, ("WARNING")
def handleErrorPatch(*args, **kwargs):
if logging.raiseExceptions:
raise
for handler in logging._handlerList:
handler.handleError = handleErrorPatch
return
#############################################################################
def clippedObj(obj, maxElementSize=64):
"""
Return a clipped version of obj suitable for printing. This
is useful when generating log messages by printing data structures where
you don't want the message to be too long.
If passed in a dict, list, or namedtuple, each element of the structure's
string representation will be limited to 'maxElementSize' characters. This
will return a new object where the string representation of each element
has been truncated to fit within maxElementSize.
"""
# Is it a named tuple?
if hasattr(obj, '_asdict'):
obj = obj._asdict()
# Printing a dict?
if isinstance(obj, dict):
objOut = dict()
for key,val in obj.iteritems():
objOut[key] = clippedObj(val, maxElementSize)
# Printing a list?
elif hasattr(obj, '__iter__'):
objOut = []
for val in obj:
objOut.append(clippedObj(val, maxElementSize))
# Some other object
else:
objOut = str(obj)
if len(objOut) > maxElementSize:
objOut = objOut[0:maxElementSize] + '...'
return objOut
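# Example (illustrative):
#
#     clippedObj({'a': 'x' * 100, 'b': 3}, maxElementSize=8)
#     # -> {'a': 'xxxxxxxx...', 'b': '3'}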
###############################################################################
def intTo8ByteArray(inValue):
"""
Converts an int to a packed byte array, with left most significant byte
"""
values = (
(inValue >> 56 ) & 0xff,
(inValue >> 48 ) & 0xff,
(inValue >> 40 ) & 0xff,
(inValue >> 32 ) & 0xff,
(inValue >> 24 ) & 0xff,
(inValue >> 16 ) & 0xff,
(inValue >> 8 ) & 0xff,
inValue & 0xff
)
s = struct.Struct('B B B B B B B B')
packed_data = s.pack(*values)
return packed_data
###############################################################################
def byteArrayToInt(packed_data):
"""
Converts a byte array into an integer
"""
value = struct.unpack('B B B B B B B B', packed_data)
return value[0] << 56 | \
value[1] << 48 | \
value[2] << 40 | \
value[3] << 32 | \
value[4] << 24 | \
value[5] << 16 | \
value[6] << 8 | \
value[7]
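# Round-trip example (illustrative):
#
#     packed = intTo8ByteArray(0x0102030405060708)
#     byteArrayToInt(packed)  # -> 72623859790382856 == 0x0102030405060708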
###############################################################################
def getSpecialRowID():
"""
Special row id is 0xFF FFFF FFFF FFFF FFFF (9 bytes of 0xFF)
"""
values = (0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
s = struct.Struct('B B B B B B B B B')
packed_data = s.pack(*values)
return packed_data
################################################################################
_FLOAT_SECONDS_IN_A_DAY = 24.0 * 60.0 * 60.0
def floatSecondsFromTimedelta(td):
""" Convert datetime.timedelta to seconds in floating point """
sec = (td.days * _FLOAT_SECONDS_IN_A_DAY + td.seconds * 1.0 +
td.microseconds / 1E6)
return sec
#############################################################################
def aggregationToMonthsSeconds(interval):
"""
Return the number of months and seconds from an aggregation dict that
represents a date and time.
Interval is a dict that contain one or more of the following keys: 'years',
'months', 'weeks', 'days', 'hours', 'minutes', seconds', 'milliseconds',
'microseconds'.
Parameters:
---------------------------------------------------------------------
interval: The aggregation interval, as a dict representing a date and time
retval: number of months and seconds in the interval, as a dict:
{months': XX, 'seconds': XX}. The seconds is
a floating point that can represent resolutions down to a
microsecond.
For example:
aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds': 42}) ==
{'months':12, 'seconds':14400.000042}
"""
seconds = interval.get('microseconds', 0) * 0.000001
seconds += interval.get('milliseconds', 0) * 0.001
seconds += interval.get('seconds', 0)
seconds += interval.get('minutes', 0) * 60
seconds += interval.get('hours', 0) * 60 * 60
seconds += interval.get('days', 0) * 24 * 60 * 60
seconds += interval.get('weeks', 0) * 7 * 24 * 60 * 60
months = interval.get('months', 0)
months += 12 * interval.get('years', 0)
return {'months': months, 'seconds': seconds}
#############################################################################
def aggregationDivide(dividend, divisor):
"""
Return the result from dividing two dicts that represent date and time.
Both dividend and divisor are dicts that contain one or more of the following
keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', seconds',
'milliseconds', 'microseconds'.
Parameters:
---------------------------------------------------------------------
dividend: The numerator, as a dict representing a date and time
divisor: the denominator, as a dict representing a date and time
retval: number of times divisor goes into dividend, as a floating point
number.
For example:
aggregationDivide({'hours': 4}, {'minutes': 15}) == 16
"""
# Convert each into months and seconds
dividendMonthSec = aggregationToMonthsSeconds(dividend)
divisorMonthSec = aggregationToMonthsSeconds(divisor)
# It is a usage error to mix both months and seconds in the same operation
if (dividendMonthSec['months'] != 0 and divisorMonthSec['seconds'] != 0) \
or (dividendMonthSec['seconds'] != 0 and divisorMonthSec['months'] != 0):
raise RuntimeError("Aggregation dicts with months/years can only be "
"inter-operated with other aggregation dicts that contain "
"months/years")
if dividendMonthSec['months'] > 0:
return float(dividendMonthSec['months']) / divisorMonthSec['months']
else:
return float(dividendMonthSec['seconds']) / divisorMonthSec['seconds']
|
reedloden/ansible
|
lib/ansible/executor/task_queue_manager.py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar
from ansible.vars.hostvars import HostVars
from ansible.plugins.callback import CallbackBase
from ansible.utils.unicode import to_unicode
from ansible.compat.six import string_types
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskQueueManager']
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
manager object with shared datastructures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._run_additional_callbacks = run_additional_callbacks
self._run_tree = run_tree
self._callbacks_loaded = False
self._callback_plugins = []
self._start_at_done = False
self._result_prc = None
# make sure the module path (if specified) is parsed and
# added to the module_loader object
if options.module_path is not None:
for path in options.module_path.split(os.pathsep):
module_loader.add_directory(path)
# a special flag to help us exit cleanly
self._terminated = False
# this dictionary is used to keep track of notified handlers
self._notified_handlers = dict()
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
self._final_q = multiprocessing.Queue()
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
def _initialize_processes(self, num):
self._workers = []
for i in range(num):
rslt_q = multiprocessing.Queue()
self._workers.append([None, rslt_q])
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
'''
Clears and initializes the shared notified handlers dict with an entry
for each handler in the play; each entry is an empty list that will
collect the inventory hostnames of hosts triggering that handler.
'''
# Zero the dictionary first by removing any entries there.
# Proxied dicts don't support iteritems, so we have to use keys()
for key in self._notified_handlers.keys():
del self._notified_handlers[key]
def _process_block(b):
temp_list = []
for t in b.block:
if isinstance(t, Block):
temp_list.extend(_process_block(t))
else:
temp_list.append(t)
return temp_list
handler_list = []
for handler_block in handlers:
handler_list.extend(_process_block(handler_block))
# then initialize it with the handler names from the handler list
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if isinstance(self._stdout_callback, CallbackBase):
stdout_callback_loaded = True
elif isinstance(self._stdout_callback, string_types):
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
else:
self._stdout_callback = callback_loader.get(self._stdout_callback)
stdout_callback_loaded = True
else:
raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
# we only allow one callback of type 'stdout' to be loaded, so check
# the name of the current plugin and type to see if we need to skip
# loading this callback plugin
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
if callback_type == 'stdout':
if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
elif callback_name == 'tree' and self._run_tree:
pass
elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
continue
self._callback_plugins.append(callback_plugin())
self._callbacks_loaded = True
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
self.hostvars = HostVars(
inventory=self._inventory,
variable_manager=self._variable_manager,
loader=self._loader,
)
# Number of worker forks: the lowest of configured forks, play serial,
# and the number of targeted hosts
contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))]
contenders = [ v for v in contenders if v is not None and v > 0 ]
self._initialize_processes(min(contenders))
play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(new_play.handlers)
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
start_at_done = self._start_at_done,
)
# during initialization, the PlayContext will clear the start_at_task
# field to signal that a matching task was found, so check that here
# and remember it so we don't try to skip tasks on future plays
if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
self._start_at_done = True
# and run the play using the strategy and cleanup on way out
play_return = strategy.run(iterator, play_context)
self._cleanup_processes()
return play_return
def cleanup(self):
display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._cleanup_processes()
def _cleanup_processes(self):
if self._result_prc:
self._result_prc.terminate()
for (worker_prc, rslt_q) in self._workers:
rslt_q.close()
if worker_prc and worker_prc.is_alive():
try:
worker_prc.terminate()
except AttributeError:
pass
def clear_failed_hosts(self):
self._failed_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_notified_handlers(self):
return self._notified_handlers
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
# try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
for possible in [method_name, 'v2_on_any']:
gotit = getattr(callback_plugin, possible, None)
if gotit is None:
gotit = getattr(callback_plugin, possible.replace('v2_',''), None)
if gotit is not None:
methods.append(gotit)
for method in methods:
try:
# temporary hack, required due to a change in the callback API, so
# we don't break backwards compatibility with callbacks which were
# designed to use the original API
# FIXME: target for removal and revert to the original code here after a year (2017-01-14)
if method_name == 'v2_playbook_on_start':
import inspect
(f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
if 'playbook' in f_args:
method(*args, **kwargs)
else:
method()
else:
method(*args, **kwargs)
except Exception as e:
#TODO: add config toggle to make this fatal or not?
display.warning(u"Failure when attempting to use callback plugin (%s): %s" % (to_unicode(callback_plugin), to_unicode(e)))
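# Usage sketch (illustrative; mirrors how PlaybookExecutor drives this class,
# with cleanup on the way out):
#
#     tqm = TaskQueueManager(inventory, variable_manager, loader, options,
#                            passwords)
#     try:
#         result = tqm.run(play)
#     finally:
#         tqm.cleanup()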
|
DevinDewitt/pyqt5
|
examples/mainwindows/sdi/sdi_rc.py
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sun May 12 18:04:51 2013
# by: The Resource Compiler for PyQt (Qt v5.0.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x03\x54\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\xe6\x49\x44\x41\x54\x58\xc3\xd5\
\x97\xcd\x4e\x13\x61\x14\x86\xeb\x35\x94\x95\x7b\x71\xe1\xd2\xc4\
\xe0\x05\xb8\xe2\x0e\x5c\xb8\xf4\x02\x5c\xb1\x30\xea\x05\x18\x96\
\x26\x62\x58\xb8\xb0\x91\x58\x20\xd1\x9d\xbf\x89\xa4\x14\xb1\x52\
\xa4\x48\x45\x94\xfe\xd0\x02\x43\xff\xa6\x9d\x19\xa6\x65\x80\xe3\
\x79\x7b\xfa\x85\x51\x4a\x82\xc9\x21\x86\x49\xde\x9c\x33\xa7\xf3\
\xcd\xfb\x9c\xf3\x4d\x9b\x4e\x84\x88\x22\xff\x53\x91\x73\x01\xc0\
\xc7\xd5\x90\x6e\xff\xa5\xfb\xac\xc7\x3d\x3d\x64\x0d\xa9\x02\xf0\
\x31\x32\x3c\x3c\xbc\x6a\x34\x3a\x3a\xba\x19\x56\x3c\x1e\xaf\x26\
\x93\xc9\x56\x3a\x9d\x76\x13\x89\x44\x6b\x60\x60\x20\xcd\x6b\x6e\
\x68\x02\xa4\x38\xd2\xe1\xe1\x71\x99\xba\xef\xb7\xc9\xb2\x2c\xda\
\xdf\xdf\x27\x86\xf1\x78\xcd\x18\xeb\x8a\x1a\x40\x3f\xf3\xb0\x1c\
\xc7\xa5\x4c\x66\xb9\x0b\x14\x04\x01\xc5\x62\xb1\x3a\xaf\x7b\x70\
\x1a\x88\x53\x01\x1c\x1c\x10\x77\x77\xb2\x6c\xdb\xa1\xf9\xf9\xcf\
\x64\x0e\xd7\x75\xe9\xf9\xc4\x44\x17\x42\x05\x00\x26\x7b\xc1\xc9\
\xaa\x37\x1c\x4a\xce\xcd\x53\xf8\x70\x5d\x0f\x8b\x17\x54\x00\x82\
\x10\x40\x67\x4f\x14\xce\xed\xa6\x47\x1f\x67\x66\xe9\xf5\x9b\xb7\
\x14\x9f\x9c\xa4\xa9\xa9\x69\x7a\xf7\xfe\x03\x45\xa3\xd1\x65\x5e\
\x7f\x41\x05\xc0\xef\x10\xed\xb6\x25\x86\x85\x9a\xe3\x05\x94\x5d\
\xcd\xd1\xe4\xf4\x2b\x7a\x32\xfe\x94\x9e\xc5\x5e\xd0\x4c\x62\x0e\
\x8b\x17\x55\x00\xda\x81\x18\xf5\x13\x20\x3c\xff\x90\x6a\xcd\x36\
\x15\x37\xab\x94\x2f\x6e\x53\x89\x63\x8d\xb7\x85\xd7\x7e\x51\x01\
\xf0\x79\xcc\xcd\x5d\x1e\xb5\xc7\x7b\xdb\xee\x9f\x3b\xbe\xe4\x88\
\x5d\xb8\xbd\xee\xe2\x94\xca\x33\xe0\x75\xe4\xc6\x75\x57\x62\xd8\
\x10\x39\xea\xe6\x33\x44\xd4\x01\xa7\x06\xe0\xf4\x3a\xad\x39\x22\
\x98\x98\x68\x72\x80\x98\x6b\x50\x53\x9d\x00\x00\x2a\x2d\xb9\x31\
\xe2\x4e\x53\x8c\x10\x0d\x04\xf2\x6d\xfb\x28\xb6\x7c\x45\x00\x9b\
\x3b\xdb\x6a\xfc\x69\x8e\x3c\x6c\x88\x1a\xae\x39\x13\x80\x3a\x8f\
\xb7\x54\x23\x2a\xd7\xc5\x04\x06\x06\x00\x35\x28\x9c\x17\xab\xbc\
\x25\xbb\xca\x13\xc0\x4d\x61\x0e\x15\x2a\x72\x6e\xcc\x7e\x5a\x02\
\x68\x6a\xdd\xad\xf1\x94\x27\x00\x53\xdc\x1c\x71\x6d\x5b\x40\x60\
\x9a\xab\x1c\x75\x9e\xeb\x81\x41\x15\x47\x11\xc0\x6a\x89\x31\x0c\
\xd6\x77\x04\x20\x0c\x64\x26\x62\xb6\x69\x75\x8b\xa8\xaa\x09\x50\
\xb6\xc5\xbc\xd0\x03\xf8\xbe\x29\x63\x87\x29\x60\x0c\x18\x84\x1c\
\x00\x5b\x4d\x45\x00\x74\x03\x53\x98\xad\x94\xc5\x1c\xe7\x46\xe6\
\x1c\x00\xc8\x71\x5d\xa9\xa1\x08\x80\xfd\xfc\x56\x12\x73\x33\x01\
\x08\x35\x18\x42\xe8\xda\x7c\x8e\x29\xa8\x4e\x00\x5b\x00\x03\xc8\
\x98\x67\x36\x04\x00\x32\xe6\x85\xde\xf8\x17\x0b\xfc\x2c\xd8\x8a\
\x00\x18\x67\x3a\x4f\xb4\x54\x14\x23\x98\x02\x00\x02\x0c\x3e\xfb\
\xc5\x53\x28\xf0\x43\xb8\x66\x49\xf7\x6b\xf9\x52\x87\xd7\xbe\x54\
\x01\xc8\x55\x8f\xba\x4e\xad\x4b\x0e\x90\xaf\x85\xde\xb7\xc2\x92\
\x3d\x4f\xa6\xb3\xde\xa3\xb1\x71\xeb\xda\xd0\xf5\x15\x98\xb3\x6e\
\xa9\x00\x6c\x34\xa4\x6b\x18\xff\xe0\x11\x7f\x5a\x17\x53\xd4\x13\
\x0b\x59\x6f\xe4\xee\xbd\xe2\xa5\xc1\xcb\x4b\x7c\x6d\x8c\x75\x87\
\x35\xa8\xfa\xb7\x1c\xdd\x65\xd9\x3c\x8f\x1f\x19\xfe\x9e\xcf\x1e\
\x37\xbd\xc9\xba\x78\x26\x6f\x46\x00\x68\xf2\xff\x81\x99\x94\x9e\
\xe9\x3f\xbf\x19\x01\x42\xd3\xf4\xfc\xbd\x9c\x9e\xa5\x7e\x03\x51\
\x6c\x25\xa1\x92\x95\x0a\x77\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x06\x6d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x06\x34\x49\x44\x41\x54\x78\x5e\xad\x97\x5b\x6c\x54\xc7\
\x1d\xc6\x7f\x73\xce\xd9\x8b\xbd\xf6\xfa\x16\xa0\xbe\x00\x0e\xb2\
\x69\x63\x24\x42\x4a\x21\x22\xa1\x2d\x95\x62\xa5\x2f\xee\x4b\x68\
\x2b\x95\xa6\x55\xa5\xc6\x60\x55\xaa\xda\xb4\xaa\xfa\x56\x09\x55\
\xca\x03\x94\x27\xda\x07\x84\x14\x29\xad\xc4\x8b\xa5\x52\x83\x79\
\x08\xc5\x18\x39\x0e\x69\xd3\x84\x9a\x9b\x63\x6a\xec\xb2\x04\x1b\
\x3b\xbb\xf6\x7a\x8f\xbd\xbb\xde\xb3\x67\xa6\xc3\x68\x85\xe5\x72\
\x6c\x88\xc9\x27\x7d\xfa\x9f\x9d\x87\xfd\x7e\xf3\x9f\x99\x73\x11\
\x4a\x29\x82\x24\x84\x78\x05\x78\x9e\xc7\x6b\x48\x29\xf5\x77\xd6\
\x28\x27\x20\xb8\x43\xbb\x01\x68\x97\x52\xbe\xc6\x63\x64\x59\xd6\
\x07\x1a\xf6\xbb\x40\xb7\x06\x39\xff\x14\x00\x26\xfc\xb7\xed\xf5\
\xe2\x60\x5d\x44\x44\x6e\xce\x89\x8a\x2b\x57\xae\x50\x5d\x53\x8d\
\x40\x00\xa0\x50\x08\x65\x28\x41\x29\x66\xd3\x69\x5e\xa9\x17\x2f\
\xbc\xb4\x4e\x6c\x3b\xf1\x1f\xb9\x47\x83\x7c\x5b\x43\x4c\x3c\x4d\
\x07\xf6\xff\x60\x8b\xdd\x2c\x25\xf8\x4a\x32\x3c\x3c\x4c\x65\x65\
\x25\x2b\xc9\x75\x5d\x1e\xc0\x6e\xa9\xb0\x22\x1b\xa2\x2a\x72\x3f\
\xa7\xea\x81\xb5\x03\x08\x2d\x05\x48\xa1\x0d\xf4\x5d\xbc\x48\x2e\
\x97\xc3\x2f\x16\x51\x4a\x91\xcf\xe7\x59\x5c\x5c\xa4\x50\x28\x50\
\xd4\x63\xb5\xb5\xb5\x94\x01\x58\x80\xf8\x82\xf6\x80\x01\x00\x36\
\x44\x05\x1f\x0f\xbc\x4b\x3e\x3b\x8f\x85\x44\x95\x32\xe2\xb6\xc4\
\xb6\x04\x21\x21\x70\x3e\x53\x6c\x8c\x3b\x80\x44\x2a\x04\xf0\x9c\
\x10\x02\xe0\xcb\x40\x05\x50\x0f\x34\x60\xc4\x48\x69\x9f\x24\x02\
\x01\x4e\x9c\x38\x21\x00\x81\x05\xd2\x87\x96\x96\x67\x09\x65\x6d\
\x14\xe5\x28\xa5\xb4\x41\x08\x58\x57\x19\x25\xe2\xd8\x44\x42\x16\
\xc3\x13\x73\x5c\xbc\x3d\x41\xf7\x58\x8e\x5c\x24\xbe\xa9\xbd\x7d\
\xf7\xef\x2d\xcb\x5a\xdc\xb1\x63\x47\x59\x55\x55\x95\xd3\xd8\xd8\
\x18\x7e\xe0\x86\x86\x86\xd0\xa5\x4b\x97\xdc\xae\xae\xae\x08\xf0\
\xd6\xaa\x1d\x00\x13\x44\x55\x2c\xc2\x73\xd5\x31\xf2\x9e\x4f\xa1\
\x28\x91\x4a\x61\x09\x41\xd8\xb1\x88\x86\x6c\xe6\x72\x05\x12\xa2\
\x8e\x3f\x9f\xff\x2b\x0d\x4d\x1b\x01\x22\xc0\x66\x96\x84\xef\xfb\
\x78\x9e\x47\x75\x75\xb5\x9e\x50\x4b\xf4\xea\xd5\xab\x87\x84\x10\
\x28\xa5\xde\x5a\x11\xc0\xb2\x41\x00\xb6\x2d\x90\xda\xb6\x14\x38\
\x08\xa4\x12\x58\xc2\x8c\x1b\x8f\x4c\xb9\xec\x7b\xf5\x3b\xd4\x37\
\x36\x11\x7c\x2f\xc1\x84\x67\x32\x19\xca\xcb\xcb\xcd\x66\x3e\x76\
\xec\xd8\x26\xbd\x7f\x0e\x2e\x41\x2c\x01\xd0\xd9\xd9\xa9\x0e\x1d\
\x3a\xa4\x6c\x21\x08\x59\x10\xb6\x2d\x1c\xc7\xc6\x42\x50\xb4\xcd\
\x1a\x1b\x00\xc7\xb2\x88\x38\x96\xae\x02\x60\x59\x78\x10\xc0\xdc\
\xdc\x1c\x35\x35\x35\x06\x20\x1a\x8d\x72\xe4\xc8\x91\xcd\xc0\x03\
\x88\x1b\x1a\xa2\xc7\x62\xb9\xb0\x6d\x74\x30\x66\x8d\xcb\x23\x36\
\xb1\xa8\xa3\xc7\x2c\x32\x8b\x1e\x93\x99\x1c\x63\xa9\x79\xee\xcc\
\x2e\xe8\xdf\x45\x72\xf9\x3c\xab\xc8\x2c\x41\x36\x9b\x35\xa7\x66\
\xe9\xff\x6d\x0e\x1c\x38\xb0\x1e\xe8\x00\x58\x06\xa0\xb4\x74\x16\
\x8e\x0d\xe1\x90\xc0\x53\x8a\xb1\xa4\xcb\x8d\x8c\x83\xd3\xb2\x97\
\xa6\x7d\xaf\xb3\xb5\xe3\x17\xac\xdb\xfb\x3a\x0d\x2f\xb4\x73\xfb\
\xce\x24\xfd\xfd\xfd\x24\x93\x49\x94\x52\xe6\xfa\xf8\xf1\xe3\xe8\
\xba\xac\x33\xe7\xce\x9d\xe3\xe8\xd1\xa3\x1c\x3e\x7c\x98\xde\xde\
\x5e\x12\x89\x84\x04\x2c\xa1\x15\xdc\x01\xed\xff\xce\xe6\xf8\xe7\
\x94\x4f\x6b\xc7\xcf\xf8\xe6\x2f\xdf\x26\xf6\xf5\x37\x99\x7c\xa6\
\x83\x6b\xfe\x2e\xae\xf1\x2d\x64\x6b\x17\xad\x7b\x7f\x4e\x5e\x56\
\x73\xfa\x6f\x67\xd1\x77\x4d\xee\xdc\x9d\xe2\x1b\xaf\x76\x72\xfd\
\xfa\x75\x03\xa0\x67\x6b\xd6\x3f\x16\x8b\x99\xeb\x78\x3c\x8e\xe3\
\x38\x25\x38\x04\xc0\x23\x00\x96\x25\x98\xca\x41\x3a\xde\xca\xfe\
\xdf\xbd\x4d\xd5\xae\xd7\x28\x84\x62\x08\xdb\x42\x59\x82\x6c\x41\
\x72\x7f\x66\x91\x4f\xee\x66\x18\xb8\xea\x72\xfa\x1f\x61\x64\xd5\
\x5e\xae\x8f\xdc\x67\x32\xd7\xc6\x85\x0f\xee\x9b\x00\xed\x87\xa1\
\xcd\xcd\xcd\xb4\xb5\xb5\x19\x37\x35\x35\xa1\xa1\x14\x20\x83\x1f\
\x46\x16\xdc\x71\x15\xdf\xff\xe9\x6f\xa8\x6c\xd8\x48\xe2\xec\x3b\
\x4c\x8f\x5e\xc3\x89\x94\xb1\xb5\x79\x07\x9b\x5b\xb6\xf3\x49\x79\
\x25\x63\x09\x97\xcf\x66\xf2\xdc\x9d\xce\x32\xa1\xed\x88\x0d\x4c\
\x27\xe7\xd8\xb7\x2b\xca\xfa\x25\x00\x33\x7b\x3d\x6b\xea\xea\xea\
\x00\xcc\x75\x2a\x95\x32\x00\x4a\x2b\x10\xa0\xb9\x5a\x70\xe1\x9d\
\x63\x28\x2c\xca\xe6\xc6\xd9\x10\x8f\x52\x94\x92\x7b\xc3\x7d\x24\
\x65\x05\xdb\xda\x7f\x4c\x4d\xdb\xcb\x7c\x3c\x9c\x66\xd2\x5f\xc0\
\xcd\x78\x2c\xcc\x6b\x2f\x78\x20\x00\xb5\x74\x3a\x42\xa1\x90\x09\
\x2d\xdd\xea\x1f\x8e\x01\x2a\xf8\x3e\x60\xc1\xc6\xb8\xa0\x50\x1c\
\x23\x1c\x8b\x53\xb7\xa5\x96\x92\x78\x76\x7d\x05\xe9\xac\xc7\x68\
\xff\x9f\x98\xae\xbc\x4c\xcb\xf6\x83\xb8\x0b\x61\xbc\x82\xa4\x58\
\x94\x78\xda\x21\xc7\x42\x2d\xaa\x80\xe3\x69\xa0\x96\xd5\x15\x01\
\x00\xd6\xc7\x43\x84\xca\x23\xfc\xbf\x6a\x63\x21\x9e\xa9\x0c\x73\
\xe1\xdf\x83\xec\xd9\xf9\x13\xca\xa3\x0e\xb9\x32\x47\x03\x28\x03\
\x61\x6b\x00\x16\x4b\x21\xa5\x1c\x25\x30\x2a\x15\xa4\x5c\x05\x40\
\x58\xa5\x2a\xcc\xf5\x23\xfa\x70\x6c\x86\xf1\x59\x8f\xef\xfd\xfa\
\x8f\xdc\xca\xd4\xe0\x44\x5c\xa2\x11\x1b\xcf\x93\x14\x3d\x07\xd3\
\x01\xa5\x90\x52\xf2\x50\x6a\x59\x01\x56\x05\x10\x08\x4c\x0d\x04\
\x18\x9d\x76\xf9\xd5\x5f\x86\x18\xbd\xb7\x80\x3d\x93\x67\xd3\xba\
\x32\xf2\x79\x5f\xbb\x68\xea\xce\xaf\xd4\x70\xf9\xdd\xe0\x25\x00\
\x9e\x78\x09\x4c\xb8\x10\x3c\xa2\xd6\x2f\x55\xf2\x87\x1f\x3e\xcf\
\xf5\x4f\x33\x44\x1b\xb7\xb1\xf3\xc5\x97\x59\x12\x5c\x4e\x60\x8e\
\xdb\x53\x01\x28\xc0\x12\x25\x00\x6d\xd4\x52\x7d\xb1\xb5\x96\xdd\
\x5b\xe2\x74\xbf\x97\xa5\x6a\xf7\x57\xf9\xd1\x1b\x6f\x10\xa0\xb5\
\x03\x98\xb5\x37\xd5\xd8\x08\x01\xd2\xcb\x53\x70\x53\x78\xf3\x33\
\x14\xb3\x69\x0a\x19\x1f\x25\xfd\xd5\x82\xd6\x08\xf0\xf0\x29\xe7\
\xe3\xe7\x33\x14\xe6\x75\xa8\x0e\xd6\x00\xcb\xf7\x89\x10\xc1\x33\
\x7d\xfa\xd7\x72\x8c\xb2\x13\x37\x03\xc7\x01\xb2\x1e\xfe\xad\x94\
\xcc\x6f\xf7\x44\x54\x03\xd8\x5f\x70\x07\x08\x92\x09\xfd\xd7\x3d\
\x3f\xfd\x7e\x42\xa6\xcf\xdf\xf6\xef\x02\xee\x76\x3b\xfc\x92\x06\
\xa8\xe3\x73\xca\x75\x5d\x1f\x70\x57\xed\x00\x40\x32\xab\x0a\x1f\
\x7e\x2a\xd3\xbd\xb7\xfc\xd4\xcd\x69\x39\x05\xf4\x03\x97\x74\x68\
\xbf\x10\xa2\xd3\xb6\xed\xaf\x7d\x9e\x25\x58\x58\x58\xf0\x07\x06\
\x06\xd2\x27\x4f\x9e\x9c\x06\xba\x83\x00\x3e\x1a\x49\xca\xad\xe3\
\xb3\x2a\xd7\x3b\xe2\xa7\x6e\x4c\xcb\xd1\x52\xe8\x59\x1d\x74\x8b\
\x00\x3d\x09\xc0\xd0\xd0\x90\xdb\xd3\xd3\x93\xd2\x4e\xcf\xce\xce\
\x9e\x2e\xbd\x1d\xdf\x08\x02\xe8\xee\xea\x29\x00\x8c\x04\x84\x06\
\x85\xaf\x08\x30\x35\x35\x55\xd0\x2f\x22\xa9\x53\xa7\x4e\x25\xc7\
\xc7\xc7\x2f\x03\x67\x81\x7e\x1d\xec\xae\xb8\x09\x4b\xdf\x76\xda\
\x4f\x26\x85\x01\x40\x08\x40\x61\x5a\xfc\xde\xe0\x60\xba\xbb\xbb\
\x3b\xa5\xdf\x8a\xcc\x24\xd0\x5e\xed\x73\xcd\x61\xed\x9a\x77\x33\
\x6e\x11\x60\x70\xf0\xfd\x74\x5f\x5f\x5f\xfa\xcc\x99\x33\xa6\xc5\
\xa5\xd0\x8f\x78\x02\x89\xb5\x9e\x63\x21\x44\x18\x78\x13\xd8\x4f\
\x69\x73\x06\xb4\xf8\xb1\xfa\x1f\xbd\xfa\x2a\x5f\xf2\xd8\x15\x9d\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\xa3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\x35\x49\x44\x41\x54\x58\xc3\xe5\
\x97\xcd\x8f\x54\x45\x14\xc5\x7f\xb7\xea\xd6\x7b\xaf\xdb\x6e\xc7\
\xf9\x40\x9d\x89\x46\x4d\x34\x99\x44\x8d\x1a\x48\x98\xc4\x8c\x1f\
\x1b\xfe\x02\x4c\x5c\xf1\x07\x18\x16\x2e\x4d\x5c\x6b\x58\xc3\x8e\
\xc4\x8d\x1b\x17\xce\x82\x68\x74\x41\x5c\x18\x0d\xe2\xc4\xc6\x00\
\x3d\x60\x50\x51\x19\x60\x02\xa2\x0e\x0c\x83\xd3\xfd\x5e\xf7\x94\
\x8b\xaa\xee\xf9\x60\xe6\x0d\x84\x51\x16\x56\x52\xa9\xce\x7b\xb7\
\xeb\x9e\x3a\xf7\xd4\xa9\x7a\xea\xbd\xe7\x7e\x36\xe5\x3e\xb7\x3e\
\x80\x5d\xbb\x76\xbd\x03\xec\xfd\x8f\xf2\x4e\x35\x1a\x8d\x03\xeb\
\x19\xd8\xbb\xef\xbd\xa3\x3b\x1f\x1f\x76\x00\x9c\x3c\x3a\xcf\xcc\
\x97\x37\x58\x9c\xef\xdc\x53\xa6\xda\xa0\xf2\xdc\x6b\x03\xbc\xb8\
\x67\x10\x80\x8b\x7f\x16\x7c\xf8\xee\x1e\x80\xdb\x00\x70\xfc\xec\
\x1c\xdf\x3f\x30\x04\x78\x2e\xfd\xb8\xc0\xfe\xb7\xce\x6f\xcb\x72\
\x0f\x1d\x79\x9a\x0b\x23\x96\xd3\x9f\x1f\x64\xfc\xd5\x7d\x9b\x6b\
\x40\x45\xb0\x16\x40\x78\x70\x2c\x23\xcb\xb2\x6d\x01\x30\x30\x96\
\x61\x8d\x50\x1b\x7c\x14\x23\x25\x22\x14\x2b\xd8\x18\x91\xd5\x95\
\x73\xe7\xce\x83\x2a\xb8\x04\xd2\x14\xb2\x0c\xd2\x2c\x8c\x49\x0a\
\x49\x12\xde\x77\x3a\x90\xe7\x90\xb7\xa1\xd5\x82\x76\x2b\x8e\x6d\
\x28\x72\xb2\xfa\x38\xd6\x0a\xe3\xaf\xbc\x49\x6b\xf1\xfa\xe6\x00\
\xac\x15\xac\x15\x04\xb0\x46\xd8\xbd\x7b\xe7\x16\x6b\xeb\x86\xae\
\x80\x5a\xa8\x56\x81\xea\x6d\x51\x8d\xaf\x04\xb5\x82\xf7\xa0\xa6\
\x84\x01\x67\x05\x35\x82\x08\xa8\x0a\x95\x2c\xc3\x23\x20\x1e\x08\
\xc0\xf0\x1e\x2f\x02\xde\x23\x12\x26\x15\x7c\x88\x23\xc4\x21\x1e\
\x3c\x21\x5e\x40\x4d\x58\x18\x40\xd7\x4a\x89\x06\xac\xa0\xda\x63\
\x00\x9a\x33\xbf\x05\x8a\x53\x07\x69\x02\x95\x04\xb2\x34\xf6\x04\
\x12\x07\x4e\xa1\xe8\x40\x5e\x40\x2b\x8f\xbd\x05\x4b\x39\xb4\x73\
\xc8\x0b\x54\x87\x71\x3d\x00\x2a\xe5\x25\x70\x31\x40\xd5\x30\x39\
\xf9\xd2\xd6\x0a\xf3\x3e\xd0\xaf\x16\xaa\x1b\x8b\xf6\xd8\x27\x61\
\x61\xbd\x1c\x25\x25\x20\x00\xf0\x81\x8d\x34\x4d\xa3\x3a\xc3\xb3\
\x98\x11\x89\x6c\x07\xda\x63\x09\x56\x98\x5f\x29\x46\xfc\x61\xcd\
\x72\x7f\x61\x1d\x2d\xd1\x80\x3a\x09\x54\x49\x18\x4f\x34\x2f\xe0\
\x9d\x85\xc4\x21\x89\xc3\x67\x09\x92\x69\xd8\x11\x89\xe2\x13\x87\
\x58\x8b\xef\x76\x91\xbc\x80\xbc\x03\xed\x02\xdf\x6a\x23\xed\x02\
\xf2\x02\x9f\x77\x50\x1d\x45\xd5\x20\x78\x3a\xeb\x54\x78\x9b\x06\
\x9c\x33\x78\x0f\x03\x8f\x24\xbc\xfe\xf2\xf3\x77\x68\xe8\x36\x68\
\xa4\xbe\xf1\xeb\xc6\xfc\xdf\xb1\x04\x52\x5e\x82\x44\x4d\x5f\x84\
\x8f\x0d\xa5\x38\xe7\xb6\xc5\x88\x9e\x18\x4b\xb9\x76\xb3\x03\x08\
\x9d\x52\x11\xaa\x90\xb8\x50\xef\x5a\xc5\x30\x7d\xb1\xcb\x40\xc5\
\xb0\x0e\xf4\x26\xad\x57\xf9\x55\x2e\xe1\xe1\xc6\xd2\x32\xf5\xcc\
\x70\x7d\xc9\x84\x2d\xe9\x4a\x19\x10\x9c\x1a\xc0\x73\xe5\x66\x97\
\x2b\x37\xbb\xac\x51\x57\x3f\xd7\xaa\x64\x7e\xc5\x27\xa2\x29\xac\
\x05\x15\xc3\x9c\x0b\xb5\x77\xa6\x6c\x17\xa8\xc1\xa9\x20\xc8\x1a\
\x35\xaf\x9b\x35\x1a\x8f\x59\x31\x9e\xfe\x7b\xe9\xef\x14\x00\xf1\
\x82\xef\x9b\x58\x30\x2b\x57\x56\x02\x55\x21\xd1\x90\xfc\xe7\x53\
\xdf\xf2\xeb\x99\x13\x2c\x2d\xde\xb8\xa7\xfa\x57\x6a\x03\x3c\xf5\
\xec\x4e\x9e\x79\x61\x02\x0f\xa8\x33\x5b\x31\x10\x03\x7c\x87\xf7\
\xf7\xbf\xc1\xc2\xc2\x02\xb7\x6e\xdd\xa2\x28\x0a\x44\x04\x6b\x2d\
\xd6\x5a\x54\x15\x55\xc5\x39\x87\xaa\x62\xad\xc5\x98\xf0\xdf\xe5\
\xe5\x65\xf2\x3c\xef\xf7\x23\xcd\xf9\xb8\xf2\x2d\x18\x70\x56\x50\
\x17\x18\xdc\x31\x3a\xb6\x72\x4f\x38\x7e\x9c\xe9\xe9\x69\x8c\x31\
\x78\xef\x99\x98\x98\x60\x72\x72\xf2\x8e\x59\xd8\x31\x3a\xd6\xdf\
\x86\xae\xd4\x09\x55\x70\x36\xac\xa2\x56\xaf\xf7\x6b\x39\x33\x33\
\xc3\xd0\xd0\x10\xd6\x5a\xbc\xf7\x34\x9b\xcd\xbb\x02\x50\xab\xd7\
\x70\xd1\x88\xb4\xd4\x88\x14\x9c\x0b\x27\x5c\xa0\x2a\x00\xa8\x56\
\xab\x64\x59\xd6\xa7\xb8\x37\xde\x69\x73\x1a\xa9\x17\x41\x4b\xad\
\x38\x1e\xc7\xbd\x23\xb4\xd7\x8c\x31\x88\x44\xdf\x8f\x3a\xb8\xab\
\x9b\xaf\x35\xa8\x0d\xf3\xf6\x18\x2e\x3d\x8e\x83\x29\x6d\xe3\xd5\
\xdb\x12\xa9\xf7\xe5\x56\x6c\xad\xf4\x91\x0e\x8e\x0c\xc3\xf2\xef\
\xdb\x02\xe0\xa1\x91\x61\xd4\xc2\xb5\x2b\x97\x59\x9c\xbf\xbe\x05\
\x03\x36\xf8\xc0\x60\xad\x02\x0b\xdb\xc3\xc0\x50\xad\xc2\xec\xc5\
\x4b\x9c\xfd\xee\x1b\xce\x9f\x9c\x9e\x03\xa6\x36\x04\x60\x24\x5e\
\x4a\x05\x12\x0b\xed\x91\x27\xa9\x3d\x0c\x6f\x1f\x38\xc8\x66\xc7\
\x81\x27\x3a\xf1\x2a\xe7\x35\x1e\x32\x81\x14\x28\xba\x70\xf9\xea\
\x55\xce\x34\x8e\xd1\xfc\xfa\x8b\xb9\xd9\x1f\x4e\x1d\x02\x0e\x6f\
\x08\xe0\xb3\x8f\x3e\xe0\xa7\xd3\x27\x57\x99\xe9\xda\xa3\x86\x55\
\xe6\xbb\x1e\x04\x1b\x3c\x5f\x1d\x6f\x7c\x77\xee\x8f\xd9\x5f\x0e\
\x01\x87\x1b\x8d\xc6\x5f\x1b\x01\x98\x9a\xfe\xf4\xe3\x7f\xf5\x73\
\x6c\x7d\xf2\x35\x00\xe2\xb7\xda\x81\xff\xdd\xd7\xf1\x3f\x4d\xf0\
\x4b\xb9\xe8\x46\x89\xaf\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x08\x19\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x07\xab\x49\x44\x41\x54\x58\xc3\xad\
\x57\x5b\x50\x93\x67\x1a\xf6\xca\xce\xec\xcc\xf6\x62\x2f\xbc\xd9\
\xe9\xce\xec\x6e\xbd\xda\xd9\x9b\xb5\xce\xba\x3b\x7b\xb0\xad\xcc\
\x7a\xb1\xce\xce\x3a\xb3\x76\x54\x70\x75\xdb\xe2\x81\xd6\xb6\x54\
\x04\xbb\xa5\x20\x6d\xc1\x82\x06\x08\x07\x51\x42\x80\x80\x80\x02\
\x21\x81\x10\x92\x40\x48\x10\x73\x24\x21\x67\x72\x80\x04\x42\x20\
\x9c\x09\x47\xb5\x54\x78\xf6\xfb\x7e\x13\x16\x30\x58\x8b\x7d\x67\
\x9e\xf9\x2f\x92\xfc\xcf\xfb\x3e\xcf\xfb\xbe\xdf\x97\x5d\x00\x76\
\xfd\x98\x20\xf1\x0b\x82\x14\x02\x03\xc1\x75\x82\x03\xcf\xfd\xfe\
\x8f\x48\xbc\x9b\x20\xe1\x57\xaf\xef\xb5\x2a\x8c\xd6\x65\xdb\x02\
\x60\x19\x1e\x5b\x09\x27\xf1\x33\xfa\x19\x81\x22\xfc\xdc\x3e\x76\
\x48\x7e\x8a\xa0\xb9\xb6\x59\x1c\x32\xcf\xad\x42\x39\xfe\x1d\x44\
\xf6\x51\xd8\xc7\xe6\xe8\x87\x86\x3d\x7b\xf6\x58\x53\x52\xae\x2c\
\xca\x3a\x3a\x10\x4e\xe2\xe5\x49\xc3\xc4\x31\x04\xb7\x3e\x49\xf9\
\x2c\x60\x9b\x5d\x59\x53\x4d\x03\x4d\xb6\x11\x34\xeb\xfb\x20\x31\
\x79\x60\x19\x9d\xc5\xbb\xef\xbe\x3f\xc5\xab\xbe\x83\xf1\x89\x29\
\x4c\x4f\xcf\xae\x92\xef\xd7\xbc\x74\x02\x11\x9f\x0f\xbe\x1d\xe3\
\xb2\x04\x43\x4f\xb4\x33\x40\x8b\x7b\x06\xcd\x3d\x2e\x34\xeb\xec\
\xa8\x57\xf6\x20\x87\x53\x85\x32\x5e\x35\x43\xbc\xb0\xf4\x90\x81\
\xc1\x60\x5c\x26\xbf\x4b\x7c\xe1\x04\x48\x1c\x24\x38\x41\xfd\xdd\
\xea\x73\x27\xf1\xb9\x27\x04\x48\x87\x97\xc1\xd7\xbb\x20\x22\x55\
\x37\xdc\x37\xa2\xb8\x4e\x88\x2c\x56\x3e\xcc\x56\xdb\x3a\x71\x04\
\x2c\x16\x6b\x2c\xfc\xce\xe7\x27\x10\x91\x36\x93\x95\x3f\x46\x7d\
\xa5\xfe\x12\xc4\x6f\xf4\x59\x31\xb6\x02\x7e\xef\x20\x5a\x7b\x9c\
\xe0\x3f\x30\xa1\x4c\x28\x43\x46\x0e\x1b\xb2\x0e\xf9\x26\xd2\xf9\
\xc5\x65\xcc\x2d\x2c\x21\x34\xbf\x88\xbd\x7b\xf7\x5a\xc9\x3b\x7e\
\xba\x6d\x02\x24\x7e\x43\x90\x46\x3d\x35\x13\x69\x75\xb3\x80\xd2\
\x3f\x0f\xcb\xc4\xe2\x9a\x50\xa1\x5a\xb4\x6c\xf1\x59\xa0\xb6\xa0\
\xa6\x5d\x8d\x2f\xb2\x73\x71\xb7\x9e\xff\x0c\x31\x25\x9d\x09\xcd\
\x63\x62\x6a\x06\x83\x43\x81\x27\xe4\xdd\xbc\x2d\xd3\xb0\x3b\x92\
\x03\x33\x26\xd4\x53\xb5\xd3\xfb\x58\x4f\x88\xc5\x03\x21\x88\x2c\
\x43\x50\xba\x46\xd0\xed\x09\x42\xe5\x9b\x42\x9b\x73\xfc\xa9\xcf\
\x5a\x1b\xee\x2a\x74\xc8\xbc\xc9\x45\x09\xa7\x6c\x93\xcf\x9b\x88\
\x27\xa7\x11\x18\x1d\xc3\x80\x6f\x08\xa2\xd6\xd6\x25\xc2\x51\xdb\
\x28\x12\x87\xc6\x1f\xaf\x82\x2f\x62\x94\x4d\x89\x24\x90\x22\xea\
\x52\x2d\x9a\x42\xab\xe8\x18\x79\x04\xa1\xc5\xcf\x10\x53\x74\xf6\
\x0d\xa3\xd3\xe1\x87\xd4\x3c\x80\x16\xbd\x03\x0d\x5d\x06\x14\xd5\
\x0a\x90\x91\x95\x0d\x2f\x79\xf1\xc6\xaa\xa9\xd4\xb3\x73\x0b\x4c\
\xc5\x94\xd8\xdd\xef\x85\xc9\x62\x05\xb7\xbc\x12\xa5\xe5\x95\x4b\
\x13\xf3\xcb\xab\x23\x0f\x01\x37\xd9\x11\xe6\xd9\x15\x84\x97\x15\
\x13\x06\xcb\x3c\xd0\x68\xf2\xa3\xdd\xee\x5f\x27\x96\x3b\x86\x20\
\xb3\x78\xd7\x7d\xe6\x08\xa4\xf8\x3c\x33\x1b\x2a\x8d\x36\xaa\xdc\
\x53\x33\x21\x8c\x8e\x8d\x33\x15\xd3\x26\xe4\x37\x09\xf1\xc1\xc5\
\x8f\x51\x73\xaf\x01\xbe\x65\x60\xfc\x11\xa0\x23\x13\x23\xf2\xce\
\xa1\xbe\x5d\xb9\xb8\x51\x01\x83\x81\x74\x74\x4d\xa7\x1e\x0a\x67\
\x80\xa9\xb8\xdd\xea\x83\xd8\xe8\x42\x93\xca\xcc\xf8\x7c\xe5\xcb\
\x2c\x88\xda\x24\x51\x89\xa7\x67\xe7\x18\x1b\x86\x86\x47\x60\x77\
\x38\x49\x82\x3a\x24\x7c\xf8\x21\xae\xb3\x0b\xe1\x99\x5c\x80\x6f\
\x09\xd0\x90\xde\xe1\x0f\x2c\x81\xab\x1f\xc4\x7d\xef\x04\xdd\x07\
\x1d\x61\xeb\xff\x9f\xc0\x1d\xb9\x16\x1d\xf6\x21\x48\xcc\xfd\x4f\
\x7d\xee\xd4\x22\x9d\x55\x84\xaa\x9a\xba\x4d\x3e\x47\xe4\x8e\xf8\
\x3c\x3c\x12\x84\xd3\xdd\x0f\xbd\xc1\x88\xc2\xe2\x62\x9c\x7e\x2f\
\x1e\x3d\x03\x01\xf4\x2f\x02\x83\x84\xbc\xc5\xff\x2d\xee\x3a\x43\
\x28\x51\x91\xf7\xf6\x05\xf1\x4e\xdc\xbf\x7d\x84\x33\x69\xe3\x20\
\x18\xf4\x33\xab\xe0\xc9\x54\x68\x35\x38\xd1\xd8\xdd\x0b\x9e\x58\
\x89\xac\x5c\xf6\x33\x3e\x47\xaa\x9e\x9c\x9e\x65\xe4\xee\xf7\x0e\
\xa2\xd7\x6c\x41\x43\x03\x1f\x27\x62\xe3\x20\xe9\xd6\xc0\x45\xcf\
\x01\x52\x90\x24\xb8\x86\xb2\x9e\x00\x6e\xb4\xdb\x50\xd1\x1b\x44\
\x85\xce\x8b\x4a\x7e\x0b\x6d\xbe\x9b\x5b\x27\xd1\xa0\x99\xf8\x16\
\x65\x22\x05\xee\x29\xf4\x28\x13\xc8\x90\x78\x35\x0b\x1a\xad\x3e\
\xaa\xdc\x63\x13\x93\xf0\x0d\x0d\xc3\x66\xef\x83\xb4\x5d\x8e\xc4\
\x4b\x97\x90\xc3\xca\xc3\xd4\x63\xc0\x4e\x7a\x49\x31\x4e\xfa\x89\
\x94\x7f\x5b\x3b\x84\x7c\x85\x13\x25\x6a\x1f\x4a\xd5\x03\xe8\xf2\
\x30\xa3\x28\x22\xf8\xf9\x33\x09\x74\x8f\x2e\xa1\xa8\xbe\x15\xa5\
\x7c\x09\xb2\x4a\x2a\xf0\xcf\xe3\x71\x51\xe5\xf6\x07\x46\xd1\xe7\
\xf2\x40\xab\x37\x20\xfd\x6a\x06\x92\xbf\x48\x83\xcd\x37\x02\x27\
\xa9\xda\x40\x1a\x4c\xe0\x7b\x88\x52\x9d\x1f\x45\xdd\xfd\x0c\x71\
\x41\x97\x1b\xc5\xdd\x1e\x88\x9c\x41\xfc\xf9\xcd\xb7\x5d\x84\xeb\
\x6c\xb4\x43\xd0\x28\xf7\x4e\x23\xa7\xfc\x1e\xb2\x4b\xab\xf1\x51\
\xea\x57\x48\xfe\x6f\xea\xfa\x58\x51\xb9\x47\x82\xe3\xf0\x0c\xf8\
\x60\x34\x99\x51\xc9\xab\xc2\xfb\x67\xcf\x41\xfe\x40\x03\x3f\xe9\
\x6e\xb2\x8d\x19\xb9\x6f\x69\x06\x19\xd2\x9b\x2a\x2f\x72\xe5\x0e\
\xe4\x75\xf6\xa1\xf0\xbe\x1b\x1c\x95\x1b\xf9\x9c\xca\x29\xc2\x53\
\xb8\xdd\x29\xdc\x2b\x76\x04\x90\x51\xc8\xc5\x95\x6b\x79\x38\x11\
\x9f\x80\x9b\xb7\x6e\x33\x63\x15\x91\xdb\x6a\x73\x40\x22\x6d\xc7\
\x85\x84\x0f\x50\x74\xbb\x0c\xf3\x2b\x80\x9f\x34\x58\xf7\x24\x20\
\x1c\x7c\x84\x4a\xd3\x18\x38\xfa\x61\x86\x9c\x56\xfd\x55\xb3\x1e\
\xac\x0e\x3b\xb8\x3a\x1f\xd9\x21\x1e\x7a\x2f\xe0\x13\xbc\xba\x5d\
\x02\x26\xbe\xc1\x83\x94\x6f\xd8\x38\x9f\x9c\x8a\x03\x7f\x3d\x04\
\x63\xaf\x99\xe9\x6e\x2a\xb7\x46\xd7\x83\xa4\xcb\xc9\x48\xff\x3a\
\x8b\x8c\xd5\x3c\x53\xb5\x71\xf6\xa9\xdc\x35\xf6\x69\x5c\x97\x59\
\x19\xd9\xbf\x6e\x21\xa7\xa0\xd4\x82\x74\xbe\x1a\x57\x9b\x34\x60\
\xc9\xcc\x10\xbb\x82\xf8\xe5\xaf\x5f\xa7\x67\xc0\x3b\xe1\x75\x1f\
\x35\xcc\x35\xdd\x66\x7c\x94\x96\x85\xb8\x73\x17\xf1\x97\x43\x31\
\x4c\xd5\x74\x99\xf0\xaa\xaa\x71\xfa\xf4\x19\x68\xcc\x0e\x8c\x92\
\x2d\x36\x14\x1e\xab\x5a\xc7\x0c\x78\xe6\x71\x70\x0d\x23\x4c\xa3\
\x65\x8a\x0c\x8c\xec\xb4\xfa\x9c\xb6\x5e\x94\x74\x39\xd0\x66\xf7\
\xaf\x1e\x3d\x11\x4b\x47\x2e\x6f\xc3\x79\x13\x35\x2c\x5c\x99\x1a\
\xf1\x97\x3e\xc7\xd1\xd8\x33\xf8\x38\x31\x09\x86\x5e\x13\x1a\x9b\
\x04\xf8\xdd\x1b\xfb\x51\x4f\xd4\xf1\x90\x99\xee\x9a\x00\xaa\xad\
\x93\x60\x2b\x5d\x0c\x39\xf5\xbc\xf0\xbe\x67\xbd\xea\xcc\x16\x3d\
\x4a\x55\x1e\x08\x6d\x01\x94\xd4\xf1\x43\xe1\x65\x53\x40\xf0\xca\
\xf7\x25\x60\x2b\x6e\x6a\xc7\xa9\x84\x44\xc4\x1c\x39\x8a\xdc\x7c\
\x36\x5a\x5a\xc5\x38\x14\x13\x83\x2f\x39\x35\xc8\x14\x6a\x98\xe6\
\xa2\xd5\xd2\x27\xf5\x9a\x7a\x4c\x13\xa1\x49\x64\xb7\x99\x90\xdb\
\x6e\x46\xb9\xda\x8d\x06\xa5\x76\x39\x2c\x39\x3d\xf9\x4e\x13\xec\
\xd9\x72\xd4\x47\x0d\x3b\xab\x46\x88\x63\xff\x39\x8f\xdf\xee\xfb\
\x3d\x1a\xf9\x02\x9c\xbf\x90\x80\x93\xf1\x17\x70\xa3\xad\x07\x19\
\xc4\x4f\x4a\x14\xe9\x6e\xba\x58\xa8\xef\x2c\xfa\x94\x98\x50\x28\
\xb7\x40\xe9\x0e\x3c\xf9\x57\xec\x29\x2a\x77\x2d\xc1\x67\x04\xfb\
\xb6\xb9\xe4\x44\x8d\xbe\xcc\xb2\x5a\xfc\xe3\xe4\x19\x1c\x3c\xf4\
\x37\xb0\x72\xf3\xb0\xef\xc0\x1f\x50\x20\xd1\x21\x89\x27\x65\x2a\
\xa6\x4b\x85\x3e\xbf\x21\xd5\x46\xe4\x2e\x90\x5b\x21\xb0\x0c\xae\
\xe5\xdc\xe2\xd2\x11\x13\x13\xe4\x87\x6f\x3c\xaf\x3c\xe7\x96\x15\
\x35\x9c\x69\x45\xe5\xf8\xfb\xb1\x58\x1c\x3f\x19\x87\x37\xf6\xef\
\xc7\x8d\x3a\x11\x92\xab\xa4\x0c\x21\xed\x70\xea\x35\x55\x21\x8b\
\x34\x5b\xc9\x03\x37\x2a\x34\x6e\xd4\x49\x3a\x17\xc3\x72\x73\x08\
\x8e\x6d\x95\xfb\x87\x24\xe0\x4a\x65\x73\x70\xe4\xf8\x29\x1c\x3e\
\x7c\x98\x8c\x63\x2e\x32\x05\x2a\x5c\x22\xd5\xd3\x5d\x7e\x4d\xdc\
\x0b\x36\xe9\x74\x76\xa7\x1d\x77\x8c\xe4\x88\xb6\xf9\x9e\x84\xb7\
\x1a\x95\xfb\x22\xbd\x49\xfd\x80\x0b\x6d\xf4\x04\x32\x4a\x78\x4c\
\x0f\x9c\x4b\x49\xc3\xb5\xa6\x2e\x7c\xc2\x6d\x65\x36\x59\xf1\x83\
\x01\x5c\x97\x9a\xc1\x51\x7b\x20\xf3\x04\xd7\xce\x25\x26\x05\x36\
\xc8\xfd\xc7\x9d\xc8\x1d\xd5\x82\xdc\x1a\x01\xce\x5e\x4e\x45\x81\
\x58\x85\x78\xf6\x5d\x5c\xa9\x55\x90\xaa\xfb\xc0\x96\xdb\x50\xad\
\x75\xe3\xae\x54\x41\x2f\x10\xca\x0d\x72\xbf\xba\xd3\x6a\xa3\x05\
\xb7\xa2\x51\xf8\x1d\xaf\x43\x8d\x4f\xb9\x2d\x88\xcb\xe6\xe1\x9a\
\x48\x8f\xaa\x1e\x2f\x9a\x35\xe6\xc7\x7f\x7a\xf3\x2d\x57\x78\xac\
\xa8\xdc\xaf\xbd\xac\xdc\xd1\xe2\x08\xdd\x05\x5c\x75\x1f\xde\xcb\
\xaf\x45\xb9\x76\x00\x32\x67\x60\xf5\xc2\xa7\x97\xa9\xdc\xf7\x08\
\xd2\xa9\xdc\x3b\xf8\x03\xf3\xc2\xf1\x13\x82\xca\x1c\xee\x9d\x50\
\x0b\x39\x94\xb8\x0d\xc2\xc8\x16\xa3\x17\x87\xc3\x2f\x22\xf7\x0e\
\xff\xda\x6d\x8a\xdd\x61\x99\xd5\x1b\xb6\xd8\x6b\xbb\x5e\x32\xbe\
\x2f\x89\xff\x01\x66\xb9\x5f\xfc\x11\x80\x3d\xcf\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x2b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\xbd\x49\x44\x41\x54\x58\xc3\xed\
\x57\x6b\x4c\x93\x57\x18\x3e\x23\x71\xc9\x32\xe9\x16\x97\xa8\x54\
\x65\x38\x9d\x02\x15\xf6\x03\x87\x32\x93\x01\x66\x2c\x5b\x70\xc4\
\x30\xff\x60\xa2\x2e\x1a\x3a\x1d\x4e\x03\xba\x31\x89\x5b\xb3\x80\
\xd9\x0c\x84\x02\x19\x58\x1c\x14\x8b\x85\xb2\x82\x95\x5e\xe4\x66\
\x0b\x8e\x31\xf8\xc3\x46\xcb\x2d\x81\x15\xdc\xa8\xc2\x1c\x1b\xb7\
\x6a\x69\x91\xf2\xee\xbc\x87\xaf\x0c\xdc\xb8\x0d\x61\xd9\xb2\x93\
\x3c\xed\x97\xf3\x7d\xfd\xde\xe7\xbc\xef\xf3\x5e\x4a\x00\x80\xfc\
\x93\x20\xff\x0a\x02\x74\x09\x28\x44\x14\xd9\x14\x71\x14\x01\x2b\
\x46\x80\xae\xdd\x64\xdd\xc6\x66\x22\x4c\xf8\x95\xc4\x8b\x47\xc8\
\xa1\xd3\xf7\xc8\x8e\x97\x3b\x38\x32\x61\x2b\x41\x20\x85\x9c\xbe\
\x30\x48\x2e\xdd\x80\x19\x40\x32\xab\x79\x4d\xf4\xbe\xfb\x72\x13\
\x68\x64\x06\x91\x04\x5e\xa3\x51\xf4\x06\xee\x85\x47\xf5\xd0\xbd\
\x83\xcb\x4d\x20\x9b\x9d\xf6\x40\x74\x2f\xbd\x16\x32\x3d\x20\x89\
\x3f\x48\xa5\x2c\x1b\x01\x8c\x31\x79\xc1\xbb\x9d\x88\x4b\xc6\xd7\
\xc6\x26\x0e\xa0\x10\xb9\xfd\x42\xfe\xc5\x2b\x36\x46\x8c\x12\x5c\
\x4e\x02\x93\xa7\xa7\xa7\x0d\xcc\xd3\x39\xb9\x98\x63\x36\x14\x0a\
\xd2\xe4\xa3\x2b\x41\x20\x8c\x29\x9e\x2a\xdf\x37\x47\xeb\xdc\x7b\
\xb5\xcc\x89\x9e\x40\x44\x96\x54\x83\x2b\x2c\x0b\x36\x46\x48\x08\
\x13\xf5\x64\x2a\x7b\x2e\x54\x03\x01\xf8\x03\x37\xbf\xc0\x0e\x34\
\x2a\x54\xdf\x62\x88\x52\xd5\x2c\x58\x03\x74\x1d\x16\x08\x04\x7a\
\x45\x55\xf5\xc8\xa0\x6d\x74\xc2\xd4\x73\xf7\x21\xbe\x73\x51\x95\
\x90\xae\x8f\xd0\x13\xcf\xe5\x94\x83\x87\xb4\x02\x9e\xcc\x2e\x03\
\xd4\x06\xdd\xaf\x99\xcb\xb0\xaf\xaf\xaf\x3e\xbf\xd2\x60\xb5\xdb\
\xed\x80\xf8\x79\xe4\x3e\xc4\x5e\xab\xb4\xb9\x88\x2f\x86\x80\x27\
\xd3\xc0\x67\xf9\x8e\x19\xf5\x60\xd7\x5e\x33\xba\x76\xda\x73\xee\
\x68\xd8\xc7\xc7\x47\x9f\xab\xab\xb0\x0e\x0f\x0d\xc1\x10\x87\xb2\
\xf6\x2e\xe7\x96\x37\xf7\x77\x73\x61\xd8\xbd\xe8\x5e\x80\x2f\x66\
\x9a\xa0\x86\xdf\xa9\x36\x42\xf7\xf0\x03\xd8\x19\x9f\xd4\xcf\xa5\
\xe7\x1a\x8a\x98\x2d\x7e\xfe\x6d\x97\x54\x1a\x6b\x5f\x5f\x1f\xb8\
\xd0\xd1\x73\x07\x62\x72\x15\x56\x4e\xc4\x87\x97\xd4\x8c\x30\x14\
\xe9\x15\xb7\x1e\x38\x1c\x0e\x40\xa4\xd6\x19\x31\x9e\x85\x9b\x05\
\x7e\x6d\xa9\x25\x1a\x5b\x97\xd9\x0c\xe6\x2e\x0a\xf3\x24\x14\xdf\
\x36\x8e\x7b\xbd\x1e\xd1\xcd\x42\xc8\x09\x6f\xa9\x04\x3c\xd1\xbd\
\x56\xab\x15\x10\x77\x7f\x1b\x84\xf3\x92\x5c\xbb\x52\xa9\x84\xfa\
\xfa\x7a\x30\x99\x4c\x0c\x75\xdf\x35\xc1\x51\xb1\x64\x18\xc9\x51\
\x44\x3e\xb6\x76\xcc\xb4\x40\x4f\x93\x5f\x7e\xd3\xd6\xdf\xdf\x0f\
\x32\x99\x0c\x44\x22\x11\xa8\x54\x2a\x90\x4a\xa5\xa0\xd1\x68\x20\
\x4b\x5b\x39\xbe\xe9\x95\xe0\x1f\xb8\x53\xaf\x79\x2c\xf3\x00\x97\
\x8e\x22\x9e\xc7\x86\xe6\x53\x29\x19\xf6\x82\x82\x02\xe6\xe2\xa0\
\xa0\x20\xe0\xf1\x78\x60\xb1\x58\x40\x5b\x5e\x01\xfb\xcf\x26\x0c\
\x2d\xa6\x53\xce\x67\x94\xcf\x09\x4c\x83\xe2\x5b\x7b\xe6\xc2\x60\
\x9a\xb2\x14\x14\x0a\x05\x88\xc5\x62\xc8\xcc\xcc\x84\xa2\xa2\x22\
\x50\xab\xd5\xd0\xd9\xd9\xc9\x60\xec\xfe\xc9\xb9\xc9\xdb\xa7\x75\
\x2e\xb7\xcf\x4b\x80\xae\xb7\xd8\x29\x70\x0e\xc0\x6a\x97\xac\x78\
\x88\xca\x7f\x82\xe2\x29\x89\x0e\x3e\x97\x2b\x21\x5b\x96\x0f\x07\
\x63\xe3\x47\x84\x1f\x26\xd8\x92\x72\x64\x8e\x6f\x1a\xbf\x07\xa3\
\xd1\x08\x2d\xad\x2d\xf0\xcb\xc0\x20\x1c\x38\xf1\xbe\x05\xb3\x62\
\xc1\x04\x5c\x69\x84\x85\x85\x84\x46\xdc\x26\xe7\x32\xac\x2c\xcf\
\x33\xb5\x13\xec\x3b\xe3\xba\xd3\x33\xaf\x82\xe5\xfe\x7a\x89\x06\
\x9e\xde\xfc\x62\x1b\xf7\x3c\x92\x8d\x7b\x66\xab\x4f\x5b\xca\x35\
\xed\x58\x43\x43\x3d\x34\x34\x34\x80\xa5\xb7\x17\x32\x14\xc5\xc3\
\xf3\xe9\xc0\x65\x3c\x92\xe5\x28\x9e\x36\x5d\xe5\x9c\x2a\x32\x78\
\x7d\xf4\x83\x2e\x5a\x6c\x12\x31\x0c\x1b\x25\xea\x71\xf7\x2f\xcb\
\x27\xef\x05\x87\x5f\xfe\xd3\xe4\x44\x0b\x4c\x68\xf4\xc9\x3e\x75\
\x95\x1e\x0c\x06\x03\xb4\xb7\xb7\xc3\xd7\xc6\x96\x31\xae\x81\x09\
\x66\xf1\x36\x6d\x38\x68\x3c\x49\x3a\x3a\x65\xf8\x62\x81\x83\x44\
\xbd\x57\x43\xb6\x0a\x5e\x9b\x2a\xc3\x94\x5c\xb0\x42\x0f\xab\x24\
\xb4\x04\x9f\x4a\xaa\x9b\x43\x37\x31\x28\xd4\x4f\xf2\x0a\xc7\x74\
\x3a\x1d\xd4\xd6\xd6\x82\xc9\x7c\xdb\xb9\x61\x9b\xf7\x5f\xea\x62\
\xb2\xe5\x7e\x9c\x75\x1f\x0d\xf3\xb2\xd4\x4e\xf2\xf6\xb1\xeb\x2e\
\xb6\xae\x94\xc3\x90\x6c\x97\x55\xc1\x4b\x57\xab\x80\x9c\x4d\x6e\
\x5a\xd0\x1c\x49\xbd\xb1\xe7\x88\xb0\xef\xca\x57\xc5\x50\x5a\x5a\
\x0a\x1d\x3f\xf6\x4c\x04\x06\x87\x74\x3c\xaa\x0b\xc2\x84\x46\x8d\
\x07\xc8\x6f\x02\xd9\xf9\xaa\x7e\x9a\xf1\x30\x46\x8e\x36\x20\xaf\
\xbc\x4a\x78\x43\x69\x00\x92\x28\x1d\x98\xcd\x95\xb3\x79\xc3\x7d\
\x3d\xbf\xf9\x44\x6a\xa6\x5d\x2e\x97\x43\x53\x4b\x2b\x44\x1c\x7b\
\xf7\xce\xf4\x14\x25\xae\xf1\x8a\xf5\x77\x9c\xf5\x70\x02\xc2\xd9\
\x0f\x89\xd1\x81\x03\x4f\x8e\xf7\xdc\xd2\x69\xe7\xf3\xdf\x75\xfc\
\x6f\x14\x2e\x36\xd2\xef\xd8\x17\x69\x49\xbe\x2c\x9d\xc8\xd3\x96\
\x3b\xa7\x0f\x31\x8c\x25\xc6\xdf\x9f\xba\x77\x5f\x71\x35\xa0\x41\
\x6c\xb5\x08\x8c\xf9\x94\xf1\xe0\xf0\x33\x4b\x9a\x7c\x68\x13\x5a\
\xbd\xce\xa3\xd9\x6b\x4f\x48\xf7\x0c\x0f\xb0\x0f\xfe\xf3\x87\xc8\
\xf9\x2f\xee\xb9\x49\x6e\x00\xf6\x7b\x3e\xed\xf7\x08\x1e\x2a\x3e\
\x5d\xe5\x58\xaa\xf1\x47\x5a\xf5\xb6\x59\x0b\x11\x1d\xb3\x43\xc9\
\x91\x38\x09\x39\xf9\xa9\x96\x21\xfa\x5c\x1a\x0d\xcf\xb3\xff\xff\
\x37\xfc\x4f\x13\xf8\x1d\xe7\x87\x19\xb9\x44\xc3\x01\xcf\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x3a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\xcc\x49\x44\x41\x54\x58\xc3\xb5\
\x97\x5d\x4c\x5b\x65\x1c\xc6\x77\x6f\xbc\xd9\xe5\x12\x49\x20\x71\
\xd7\x26\xe3\x4e\x13\xb8\x70\xd1\x85\x44\xbd\x50\xe3\x10\x18\xe5\
\x2b\x2e\x26\x4a\x04\x27\x86\xaa\x8b\x99\xe0\xd0\xa2\x6c\x19\x86\
\x39\x17\xdc\x1a\x16\x98\x80\x40\x6c\xa6\x43\xca\x20\x2b\x83\x1e\
\x28\xcc\xda\xd1\x96\xd2\xd2\x4a\x7b\xfa\x01\xa5\xd0\xef\x16\x1e\
\xdf\xff\xdb\x1d\xc7\xcc\x04\x2a\x87\x93\x3c\x39\x6f\x21\x9c\xe7\
\xf7\x3c\xef\x47\x0f\x87\x00\x1c\xca\x46\xcf\xbd\xfa\xe9\xbb\x4c\
\x5a\x26\x61\x0f\x6a\x60\xca\xd9\xe9\x79\xd9\x9a\x3f\x5d\x50\xf2\
\xa5\xc1\xe9\x8f\xa7\x57\xc3\x40\x30\x02\x84\xa2\x19\xad\xc7\x32\
\x8a\x27\x81\x58\x22\x73\xbf\x79\x6b\xda\x4b\x10\x72\x02\x1c\x7b\
\xe7\xac\xda\x1c\xd8\xc8\x98\x12\x40\x84\x99\x85\xe3\x19\x91\x31\
\x29\x1a\x4b\x61\x25\x94\x44\x38\x9a\x42\x73\x87\xc6\xbe\x13\xc4\
\xff\x02\x90\x12\x93\x79\x24\xf1\xc8\x58\x92\xcf\x1f\x84\x5d\x8c\
\xc2\xe5\x09\x22\x12\x4b\xa3\xf4\xc3\xef\x4d\x34\x75\x59\x01\xb0\
\xeb\xd8\x36\xd5\x90\x9e\x3a\xfc\xcc\xb9\xe7\x5f\x2e\x11\x3f\x56\
\x9e\x45\x45\x55\x0d\x2a\x99\xde\xaf\xad\xc3\x9d\xb1\x89\xc7\x00\
\xac\xb6\x25\xfc\xb9\xe8\x87\x6b\x15\x58\xf6\x04\x10\x08\xc6\xd2\
\xaf\x9c\xbe\x70\x9f\x41\x1c\xd9\x15\x80\x5d\x87\x99\x1a\x8a\x8a\
\x8a\xcc\x92\x5a\x5b\x5b\xdd\xa4\xaf\x55\xad\xfe\xaf\x54\xdf\xa6\
\x06\x06\x06\x31\x39\x35\x85\xd9\xb9\x39\xe8\x26\x26\x50\x50\x50\
\x80\x21\xcd\x6f\x7c\xde\x49\xa6\xf9\x05\xcc\x98\x5c\x1c\xc0\xe1\
\x4f\x41\xf4\x85\xf0\x43\xaf\xce\xcd\x00\x6a\xf6\x02\x50\x43\x66\
\xd8\xe5\x8a\xc7\xe3\xf0\x7a\xbd\x48\xa7\xd3\x98\x9c\x9c\x44\x65\
\x65\x35\x66\x67\x8d\xbc\x81\x07\x66\x1b\x74\xd3\x16\x0e\x40\x32\
\x2d\x78\xf0\xdd\x8d\x51\x8f\xac\x00\xe1\x70\x18\x46\xa3\x91\x8f\
\x53\xa9\x14\x7e\xea\xed\x45\xe3\x27\x9f\x61\x86\x41\x38\x96\xdc\
\x50\x77\x75\xe3\x4c\x43\x23\xce\x35\x9d\xc7\xed\x91\x71\x5c\xbc\
\x3e\x2c\x2f\xc0\xc6\xc6\x06\xf4\x7a\xfd\x63\x40\x7d\x7d\xfd\x50\
\x32\x88\xd0\x46\x1c\x66\x9b\x0b\x82\xc1\x88\xa9\x19\x13\xac\x0e\
\x11\x97\xba\x64\x6e\x80\x00\xa6\xd8\x3a\xd8\x7e\x45\x22\x11\x94\
\x2b\x2a\x30\xae\x13\x40\xe7\x04\x6d\x57\xda\xaa\x34\xbe\x7c\x53\
\xe6\x35\x40\x66\x3a\x9d\x0e\xc3\xc3\xc3\xe8\x65\xf5\xf7\xf7\xf7\
\x43\xab\xd5\xa2\xaa\xba\x06\x63\x77\xf5\x90\x0e\x2a\x77\x90\xed\
\x04\xb6\x0e\xda\xbb\x65\x06\xa0\x79\xb7\xdb\xed\x18\x1a\x1a\x42\
\x67\x67\x27\x7a\x7a\x7a\x38\x50\x49\x69\x19\x6e\x69\xf5\x10\xd7\
\x00\x6f\x08\xb0\xf9\x00\x67\x00\xb8\xd0\x25\x33\xc0\xd6\xd6\x16\
\xdf\x09\x81\x40\x00\xa2\x28\xc2\xef\xf7\x63\x6d\x6d\x0d\xa7\x14\
\x95\xd0\xfc\xae\xe7\xa9\xc9\x7c\xc1\x0b\x98\x3d\x40\x9b\xdc\x00\
\xdb\x41\x36\x37\x37\xf9\x76\xa4\x56\x14\x15\xd5\xe8\xfb\x55\xe0\
\xa9\x1d\x81\x47\x00\xe7\x3b\x0f\x00\x80\xcc\x25\x80\x24\x33\x4f\
\x24\x12\x28\x2b\xaf\xe2\x00\x7f\xb8\x00\x8b\x98\x01\xa0\x36\x5a\
\xd5\x07\x30\x05\xff\x98\x27\x93\x3c\x3d\x4d\x49\xc9\xa9\x4a\x0e\
\xa0\xb7\xb3\x03\x89\x3d\xc5\xf8\x17\x30\xb1\x00\x7c\x71\xf5\x00\
\x00\xa4\xea\xc9\x98\x14\x8b\xc5\x50\xa6\xa8\x82\x7a\x48\xc0\x98\
\x19\xb8\x6b\x05\xe6\x9c\x99\xfb\xe7\x57\x64\x04\x90\xd2\x53\x6a\
\x02\x88\x46\xa3\xdc\x3c\x14\x0a\xa1\xb8\xb4\x02\xd7\x06\x05\xdc\
\x66\x87\xe4\xa0\x01\x1c\x64\xc4\x04\x28\x3b\x64\x06\x48\x3d\x9c\
\x73\x12\x99\xd3\xb9\x40\x20\xc5\x65\x55\xb8\xd8\x2d\xa0\x7f\x3a\
\x63\xae\x7d\x90\x69\xe0\xa3\x76\x99\x00\xfe\x5d\x3d\xa5\x26\xad\
\xae\xae\x72\x88\xb7\x4a\x2a\x70\xb9\x57\xc0\x3d\x1b\xb8\x7e\x9e\
\x01\xee\xcc\x03\x67\x2e\xed\x13\x40\xaa\x9d\x44\x8b\x8e\x92\xd3\
\x71\x4c\xdf\x01\x2b\x2b\x2b\x58\x5f\x5f\xe7\x10\x27\x59\x03\xdf\
\x74\x09\x50\x4f\x00\xbf\xcc\x65\x1a\xb8\x32\x06\x34\xec\xa7\x01\
\xc9\x58\xda\xeb\x64\x4e\x69\x29\x39\x1d\x44\x04\x40\xf5\xd3\xcf\
\xde\x7c\x5b\x81\x96\xeb\x02\x4f\x7e\x75\x1c\xb8\x71\x0f\xf8\x71\
\x2c\x9e\x7e\xbd\x4e\x6d\xa6\x37\xaa\xac\x00\x9e\x64\x2c\x6d\x37\
\x32\x25\x00\xd1\x23\xf2\xe4\x12\xcc\x1b\x27\x15\x68\xef\x11\xa0\
\xbc\x66\x5b\x7f\x4f\x35\xe2\x3c\x71\x9a\xbf\x8e\x69\xf7\xfc\x4a\
\x26\x01\x90\xa9\x24\x69\xb5\x53\x42\x32\x0f\x06\x83\x70\xb9\x5c\
\xdc\x90\x5e\x4a\xe8\xb3\xc7\xe3\x81\xdb\xed\xc6\xf1\x13\xaf\x25\
\x9f\x7d\xa1\x9c\x4c\x3b\x98\x8a\x99\x8e\x3e\xc9\x78\x47\x00\x95\
\x4a\xc5\x01\xa4\x15\x2e\xcd\x37\x19\x52\x52\x3a\xf7\x29\xb5\xc3\
\xe1\xe0\x22\xe3\xc5\xc5\x45\x0e\xf5\xe2\xf1\x97\x5c\xf4\x1e\xb9\
\x93\xe9\xae\x00\x2d\x2d\x2d\x6e\xe9\x60\xa1\xd4\xd2\x97\x0d\x8d\
\x97\x97\x97\xe1\xf3\xf9\x60\xb3\xd9\xf8\x7d\x69\x69\x89\x43\x10\
\x00\x8d\x0b\x0b\x0b\xcd\xb2\x00\xd0\xa2\x92\x52\x93\x11\x8d\xe9\
\x4e\xdf\x78\x54\x3b\x35\x60\xb5\x5a\x79\xf5\xd4\x0a\xfd\xce\x60\
\x30\x24\xf2\xf2\xf2\xee\xb3\x67\x1c\xd9\x17\x40\x53\x53\x93\x5b\
\x9a\x67\x4a\x4f\x22\x13\xaa\x9a\xc6\x16\x8b\x99\x37\x40\x9f\x47\
\x47\x47\x23\x6d\x6d\x6d\xde\xfc\xfc\x7c\x13\xfb\xdb\x41\xa6\xb2\
\xbd\x9a\xff\x27\x40\x73\x73\x33\x9f\x02\x4a\x47\x10\x54\x3f\x55\
\x3f\x3f\x3f\xcf\xeb\xd6\x68\x34\x91\xba\xba\x3a\xe7\xc3\xb4\x5d\
\x4c\x1f\x30\x1d\xcd\xc6\x78\x47\x00\xa5\x52\xe9\x76\x3a\x9d\xbc\
\x62\x4a\x4a\x6f\x3e\x94\xb4\xbe\xbe\xde\x99\x93\x93\x23\x99\x16\
\x67\x53\x75\x56\x00\x8d\x8d\x8d\x6e\x8b\xc5\x82\x81\x81\x81\x48\
\x6d\x6d\xad\x33\x37\x37\x57\x56\xd3\xdd\x00\xf8\x7f\x46\x4c\xc2\
\x41\x99\x6e\xd7\xdf\x43\x39\x56\x18\x85\x70\xc8\x04\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x07\
\x04\xca\x57\xa7\
\x00\x6e\
\x00\x65\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0a\xa8\xba\x47\
\x00\x70\
\x00\x61\x00\x73\x00\x74\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x08\xc8\x58\x67\
\x00\x73\
\x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\xc1\x59\x87\
\x00\x6f\
\x00\x70\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x07\
\x0a\xc7\x57\x87\
\x00\x63\
\x00\x75\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\x7c\x5a\x07\
\x00\x63\
\x00\x6f\x00\x70\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x1b\xbc\
\x00\x00\x00\x54\x00\x00\x00\x00\x00\x01\x00\x00\x0e\x70\
\x00\x00\x00\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x09\xc9\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x03\x58\
\x00\x00\x00\x6a\x00\x00\x00\x00\x00\x01\x00\x00\x16\x8d\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
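# --- Editor's note (not emitted by pyrcc5): importing this module runs
# qInitResources() above, after which the embedded PNGs resolve through
# Qt's resource scheme, e.g. (sketch):
#
#   import sdi_rc
#   from PyQt5.QtGui import QIcon
#   new_icon = QIcon(':/images/new.png')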
|
dvitme/odoo-addons
|
portal_partner_fix/__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Partner Fix',
'version': '8.0.1.0.0',
'category': '',
'sequence': 14,
'summary': '',
'description': """
Portal Partner Fix
==================
Let user read his commercial partner
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'portal',
],
'data': [
'security/portal_security.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/boto/exception.py
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
import boto
from boto import handler
from boto.compat import json, six, StandardError
from boto.resultset import ResultSet
class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
super(BotoClientError, self).__init__(reason, *args)
self.reason = reason
def __repr__(self):
return 'BotoClientError: %s' % self.reason
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
super(BotoServerError, self).__init__(status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
self.request_id = None
self.error_code = None
self._error_message = None
self.message = ''
self.box_usage = None
if isinstance(self.body, bytes):
try:
self.body = self.body.decode('utf-8')
except UnicodeDecodeError:
boto.log.debug('Unable to decode body from bytes!')
# Attempt to parse the error response. If body isn't present,
# then just ignore the error response.
if self.body:
# Check if it looks like a ``dict``.
if hasattr(self.body, 'items'):
# It's not a string, so trying to parse it will fail.
# But since it's data, we can work with that.
self.request_id = self.body.get('RequestId', None)
if 'Error' in self.body:
# XML-style
error = self.body.get('Error', {})
self.error_code = error.get('Code', None)
self.message = error.get('Message', None)
else:
# JSON-style.
self.message = self.body.get('message', None)
else:
try:
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException) as pe:
# What if it's JSON? Let's try that.
try:
parsed = json.loads(self.body)
if 'RequestId' in parsed:
self.request_id = parsed['RequestId']
if 'Error' in parsed:
if 'Code' in parsed['Error']:
self.error_code = parsed['Error']['Code']
if 'Message' in parsed['Error']:
self.message = parsed['Error']['Message']
except (TypeError, ValueError):
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.message = self.body
self.body = None
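    # --- Editor's sketch (not part of boto): with an XML body, the SAX
    # branch above drives endElement() below; a hypothetical error like
    #
    #   e = BotoServerError(400, 'Bad Request', body=
    #       '<Response><Errors><Error><Code>Throttling</Code>'
    #       '<Message>Rate exceeded</Message></Error></Errors>'
    #       '<RequestID>abc-123</RequestID></Response>')
    #
    # should end up with e.error_code == 'Throttling' and
    # e.request_id == 'abc-123'.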
def __getattr__(self, name):
if name == 'error_message':
return self.message
if name == 'code':
return self.error_code
raise AttributeError
def __setattr__(self, name, value):
if name == 'error_message':
self.message = value
else:
super(BotoServerError, self).__setattr__(name, value)
def __repr__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def __str__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ('RequestId', 'RequestID'):
self.request_id = value
elif name == 'Code':
self.error_code = value
elif name == 'Message':
self.message = value
elif name == 'BoxUsage':
self.box_usage = value
return None
def _cleanupParsedProperties(self):
self.request_id = None
self.error_code = None
self.message = None
self.box_usage = None
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.comment = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
super(StorageCreateError, self).__init__(status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return super(StorageCreateError, self).endElement(name, value, connection)
class S3CreateError(StorageCreateError):
"""
Error creating a bucket or key on S3.
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
"""
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
super(SQSError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(SQSError, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Detail':
self.detail = value
elif name == 'Type':
self.type = value
else:
return super(SQSError, self).endElement(name, value, connection)
def _cleanupParsedProperties(self):
super(SQSError, self)._cleanupParsedProperties()
for p in ('detail', 'type'):
setattr(self, p, None)
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
"""
def __init__(self, reason, message):
super(SQSDecodeError, self).__init__(reason, message)
self.message = message
def __repr__(self):
return 'SQSDecodeError: %s' % self.reason
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
"""
def __init__(self, status, reason, body=None):
self.resource = None
super(StorageResponseError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(StorageResponseError, self).startElement(name, attrs,
connection)
def endElement(self, name, value, connection):
if name == 'Resource':
self.resource = value
else:
return super(StorageResponseError, self).endElement(name, value,
connection)
def _cleanupParsedProperties(self):
super(StorageResponseError, self)._cleanupParsedProperties()
        for p in ('resource',):  # trailing comma: a bare ('resource') would iterate its characters
setattr(self, p, None)
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
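# --- Editor's sketch (not part of boto): the subclass hierarchy lets
# callers catch storage errors at the granularity they need. ``conn`` is
# a hypothetical boto S3 connection.
def _example_get_bucket_or_none(conn, name):
    """Sketch: treat a 404 from S3 as 'no such bucket', re-raise the rest."""
    try:
        return conn.get_bucket(name)
    except S3ResponseError as e:
        if e.status == 404:
            return None
        raise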
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
super(EC2ResponseError, self).__init__(status, reason, body)
        self.errors = [(e.error_code, e.error_message)
                       for e in self._errorResultSet]
if len(self.errors):
self.error_code, self.error_message = self.errors[0]
def startElement(self, name, attrs, connection):
if name == 'Errors':
self._errorResultSet = ResultSet([('Error', _EC2Error)])
return self._errorResultSet
else:
return None
def endElement(self, name, value, connection):
if name == 'RequestID':
self.request_id = value
else:
return None # don't call subclass here
def _cleanupParsedProperties(self):
super(EC2ResponseError, self)._cleanupParsedProperties()
self._errorResultSet = []
        for p in ('errors',):  # trailing comma: a bare ('errors') would iterate its characters
setattr(self, p, None)
class JSONResponseError(BotoServerError):
"""
This exception expects the fully parsed and decoded JSON response
body to be passed as the body parameter.
:ivar status: The HTTP status code.
:ivar reason: The HTTP reason message.
:ivar body: The Python dict that represents the decoded JSON
response body.
:ivar error_message: The full description of the AWS error encountered.
:ivar error_code: A short string that identifies the AWS error
(e.g. ConditionalCheckFailedException)
"""
def __init__(self, status, reason, body=None, *args):
self.status = status
self.reason = reason
self.body = body
if self.body:
self.error_message = self.body.get('message', None)
self.error_code = self.body.get('__type', None)
if self.error_code:
self.error_code = self.error_code.split('#')[-1]
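# --- Editor's sketch (not part of boto): per the docstring above, the body
# is an already-decoded dict, so a DynamoDB-style payload such as
#
#   e = JSONResponseError(400, 'Bad Request', body={
#       '__type': 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException',
#       'message': 'The conditional request failed'})
#
# yields e.error_code == 'ConditionalCheckFailedException'.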
class DynamoDBResponseError(JSONResponseError):
pass
class SWFResponseError(JSONResponseError):
pass
class EmrResponseError(BotoServerError):
"""
Error in response from EMR
"""
pass
class _EC2Error(object):
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
self.error_message = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
else:
return None
class SDBResponseError(BotoServerError):
"""
Error in responses from SDB.
"""
pass
class AWSConnectionError(BotoClientError):
"""
General error connecting to Amazon Web Services.
"""
pass
class StorageDataError(BotoClientError):
"""
Error receiving data from a storage service.
"""
pass
class S3DataError(StorageDataError):
"""
Error receiving data from S3.
"""
pass
class GSDataError(StorageDataError):
"""
Error receiving data from GS.
"""
pass
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
super(InvalidUriError, self).__init__(message)
self.message = message
class InvalidAclError(Exception):
"""Exception raised when ACL XML is invalid."""
def __init__(self, message):
super(InvalidAclError, self).__init__(message)
self.message = message
class InvalidCorsError(Exception):
"""Exception raised when CORS XML is invalid."""
def __init__(self, message):
super(InvalidCorsError, self).__init__(message)
self.message = message
class NoAuthHandlerFound(Exception):
"""Is raised when no auth handlers were found ready to authenticate."""
pass
class InvalidLifecycleConfigError(Exception):
"""Exception raised when GCS lifecycle configuration XML is invalid."""
def __init__(self, message):
super(InvalidLifecycleConfigError, self).__init__(message)
self.message = message
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
# START_OVER means an attempt to resume an existing transfer failed,
# and a new resumable upload should be attempted (without delay).
START_OVER = 'START_OVER'
# WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
# be retried after a time delay within the current process.
WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'
# ABORT_CUR_PROCESS means the resumable transfer failed and that
# delaying/retrying within the current process will not help. If
# resumable transfer included a state tracker file the upload can be
# retried again later, in another process (e.g., a later run of gsutil).
ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'
# ABORT means the resumable transfer failed in a way that it does not
# make sense to continue in the current process, and further that the
# current tracker ID should not be preserved (in a tracker file if one
# was specified at resumable upload start time). If the user tries again
# later (e.g., a separate run of gsutil) it will get a new resumable
# upload ID.
ABORT = 'ABORT'
class ResumableUploadException(Exception):
"""
Exception raised for various resumable upload problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableUploadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableUploadException("%s", %s)' % (
self.message, self.disposition)
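# --- Editor's sketch (not part of boto): callers are expected to branch on
# the disposition carried by the exception; a minimal policy might be:
def _example_should_retry(e):
    """Sketch: map a ResumableUploadException onto a retry decision."""
    if e.disposition == ResumableTransferDisposition.WAIT_BEFORE_RETRY:
        import time
        time.sleep(5)   # back off, then retry within this process
        return True
    if e.disposition == ResumableTransferDisposition.START_OVER:
        return True     # restart the transfer from scratch
    return False        # ABORT / ABORT_CUR_PROCESS: give up here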
class ResumableDownloadException(Exception):
"""
Exception raised for various resumable download problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableDownloadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableDownloadException("%s", %s)' % (
self.message, self.disposition)
class TooManyRecordsException(Exception):
"""
Exception raised when a search of Route53 records returns more
records than requested.
"""
def __init__(self, message):
super(TooManyRecordsException, self).__init__(message)
self.message = message
class PleaseRetryException(Exception):
"""
Indicates a request should be retried.
"""
def __init__(self, message, response=None):
self.message = message
self.response = response
def __repr__(self):
return 'PleaseRetryException("%s", %s)' % (
self.message,
self.response
)
|
abstract-open-solutions/l10n-italy
|
l10n_it_ricevute_bancarie/models/account_config.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Andrea Cometa.
# Email: info@andreacometa.it
# Web site: http://www.andreacometa.it
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012 Associazione OpenERP Italia
# (<http://www.odoo-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class AccountConfigSettings(models.TransientModel):
_inherit = 'account.config.settings'
due_cost_service_id = fields.Many2one(
related='company_id.due_cost_service_id',
help='Default Service for RiBa Due Cost (collection fees) on invoice',
domain=[('type', '=', 'service')])
def default_get(self, cr, uid, fields, context=None):
res = super(AccountConfigSettings, self).default_get(
cr, uid, fields, context)
if res:
user = self.pool['res.users'].browse(cr, uid, uid, context)
res['due_cost_service_id'] = user.company_id.due_cost_service_id.id
return res
class ResCompany(models.Model):
_inherit = 'res.company'
due_cost_service_id = fields.Many2one('product.product')
|
rven/odoo
|
addons/microsoft_calendar/models/res_users.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import requests
from odoo.addons.microsoft_calendar.models.microsoft_sync import microsoft_calendar_token
from datetime import timedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.loglevels import exception_to_unicode
from odoo.addons.microsoft_account.models.microsoft_service import MICROSOFT_TOKEN_ENDPOINT
from odoo.addons.microsoft_calendar.utils.microsoft_calendar import MicrosoftCalendarService, InvalidSyncToken
_logger = logging.getLogger(__name__)
class User(models.Model):
_inherit = 'res.users'
microsoft_calendar_sync_token = fields.Char('Microsoft Next Sync Token', copy=False)
def _microsoft_calendar_authenticated(self):
return bool(self.sudo().microsoft_calendar_rtoken)
def _get_microsoft_calendar_token(self):
self.ensure_one()
if self._is_microsoft_calendar_valid():
self._refresh_microsoft_calendar_token()
return self.microsoft_calendar_token
    def _is_microsoft_calendar_valid(self):
        # Despite the name, this returns True when the token expires within the
        # next minute, i.e. when _get_microsoft_calendar_token() needs to
        # refresh it first.
        return self.microsoft_calendar_token_validity and self.microsoft_calendar_token_validity < (fields.Datetime.now() + timedelta(minutes=1))
def _refresh_microsoft_calendar_token(self):
self.ensure_one()
get_param = self.env['ir.config_parameter'].sudo().get_param
client_id = get_param('microsoft_calendar_client_id')
client_secret = get_param('microsoft_calendar_client_secret')
if not client_id or not client_secret:
raise UserError(_("The account for the Outlook Calendar service is not configured."))
headers = {"content-type": "application/x-www-form-urlencoded"}
data = {
'refresh_token': self.microsoft_calendar_rtoken,
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
}
try:
dummy, response, dummy = self.env['microsoft.service']._do_request(MICROSOFT_TOKEN_ENDPOINT, params=data, headers=headers, method='POST', preuri='')
ttl = response.get('expires_in')
self.write({
'microsoft_calendar_token': response.get('access_token'),
'microsoft_calendar_token_validity': fields.Datetime.now() + timedelta(seconds=ttl),
})
except requests.HTTPError as error:
if error.response.status_code == 400: # invalid grant
                # Delete the refresh token and make sure it's committed
with self.pool.cursor() as cr:
self.env.user.with_env(self.env(cr=cr)).write({'microsoft_calendar_rtoken': False})
error_key = error.response.json().get("error", "nc")
error_msg = _("Something went wrong during your token generation. Maybe your Authorization Code is invalid or already expired [%s]", error_key)
raise UserError(error_msg)
def _sync_microsoft_calendar(self, calendar_service: MicrosoftCalendarService):
self.ensure_one()
full_sync = not bool(self.microsoft_calendar_sync_token)
with microsoft_calendar_token(self) as token:
try:
events, next_sync_token, default_reminders = calendar_service.get_events(self.microsoft_calendar_sync_token, token=token)
except InvalidSyncToken:
events, next_sync_token, default_reminders = calendar_service.get_events(token=token)
full_sync = True
self.microsoft_calendar_sync_token = next_sync_token
# Microsoft -> Odoo
        recurrences = events.filter(lambda e: e.is_recurrent())  # note: reassigned below before use
synced_events, synced_recurrences = self.env['calendar.event']._sync_microsoft2odoo(events, default_reminders=default_reminders) if events else (self.env['calendar.event'], self.env['calendar.recurrence'])
# Odoo -> Microsoft
recurrences = self.env['calendar.recurrence']._get_microsoft_records_to_sync(full_sync=full_sync)
recurrences -= synced_recurrences
recurrences._sync_odoo2microsoft(calendar_service)
synced_events |= recurrences.calendar_event_ids
events = self.env['calendar.event']._get_microsoft_records_to_sync(full_sync=full_sync)
(events - synced_events)._sync_odoo2microsoft(calendar_service)
return bool(events | synced_events) or bool(recurrences | synced_recurrences)
@api.model
def _sync_all_microsoft_calendar(self):
""" Cron job """
users = self.env['res.users'].search([('microsoft_calendar_rtoken', '!=', False)])
microsoft = MicrosoftCalendarService(self.env['microsoft.service'])
for user in users:
_logger.info("Calendar Synchro - Starting synchronization for %s", user)
try:
user.with_user(user).sudo()._sync_microsoft_calendar(microsoft)
except Exception as e:
_logger.exception("[%s] Calendar Synchro - Exception : %s !", user, exception_to_unicode(e))
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sympy/utilities/tests/test_codegen.py
|
from StringIO import StringIO
from sympy.core import symbols, Eq, pi, Catalan, Lambda, Dummy
from sympy.utilities.codegen import CCodeGen, Routine, InputArgument, Result, \
CodeGenError, FCodeGen, codegen, CodeGenArgumentListError, OutputArgument, \
InOutArgument
from sympy.utilities.pytest import XFAIL, raises
from sympy.utilities.lambdify import implemented_function
# import test:
#FIXME: Fails due to a circular import with core
# from sympy import codegen
#FIXME-py3k: Many AssertionErrors here, perhaps related to unicode;
#FIXME-py3k: some are just due to an extra space at the end of the string
def get_string(dump_fn, routines, prefix="file", header=False, empty=False):
"""Wrapper for dump_fn. dump_fn writes its results to a stream object and
this wrapper returns the contents of that stream as a string. This
auxiliary function is used by many tests below.
The header and the empty lines are not generated, to facilitate the
testing of the output.
"""
output = StringIO()
dump_fn(routines, output, prefix, header, empty)
source = output.getvalue()
output.close()
return source
def test_Routine_argument_order():
a, x, y, z = symbols('a x y z')
expr = (x+y)*z
raises(CodeGenArgumentListError, 'Routine("test", expr, argument_sequence=[z, x])')
raises(CodeGenArgumentListError, 'Routine("test", Eq(a, expr), argument_sequence=[z, x, y])')
r = Routine('test', Eq(a, expr), argument_sequence=[z, x, a, y])
assert [ arg.name for arg in r.arguments ] == [z, x, a, y]
assert [ type(arg) for arg in r.arguments ] == [
InputArgument, InputArgument, OutputArgument, InputArgument ]
r = Routine('test', Eq(z, expr), argument_sequence=[z, x, y])
assert [ type(arg) for arg in r.arguments ] == [
InOutArgument, InputArgument, InputArgument ]
from sympy.tensor import IndexedBase, Idx
A, B = map(IndexedBase, ['A', 'B'])
m = symbols('m', integer=True)
i = Idx('i', m)
r = Routine('test', Eq(A[i], B[i]), argument_sequence=[B, A, m])
assert [ arg.name for arg in r.arguments ] == [B.label, A.label, m]
def test_empty_c_code():
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [])
assert source == "#include \"file.h\"\n#include <math.h>\n"
def test_empty_c_code_with_comment():
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [], header=True)
assert source[:82] == (
"/******************************************************************************\n *"
)
# " Code generated with sympy 0.7.1 "
assert source[158:] == ( "*\n"
" * *\n"
" * See http://www.sympy.org/ for more information. *\n"
" * *\n"
" * This file is part of 'project' *\n"
" ******************************************************************************/\n"
"#include \"file.h\"\n"
"#include <math.h>\n"
)
def test_empty_c_header():
code_gen = CCodeGen()
source = get_string(code_gen.dump_h, [])
assert source == "#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n#endif\n"
def test_simple_c_code():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [routine])
expected = (
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test(double x, double y, double z) {\n"
" return z*(x + y);\n"
"}\n"
)
assert source == expected
def test_numbersymbol_c_code():
routine = Routine("test", pi**Catalan)
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [routine])
expected = (
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test() {\n"
" double const Catalan = 0.915965594177219;\n"
" return pow(M_PI, Catalan);\n"
"}\n"
)
assert source == expected
def test_c_code_argument_order():
x,y,z = symbols('x,y,z')
expr = x + y
routine = Routine("test", expr, argument_sequence=[z, x, y])
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [routine])
expected = (
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test(double z, double x, double y) {\n"
" return x + y;\n"
"}\n"
)
assert source == expected
def test_simple_c_header():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = CCodeGen()
source = get_string(code_gen.dump_h, [routine])
expected = (
"#ifndef PROJECT__FILE__H\n"
"#define PROJECT__FILE__H\n"
"double test(double x, double y, double z);\n"
"#endif\n"
)
assert source == expected
def test_simple_c_codegen():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
result = codegen(("test", (x+y)*z), "C", "file", header=False, empty=False)
expected = [
("file.c",
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test(double x, double y, double z) {\n"
" return z*(x + y);\n"
"}\n"),
("file.h",
"#ifndef PROJECT__FILE__H\n"
"#define PROJECT__FILE__H\n"
"double test(double x, double y, double z);\n"
"#endif\n")
]
assert result == expected
def test_multiple_results_c():
x,y,z = symbols('x,y,z')
expr1 = (x+y)*z
expr2 = (x-y)*z
routine = Routine(
"test",
[expr1,expr2]
)
code_gen = CCodeGen()
raises(CodeGenError, 'get_string(code_gen.dump_h, [routine])')
def test_no_results_c():
raises(ValueError, 'Routine("test", [])')
def test_ansi_math1_codegen():
# not included: log10
from sympy import (acos, asin, atan, ceiling, cos, cosh, floor, log, ln,
sin, sinh, sqrt, tan, tanh, N, Abs)
x = symbols('x')
name_expr = [
("test_fabs", Abs(x)),
("test_acos", acos(x)),
("test_asin", asin(x)),
("test_atan", atan(x)),
("test_ceil", ceiling(x)),
("test_cos", cos(x)),
("test_cosh", cosh(x)),
("test_floor", floor(x)),
("test_log", log(x)),
("test_ln", ln(x)),
("test_sin", sin(x)),
("test_sinh", sinh(x)),
("test_sqrt", sqrt(x)),
("test_tan", tan(x)),
("test_tanh", tanh(x)),
]
result = codegen(name_expr, "C", "file", header=False, empty=False)
assert result[0][0] == "file.c"
assert result[0][1] == (
'#include "file.h"\n#include <math.h>\n'
'double test_fabs(double x) {\n return fabs(x);\n}\n'
'double test_acos(double x) {\n return acos(x);\n}\n'
'double test_asin(double x) {\n return asin(x);\n}\n'
'double test_atan(double x) {\n return atan(x);\n}\n'
'double test_ceil(double x) {\n return ceil(x);\n}\n'
'double test_cos(double x) {\n return cos(x);\n}\n'
'double test_cosh(double x) {\n return cosh(x);\n}\n'
'double test_floor(double x) {\n return floor(x);\n}\n'
'double test_log(double x) {\n return log(x);\n}\n'
'double test_ln(double x) {\n return log(x);\n}\n'
'double test_sin(double x) {\n return sin(x);\n}\n'
'double test_sinh(double x) {\n return sinh(x);\n}\n'
'double test_sqrt(double x) {\n return sqrt(x);\n}\n'
'double test_tan(double x) {\n return tan(x);\n}\n'
'double test_tanh(double x) {\n return tanh(x);\n}\n'
)
assert result[1][0] == "file.h"
assert result[1][1] == (
'#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n'
'double test_fabs(double x);\ndouble test_acos(double x);\n'
'double test_asin(double x);\ndouble test_atan(double x);\n'
'double test_ceil(double x);\ndouble test_cos(double x);\n'
'double test_cosh(double x);\ndouble test_floor(double x);\n'
'double test_log(double x);\ndouble test_ln(double x);\n'
'double test_sin(double x);\ndouble test_sinh(double x);\n'
'double test_sqrt(double x);\ndouble test_tan(double x);\n'
'double test_tanh(double x);\n#endif\n'
)
def test_ansi_math2_codegen():
# not included: frexp, ldexp, modf, fmod
from sympy import atan2, N
x, y = symbols('x,y')
name_expr = [
("test_atan2", atan2(x,y)),
("test_pow", x**y),
]
result = codegen(name_expr, "C", "file", header=False, empty=False)
assert result[0][0] == "file.c"
assert result[0][1] == (
'#include "file.h"\n#include <math.h>\n'
'double test_atan2(double x, double y) {\n return atan2(x, y);\n}\n'
'double test_pow(double x, double y) {\n return pow(x, y);\n}\n'
)
assert result[1][0] == "file.h"
assert result[1][1] == (
'#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n'
'double test_atan2(double x, double y);\n'
'double test_pow(double x, double y);\n'
'#endif\n'
)
def test_complicated_codegen():
from sympy import sin, cos, tan, N
x,y,z = symbols('x,y,z')
name_expr = [
("test1", ((sin(x)+cos(y)+tan(z))**7).expand()),
("test2", cos(cos(cos(cos(cos(cos(cos(cos(x+y+z))))))))),
]
result = codegen(name_expr, "C", "file", header=False, empty=False)
assert result[0][0] == "file.c"
assert result[0][1] == (
'#include "file.h"\n#include <math.h>\n'
'double test1(double x, double y, double z) {\n'
' return '
'pow(sin(x), 7) + '
'7*pow(sin(x), 6)*cos(y) + '
'7*pow(sin(x), 6)*tan(z) + '
'21*pow(sin(x), 5)*pow(cos(y), 2) + '
'42*pow(sin(x), 5)*cos(y)*tan(z) + '
'21*pow(sin(x), 5)*pow(tan(z), 2) + '
'35*pow(sin(x), 4)*pow(cos(y), 3) + '
'105*pow(sin(x), 4)*pow(cos(y), 2)*tan(z) + '
'105*pow(sin(x), 4)*cos(y)*pow(tan(z), 2) + '
'35*pow(sin(x), 4)*pow(tan(z), 3) + '
'35*pow(sin(x), 3)*pow(cos(y), 4) + '
'140*pow(sin(x), 3)*pow(cos(y), 3)*tan(z) + '
'210*pow(sin(x), 3)*pow(cos(y), 2)*pow(tan(z), 2) + '
'140*pow(sin(x), 3)*cos(y)*pow(tan(z), 3) + '
'35*pow(sin(x), 3)*pow(tan(z), 4) + '
'21*pow(sin(x), 2)*pow(cos(y), 5) + '
'105*pow(sin(x), 2)*pow(cos(y), 4)*tan(z) + '
'210*pow(sin(x), 2)*pow(cos(y), 3)*pow(tan(z), 2) + '
'210*pow(sin(x), 2)*pow(cos(y), 2)*pow(tan(z), 3) + '
'105*pow(sin(x), 2)*cos(y)*pow(tan(z), 4) + '
'21*pow(sin(x), 2)*pow(tan(z), 5) + '
'7*sin(x)*pow(cos(y), 6) + '
'42*sin(x)*pow(cos(y), 5)*tan(z) + '
'105*sin(x)*pow(cos(y), 4)*pow(tan(z), 2) + '
'140*sin(x)*pow(cos(y), 3)*pow(tan(z), 3) + '
'105*sin(x)*pow(cos(y), 2)*pow(tan(z), 4) + '
'42*sin(x)*cos(y)*pow(tan(z), 5) + '
'7*sin(x)*pow(tan(z), 6) + '
'pow(cos(y), 7) + '
'7*pow(cos(y), 6)*tan(z) + '
'21*pow(cos(y), 5)*pow(tan(z), 2) + '
'35*pow(cos(y), 4)*pow(tan(z), 3) + '
'35*pow(cos(y), 3)*pow(tan(z), 4) + '
'21*pow(cos(y), 2)*pow(tan(z), 5) + '
'7*cos(y)*pow(tan(z), 6) + '
'pow(tan(z), 7);\n'
'}\n'
'double test2(double x, double y, double z) {\n'
' return cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))));\n'
'}\n'
)
assert result[1][0] == "file.h"
assert result[1][1] == (
'#ifndef PROJECT__FILE__H\n'
'#define PROJECT__FILE__H\n'
'double test1(double x, double y, double z);\n'
'double test2(double x, double y, double z);\n'
'#endif\n'
)
def test_loops_c():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "C", "file", header=False, empty=False)
assert f1 == 'file.c'
expected = (
'#include "file.h"\n'
'#include <math.h>\n'
'void matrix_vector(double *A, int m, int n, double *x, double *y) {\n'
' for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
' }\n'
' for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = y[i] + %(rhs)s;\n'
' }\n'
' }\n'
'}\n'
)
assert (code == expected %{'rhs': 'A[i*n + j]*x[j]'} or
code == expected %{'rhs': 'A[j + i*n]*x[j]'} or
code == expected %{'rhs': 'x[j]*A[i*n + j]'} or
code == expected %{'rhs': 'x[j]*A[j + i*n]'})
assert f2 == 'file.h'
assert interface == (
'#ifndef PROJECT__FILE__H\n'
'#define PROJECT__FILE__H\n'
'void matrix_vector(double *A, int m, int n, double *x, double *y);\n'
'#endif\n'
)
def test_dummy_loops_c():
from sympy.tensor import IndexedBase, Idx
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'#include "file.h"\n'
'#include <math.h>\n'
'void test_dummies(int m_%(mno)i, double *x, double *y) {\n'
' for (int i_%(ino)i=0; i_%(ino)i<m_%(mno)i; i_%(ino)i++){\n'
' y[i_%(ino)i] = x[i_%(ino)i];\n'
' }\n'
'}\n'
) % {'ino': i.label.dummy_index, 'mno': m.dummy_index}
r = Routine('test_dummies', Eq(y[i], x[i]))
c = CCodeGen()
code = get_string(c.dump_c, [r])
assert code == expected
def test_partial_loops_c():
# check that loop boundaries are determined by Idx, and array strides
# determined by shape of IndexedBase object.
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m,o,p = symbols('n m o p', integer=True)
A = IndexedBase('A', shape=(m, p))
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', (o, m - 5)) # Note: bounds are inclusive
j = Idx('j', n) # dimension n corresponds to bounds (0, n - 1)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "C", "file", header=False, empty=False)
assert f1 == 'file.c'
expected = (
'#include "file.h"\n'
'#include <math.h>\n'
'void matrix_vector(double *A, int m, int n, int o, int p, double *x, double *y) {\n'
' for (int i=o; i<%(upperi)s; i++){\n'
' y[i] = 0;\n'
' }\n'
' for (int i=o; i<%(upperi)s; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = y[i] + %(rhs)s;\n'
' }\n'
' }\n'
'}\n'
) % {'upperi': m - 4, 'rhs': '%(rhs)s'}
assert (code == expected %{'rhs': 'A[i*p + j]*x[j]'} or
code == expected %{'rhs': 'A[j + i*p]*x[j]'} or
code == expected %{'rhs': 'x[j]*A[i*p + j]'} or
code == expected %{'rhs': 'x[j]*A[j + i*p]'})
assert f2 == 'file.h'
assert interface == (
'#ifndef PROJECT__FILE__H\n'
'#define PROJECT__FILE__H\n'
'void matrix_vector(double *A, int m, int n, int o, int p, double *x, double *y);\n'
'#endif\n'
)
def test_output_arg_c():
from sympy import sin, cos, Equality
x, y, z = symbols("x,y,z")
r = Routine("foo", [Equality(y, sin(x)), cos(x)])
c = CCodeGen()
result = c.write([r], "test", header=False, empty=False)
assert result[0][0] == "test.c"
expected = (
'#include "test.h"\n'
'#include <math.h>\n'
'double foo(double x, double &y) {\n'
' y = sin(x);\n'
' return cos(x);\n'
'}\n'
)
assert result[0][1] == expected
def test_empty_f_code():
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [])
assert source == ""
def test_empty_f_code_with_header():
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [], header=True)
assert source[:82] == (
"!******************************************************************************\n!*"
)
# " Code generated with sympy 0.7.1 "
assert source[158:] == ( "*\n"
"!* *\n"
"!* See http://www.sympy.org/ for more information. *\n"
"!* *\n"
"!* This file is part of 'project' *\n"
"!******************************************************************************\n"
)
def test_empty_f_header():
code_gen = FCodeGen()
source = get_string(code_gen.dump_h, [])
assert source == ""
def test_simple_f_code():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [routine])
expected = (
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"test = z*(x + y)\n"
"end function\n"
)
assert source == expected
def test_numbersymbol_f_code():
routine = Routine("test", pi**Catalan)
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [routine])
expected = (
"REAL*8 function test()\n"
"implicit none\n"
"REAL*8, parameter :: Catalan = 0.915965594177219d0\n"
"REAL*8, parameter :: pi = 3.14159265358979d0\n"
"test = pi**Catalan\n"
"end function\n"
)
assert source == expected
def test_f_code_argument_order():
x,y,z = symbols('x,y,z')
expr = x + y
routine = Routine("test", expr, argument_sequence=[z, x, y])
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [routine])
expected = (
"REAL*8 function test(z, x, y)\n"
"implicit none\n"
"REAL*8, intent(in) :: z\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"test = x + y\n"
"end function\n"
)
assert source == expected
def test_simple_f_header():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = FCodeGen()
source = get_string(code_gen.dump_h, [routine])
expected = (
"interface\n"
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"end function\n"
"end interface\n"
)
assert source == expected
def test_simple_f_codegen():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
result = codegen(("test", (x+y)*z), "F95", "file", header=False, empty=False)
expected = [
("file.f90",
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"test = z*(x + y)\n"
"end function\n"),
("file.h",
"interface\n"
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"end function\n"
"end interface\n")
]
assert result == expected
def test_multiple_results_f():
x,y,z = symbols('x,y,z')
expr1 = (x+y)*z
expr2 = (x-y)*z
routine = Routine(
"test",
[expr1,expr2]
)
code_gen = FCodeGen()
raises(CodeGenError, 'get_string(code_gen.dump_h, [routine])')
def test_no_results_f():
raises(ValueError, 'Routine("test", [])')
def test_intrinsic_math_codegen():
# not included: log10
from sympy import (acos, asin, atan, ceiling, cos, cosh, floor, log, ln,
sin, sinh, sqrt, tan, tanh, N, Abs)
x = symbols('x')
name_expr = [
("test_abs", Abs(x)),
("test_acos", acos(x)),
("test_asin", asin(x)),
("test_atan", atan(x)),
# ("test_ceil", ceiling(x)),
("test_cos", cos(x)),
("test_cosh", cosh(x)),
# ("test_floor", floor(x)),
("test_log", log(x)),
("test_ln", ln(x)),
("test_sin", sin(x)),
("test_sinh", sinh(x)),
("test_sqrt", sqrt(x)),
("test_tan", tan(x)),
("test_tanh", tanh(x)),
]
result = codegen(name_expr, "F95", "file", header=False, empty=False)
assert result[0][0] == "file.f90"
expected = (
'REAL*8 function test_abs(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_abs = Abs(x)\n'
'end function\n'
'REAL*8 function test_acos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_acos = acos(x)\n'
'end function\n'
'REAL*8 function test_asin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_asin = asin(x)\n'
'end function\n'
'REAL*8 function test_atan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_atan = atan(x)\n'
'end function\n'
'REAL*8 function test_cos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_cos = cos(x)\n'
'end function\n'
'REAL*8 function test_cosh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_cosh = cosh(x)\n'
'end function\n'
'REAL*8 function test_log(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_log = log(x)\n'
'end function\n'
'REAL*8 function test_ln(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_ln = log(x)\n'
'end function\n'
'REAL*8 function test_sin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_sin = sin(x)\n'
'end function\n'
'REAL*8 function test_sinh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_sinh = sinh(x)\n'
'end function\n'
'REAL*8 function test_sqrt(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_sqrt = sqrt(x)\n'
'end function\n'
'REAL*8 function test_tan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_tan = tan(x)\n'
'end function\n'
'REAL*8 function test_tanh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_tanh = tanh(x)\n'
'end function\n'
)
assert result[0][1] == expected
assert result[1][0] == "file.h"
expected = (
'interface\n'
'REAL*8 function test_abs(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_acos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_asin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_atan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_cos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_cosh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_log(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_ln(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_sin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_sinh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_sqrt(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_tan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_tanh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
)
assert result[1][1] == expected
def test_intrinsic_math2_codegen():
# not included: frexp, ldexp, modf, fmod
from sympy import atan2, N
x, y = symbols('x,y')
name_expr = [
("test_atan2", atan2(x,y)),
("test_pow", x**y),
]
result = codegen(name_expr, "F95", "file", header=False, empty=False)
assert result[0][0] == "file.f90"
expected = (
'REAL*8 function test_atan2(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'test_atan2 = atan2(x, y)\n'
'end function\n'
'REAL*8 function test_pow(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'test_pow = x**y\n'
'end function\n'
)
assert result[0][1] == expected
assert result[1][0] == "file.h"
expected = (
'interface\n'
'REAL*8 function test_atan2(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_pow(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'end function\n'
'end interface\n'
)
assert result[1][1] == expected
def test_complicated_codegen_f95():
from sympy import sin, cos, tan, N
x,y,z = symbols('x,y,z')
name_expr = [
("test1", ((sin(x)+cos(y)+tan(z))**7).expand()),
("test2", cos(cos(cos(cos(cos(cos(cos(cos(x+y+z))))))))),
]
result = codegen(name_expr, "F95", "file", header=False, empty=False)
assert result[0][0] == "file.f90"
expected = (
'REAL*8 function test1(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'test1 = sin(x)**7 + 7*sin(x)**6*cos(y) + 7*sin(x)**6*tan(z) + 21*sin(x) &\n'
' **5*cos(y)**2 + 42*sin(x)**5*cos(y)*tan(z) + 21*sin(x)**5*tan(z) &\n'
' **2 + 35*sin(x)**4*cos(y)**3 + 105*sin(x)**4*cos(y)**2*tan(z) + &\n'
' 105*sin(x)**4*cos(y)*tan(z)**2 + 35*sin(x)**4*tan(z)**3 + 35*sin( &\n'
' x)**3*cos(y)**4 + 140*sin(x)**3*cos(y)**3*tan(z) + 210*sin(x)**3* &\n'
' cos(y)**2*tan(z)**2 + 140*sin(x)**3*cos(y)*tan(z)**3 + 35*sin(x) &\n'
' **3*tan(z)**4 + 21*sin(x)**2*cos(y)**5 + 105*sin(x)**2*cos(y)**4* &\n'
' tan(z) + 210*sin(x)**2*cos(y)**3*tan(z)**2 + 210*sin(x)**2*cos(y) &\n'
' **2*tan(z)**3 + 105*sin(x)**2*cos(y)*tan(z)**4 + 21*sin(x)**2*tan &\n'
' (z)**5 + 7*sin(x)*cos(y)**6 + 42*sin(x)*cos(y)**5*tan(z) + 105* &\n'
' sin(x)*cos(y)**4*tan(z)**2 + 140*sin(x)*cos(y)**3*tan(z)**3 + 105 &\n'
' *sin(x)*cos(y)**2*tan(z)**4 + 42*sin(x)*cos(y)*tan(z)**5 + 7*sin( &\n'
' x)*tan(z)**6 + cos(y)**7 + 7*cos(y)**6*tan(z) + 21*cos(y)**5*tan( &\n'
' z)**2 + 35*cos(y)**4*tan(z)**3 + 35*cos(y)**3*tan(z)**4 + 21*cos( &\n'
' y)**2*tan(z)**5 + 7*cos(y)*tan(z)**6 + tan(z)**7\n'
'end function\n'
'REAL*8 function test2(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'test2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))\n'
'end function\n'
)
assert result[0][1] == expected
assert result[1][0] == "file.h"
expected = (
'interface\n'
'REAL*8 function test1(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test2(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'end function\n'
'end interface\n'
)
assert result[1][1] == expected
def test_loops():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n,m', integer=True)
A, x, y = map(IndexedBase, 'Axy')
i = Idx('i', m)
j = Idx('j', n)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "F95", "file", header=False, empty=False)
assert f1 == 'file.f90'
expected = (
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(out), dimension(1:m) :: y\n'
'INTEGER*4 :: i\n'
'INTEGER*4 :: j\n'
'do i = 1, m\n'
' y(i) = 0\n'
'end do\n'
'do i = 1, m\n'
' do j = 1, n\n'
' y(i) = y(i) + %(rhs)s\n'
' end do\n'
'end do\n'
'end subroutine\n'
) % {'rhs': 'A(i, j)*x(j)'}
assert expected == code
assert f2 == 'file.h'
assert interface == (
'interface\n'
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(out), dimension(1:m) :: y\n'
'end subroutine\n'
'end interface\n'
)
def test_dummy_loops_f95():
from sympy.tensor import IndexedBase, Idx
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'subroutine test_dummies(m_%(mcount)i, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m_%(mcount)i\n'
'REAL*8, intent(in), dimension(1:m_%(mcount)i) :: x\n'
'REAL*8, intent(out), dimension(1:m_%(mcount)i) :: y\n'
'INTEGER*4 :: i_%(icount)i\n'
'do i_%(icount)i = 1, m_%(mcount)i\n'
' y(i_%(icount)i) = x(i_%(icount)i)\n'
'end do\n'
'end subroutine\n'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
r = Routine('test_dummies', Eq(y[i], x[i]))
c = FCodeGen()
code = get_string(c.dump_f95, [r])
assert code == expected
def test_loops_InOut():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
i,j,n,m = symbols('i,j,n,m', integer=True)
A,x,y = symbols('A,x,y')
A = IndexedBase(A)[Idx(i, m), Idx(j, n)]
x = IndexedBase(x)[Idx(j, n)]
y = IndexedBase(y)[Idx(i, m)]
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y, y + A*x)), "F95", "file", header=False, empty=False)
assert f1 == 'file.f90'
expected = (
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(inout), dimension(1:m) :: y\n'
'INTEGER*4 :: i\n'
'INTEGER*4 :: j\n'
'do i = 1, m\n'
' do j = 1, n\n'
' y(i) = y(i) + %(rhs)s\n'
' end do\n'
'end do\n'
'end subroutine\n'
)
assert (code == expected % {'rhs': 'A(i, j)*x(j)'} or
code == expected % {'rhs': 'x(j)*A(i, j)'})
assert f2 == 'file.h'
assert interface == (
'interface\n'
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(inout), dimension(1:m) :: y\n'
'end subroutine\n'
'end interface\n'
)
def test_partial_loops_f():
# check that loop boundaries are determined by Idx, and array strides
# determined by shape of IndexedBase object.
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m,o,p = symbols('n m o p', integer=True)
A = IndexedBase('A', shape=(m, p))
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', (o, m - 5)) # Note: bounds are inclusive
j = Idx('j', n) # dimension n corresponds to bounds (0, n - 1)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "F95", "file", header=False, empty=False)
expected = (
'subroutine matrix_vector(A, m, n, o, p, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'INTEGER*4, intent(in) :: o\n'
'INTEGER*4, intent(in) :: p\n'
'REAL*8, intent(in), dimension(1:m, 1:p) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(out), dimension(1:%(iup-ilow)s) :: y\n'
'INTEGER*4 :: i\n'
'INTEGER*4 :: j\n'
'do i = %(ilow)s, %(iup)s\n'
' y(i) = 0\n'
'end do\n'
'do i = %(ilow)s, %(iup)s\n'
' do j = 1, n\n'
' y(i) = y(i) + %(rhs)s\n'
' end do\n'
'end do\n'
'end subroutine\n'
) % {
'rhs': 'A(i, j)*x(j)',
'iup': str(m - 4),
'ilow': str(1+o),
'iup-ilow': str(m - 4 -o)
}
assert expected == code
def test_output_arg_f():
from sympy import sin, cos, Equality
x, y, z = symbols("x,y,z")
r = Routine("foo", [Equality(y, sin(x)), cos(x)])
c = FCodeGen()
result = c.write([r], "test", header=False, empty=False)
assert result[0][0] == "test.f90"
assert result[0][1] == (
'REAL*8 function foo(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(out) :: y\n'
'y = sin(x)\n'
'foo = cos(x)\n'
'end function\n'
)
def test_inline_function():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m = symbols('n m', integer=True)
A, x, y = map(IndexedBase, 'Axy')
i = Idx('i', m)
j = Idx('j', n)
p = FCodeGen()
func = implemented_function('func', Lambda(n, n*(n+1)))
routine = Routine('test_inline', Eq(y[i], func(x[i])))
code = get_string(p.dump_f95, [routine])
expected = (
'subroutine test_inline(m, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'REAL*8, intent(in), dimension(1:m) :: x\n'
'REAL*8, intent(out), dimension(1:m) :: y\n'
'INTEGER*4 :: i\n'
'do i = 1, m\n'
' y(i) = (1 + x(i))*x(i)\n'
'end do\n'
'end subroutine\n'
)
assert code == expected
def test_check_case():
x, X = symbols('x,X')
raises(CodeGenError, "codegen(('test', x*X), 'f95', 'prefix')")
def test_check_case_false_positive():
# The upper case/lower case exception should not be triggered by Sympy
# objects that differ only because of assumptions. (It may be useful to
# have a check for that as well, but here we only want to test against
# false positives with respect to case checking.)
x1 = symbols('x')
x2 = symbols('x', my_assumption=True)
try:
codegen(('test', x1*x2), 'f95', 'prefix')
except CodeGenError, e:
if e.args[0][0:21] == "Fortran ignores case.":
raise AssertionError("This exception should not be raised!")
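# A minimal driver (not one of the tests above) showing how codegen() is used
# outside the assertions: generate the C source/header pair for one expression
# and print both generated files. The names here are made up for illustration.
def demo_codegen_c():
    x, y = symbols('x y')
    (c_name, c_code), (h_name, h_code) = codegen(
        ("area", x*y), "C", "demo", header=False, empty=False)
    print c_name
    print c_code
    print h_name
    print h_code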
|
voutilad/courtlistener
|
cl/corpus_importer/dup_helpers.py
|
import string
from django.utils.text import slugify
from django.utils.timezone import now
from lxml import html
from lxml.html import tostring
from lxml.html.clean import Cleaner
from cl.lib.string_utils import anonymize, trunc
from cl.search.models import OpinionCluster
from juriscraper.lib.string_utils import clean_string, harmonize, titlecase
import re
import subprocess
BROWSER = 'firefox'
def merge_cases_simple(new, target_id):
"""Add `new` to the database, merging with target_id
Merging is done by picking the best fields from each item.
"""
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !! THIS CODE IS OUT OF DATE AND UNMAINTAINED. FEEL FREE TO FIX IT, BUT !!
# !! DO NOT TRUST IT IN ITS CURRENT STATE. !!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
target = OpinionCluster.objects.get(pk=target_id)
print "Merging %s with" % new.case_name
print " %s" % target.case_name
cached_source = target.source # Original value is needed below.
if target.source == 'C':
target.source = 'LC'
elif target.source == 'R':
target.source = 'LR'
elif target.source == 'CR':
target.source = 'LCR'
# Add the URL if it's not a court one, replacing public.resource.org's
# info in some cases.
if cached_source == 'R':
target.download_url = new.download_url
# Recreate the slug from the new case name (this changes the URL, but the
# old will continue working)
target.slug = slugify(trunc(new.case_name, 75))
# Take the case name from the new item; they tend to be pretty good
target.case_name = new.case_name
# Add the docket number if the old doesn't exist, but keep the old if one
# does.
if not target.docket.docket_number:
target.docket.docket_number = new.docket.docket_number
# Get the citations from the new item (ditch the old).
target.federal_cite_one = new.federal_cite_one
target.federal_cite_two = new.federal_cite_two
target.federal_cite_three = new.federal_cite_three
target.state_cite_one = new.state_cite_one
target.state_cite_two = new.state_cite_two
target.state_cite_three = new.state_cite_three
target.state_cite_regional = new.state_cite_regional
target.specialty_cite_one = new.specialty_cite_one
target.scotus_early_cite = new.scotus_early_cite
target.lexis_cite = new.lexis_cite
target.westlaw_cite = new.westlaw_cite
target.neutral_cite = new.neutral_cite
# Add judge information if lacking. New is dirty, but better than none.
if not target.judges:
target.judges = new.judges
# Add the text.
target.html_lawbox, blocked = anonymize(new.html)
if blocked:
target.blocked = True
target.date_blocked = now()
target.extracted_by_ocr = False # No longer true for any LB case.
# save_doc_and_cite(target, index=False)
def merge_cases_complex(case, target_ids):
"""Merge data from PRO with multiple cases that seem to be a match.
The process here is a conservative one. We take *only* the information
from PRO that is not already in CL in any form, and add only that.
"""
# THIS CODE ONLY UPDATED IN THE MOST CURSORY FASHION. DO NOT TRUST IT.
for target_id in target_ids:
simulate = False
oc = OpinionCluster.objects.get(pk=target_id)
print "Merging %s with" % case.case_name
print " %s" % oc.case_name
oc.source = 'CR'
oc.west_cite = case.west_cite
if not simulate:
oc.save()
def find_same_docket_numbers(doc, candidates):
"""Identify the candidates that have the same docket numbers as doc after
each has been cleaned.
"""
new_docket_number = re.sub('(\D|0)', '', doc.docket.docket_number)
same_docket_numbers = []
for candidate in candidates:
old_docket_number = re.sub('(\D|0)', '', candidate.get('docketNumber', ''))
if all([len(new_docket_number) > 3, len(old_docket_number) > 3]):
if old_docket_number in new_docket_number:
same_docket_numbers.append(candidate)
return same_docket_numbers
def case_name_in_candidate(case_name_new, case_name_candidate):
"""When there is one candidate match, this compares their case names to see
if one is contained in the other, in the right order.
Returns True if so, else False.
"""
regex = re.compile('[%s]' % re.escape(string.punctuation))
case_name_new_words = regex.sub('', case_name_new.lower()).split()
case_name_candidate_words = regex.sub('', case_name_candidate.lower()).split()
index = 0
for word in case_name_new_words:
if len(word) <= 2:
continue
try:
# .index() returns a position relative to the slice, so keep the running
# offset absolute; otherwise the in-order check can match backwards.
index += case_name_candidate_words[index:].index(word)
except ValueError:
# The items were out of order or the item wasn't in the candidate.
return False
return True
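# Worked example (illustrative, not called anywhere): words of one or two
# characters such as 'v' are skipped, and each remaining word of the new name
# must appear in the candidate at or after the previous match.
def _demo_case_name_in_candidate():
    assert case_name_in_candidate('Smith v. Jones', 'Smith v. Jones et al.')
    assert not case_name_in_candidate('Jones v. Smith', 'Smith v. Jones')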
def filter_by_stats(candidates, stats):
"""Looks at the candidates and their stats, and filters out obviously
different candidates.
"""
filtered_candidates = []
filtered_stats = {
'candidate_count': 0,
'case_name_similarities': [],
'length_diffs': [],
'gestalt_diffs': [],
'cos_sims': [],
}
for i in range(0, len(candidates)):
# Commented out because the casenames in public.resource.org can be so
# long this varies too much.
# if stats['case_name_similarities'][i] < 0.125:
# # The case name is wildly different
# continue
if stats['length_diffs'][i] > 400:
# The documents have wildly different lengths
continue
# Commented out because the headnotes sometimes included in Resource.org made this calculation vary too much.
#elif stats['gestalt_diffs'][i] < 0.4:
# # The contents are wildly different
# continue
elif stats['cos_sims'][i] < 0.90:
# Very different cosine similarities
continue
else:
# It's a reasonably close match.
filtered_candidates.append(candidates[i])
filtered_stats['case_name_similarities'].append(stats['case_name_similarities'][i])
filtered_stats['length_diffs'].append(stats['length_diffs'][i])
filtered_stats['gestalt_diffs'].append(stats['gestalt_diffs'][i])
filtered_stats['cos_sims'].append(stats['cos_sims'][i])
filtered_stats['candidate_count'] = len(filtered_candidates)
return filtered_candidates, filtered_stats
class Case(object):
def _get_case_name_and_status(self):
case_name = self.url_element.get('title').lower()
ca1regex = re.compile('(unpublished disposition )?notice: first circuit local rule 36.2\(b\)6 states unpublished opinions may be cited only in related cases.?')
ca2regex = re.compile('(unpublished disposition )?notice: second circuit local rule 0.23 states unreported opinions shall not be cited or otherwise used in unrelated cases.?')
ca2regex2 = re.compile('(unpublished disposition )?notice: this summary order may not be cited as precedential authority, but may be called to the attention of the court in a subsequent stage of this case, in a related case, or in any case for purposes of collateral estoppel or res judicata. see second circuit rule 0.23.?')
ca3regex = re.compile('(unpublished disposition )?notice: third circuit rule 21\(i\) states citations to federal decisions which have not been formally reported should identify the court, docket number and date.?')
ca4regex = re.compile('(unpublished disposition )?notice: fourth circuit (local rule 36\(c\)|i.o.p. 36.6) states that citation of unpublished dispositions is disfavored except for establishing res judicata, estoppel, or the law of the case and requires service of copies of cited unpublished dispositions of the fourth circuit.?')
ca5regex = re.compile('(unpublished disposition )?notice: fifth circuit local rule 47.5.3 states that unpublished opinions should normally be cited only when they establish the law of the case, are relied upon as a basis for res judicata or collateral estoppel, or involve related facts. if an unpublished opinion is cited, a copy shall be attached to each copy of the brief.?')
ca6regex = re.compile('(unpublished disposition )?notice: sixth circuit rule 24\(c\) states that citation of unpublished dispositions is disfavored except for establishing res judicata, estoppel, or the law of the case and requires service of copies of cited unpublished dispositions of the sixth circuit.?')
ca7regex = re.compile('(unpublished disposition )?notice: seventh circuit rule 53\(b\)\(2\) states unpublished orders shall not be cited or used as precedent except to support a claim of res judicata, collateral estoppel or law of the case in any federal court within the circuit.?')
ca8regex = re.compile('(unpublished disposition )?notice: eighth circuit rule 28a\(k\) governs citation of unpublished opinions and provides that (no party may cite an opinion not intended for publication unless the cases are related by identity between the parties or the causes of action|they are not precedent and generally should not be cited unless relevant to establishing the doctrines of res judicata, collateral estoppel, the law of the case, or if the opinion has persuasive value on a material issue and no published opinion would serve as well).?')
ca9regex = re.compile('(unpublished disposition )?notice: ninth circuit rule 36-3 provides that dispositions other than opinions or orders designated for publication are not precedential and should not be cited except when relevant under the doctrines of law of the case, res judicata, or collateral estoppel.?')
ca10regex = re.compile('(unpublished disposition )?notice: tenth circuit rule 36.3 states that unpublished opinions and orders and judgments have no precedential value and shall not be cited except for purposes of establishing the doctrines of the law of the case, res judicata, or collateral estoppel.?')
cadcregex = re.compile('(unpublished disposition )?notice: d.c. circuit local rule 11\(c\) states that unpublished orders, judgments, and explanatory memoranda may not be cited as precedents, but counsel may refer to unpublished dispositions when the binding or preclusive effect of the disposition, rather than its quality as precedent, is relevant.?')
cafcregex = re.compile('(unpublished disposition )?notice: federal circuit local rule 47.(6|8)\(b\) states that opinions and orders which are designated as not citable as precedent shall not be employed or cited as precedent. this does not preclude assertion of issues of claim preclusion, issue preclusion, judicial estoppel, law of the case or the like based on a decision of the court rendered in a nonprecedential opinion or order.?')
# Clean off special cases
if 'first circuit' in case_name:
case_name = re.sub(ca1regex, '', case_name)
status = 'Unpublished'
elif 'second circuit' in case_name:
case_name = re.sub(ca2regex, '', case_name)
case_name = re.sub(ca2regex2, '', case_name)
status = 'Unpublished'
elif 'third circuit' in case_name:
case_name = re.sub(ca3regex, '', case_name)
status = 'Unpublished'
elif 'fourth circuit' in case_name:
case_name = re.sub(ca4regex, '', case_name)
status = 'Unpublished'
elif 'fifth circuit' in case_name:
case_name = re.sub(ca5regex, '', case_name)
status = 'Unpublished'
elif 'sixth circuit' in case_name:
case_name = re.sub(ca6regex, '', case_name)
status = 'Unpublished'
elif 'seventh circuit' in case_name:
case_name = re.sub(ca7regex, '', case_name)
status = 'Unpublished'
elif 'eighth circuit' in case_name:
case_name = re.sub(ca8regex, '', case_name)
status = 'Unpublished'
elif 'ninth circuit' in case_name:
case_name = re.sub(ca9regex, '', case_name)
status = 'Unpublished'
elif 'tenth circuit' in case_name:
case_name = re.sub(ca10regex, '', case_name)
status = 'Unpublished'
elif 'd.c. circuit' in case_name:
case_name = re.sub(cadcregex, '', case_name)
status = 'Unpublished'
elif 'federal circuit' in case_name:
case_name = re.sub(cafcregex, '', case_name)
status = 'Unpublished'
else:
status = 'Published'
case_name = titlecase(harmonize(clean_string(case_name)))
if case_name == '' or case_name == 'unpublished disposition':
# No luck getting the case name
saved_case_name = self._check_fix_list(self.sha1_hash, self.case_name_dict)
if saved_case_name:
case_name = saved_case_name
else:
print self.url
if BROWSER:
subprocess.Popen([BROWSER, self.url], shell=False).communicate()
case_name = raw_input("Short case name: ")
self.case_name_fix_file.write("%s|%s\n" % (self.sha1_hash, case_name))
return case_name, status
def get_html_from_raw_text(raw_text):
"""Using the raw_text, creates four useful variables:
1. complete_html_tree: A tree of the complete HTML from the file, including <head> tags and whatever else.
2. clean_html_tree: A tree of the HTML after stripping bad stuff.
3. clean_html_str: A str of the HTML after stripping bad stuff.
4. body_text: A str of the text of the body of the document.
We require all of these because sometimes we need the complete HTML tree, other times we don't. We create them all
up front for performance reasons.
"""
complete_html_tree = html.fromstring(raw_text)
cleaner = Cleaner(style=True,
remove_tags=('a', 'body', 'font', 'noscript',),
kill_tags=('title',),)
clean_html_str = cleaner.clean_html(raw_text)
clean_html_tree = html.fromstring(clean_html_str)
body_text = tostring(clean_html_tree, method='text', encoding='unicode')
return clean_html_tree, complete_html_tree, clean_html_str, body_text
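# Hedged usage sketch for get_html_from_raw_text(); the HTML sample is made
# up. The complete tree keeps <head>/<title>, while body_text is the plain
# text of the cleaned document.
def _demo_get_html_from_raw_text():
    raw = '<html><head><title>t</title></head><body><p>Some opinion text.</p></body></html>'
    clean_tree, complete_tree, clean_str, body_text = get_html_from_raw_text(raw)
    assert 'opinion' in body_text
    assert complete_tree.find('.//title') is not None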
|
FriedrichK/volunteer_planner
|
shiftmailer/management/commands/mailer.py
|
# coding: utf-8
import datetime
from django.core.management.base import BaseCommand
# from django.template.loader import render_to_string
from django.db.models import Count
from scheduler.models import Need
from shiftmailer.models import Mailer
from shiftmailer.excelexport import GenerateExcelSheet
DATE_FORMAT = '%d.%m.%Y'
class Command(BaseCommand):
help = 'Sends an email with the list of shifts for a given day to every ' \
'address configured in shiftmailer.models.Mailer. ' \
'Intended to be run from a cronjob.'
def add_arguments(self, parser):
parser.add_argument('--date', dest='print_date', default=datetime.date.today().strftime(DATE_FORMAT),
help='The date to generate scheduler for')
def handle(self, *args, **options):
mailer = Mailer.objects.all()
t = datetime.datetime.strptime(options['print_date'], DATE_FORMAT)
for mail in mailer:
needs = Need.objects.filter(location=mail.location).filter(
ending_time__year=t.strftime("%Y"),
ending_time__month=t.strftime("%m"),
ending_time__day=t.strftime("%d")) \
.order_by('topic', 'ending_time') \
.annotate(volunteer_count=Count('registrationprofile')) \
.select_related('topic', 'location') \
.prefetch_related('registrationprofile_set', 'registrationprofile_set__user')
# if it's not used anyway, we maybe shouldn't even render it? #
# message = render_to_string('shifts_today.html', locals())
iua = GenerateExcelSheet(shifts=needs, mailer=mail)
iua.send_file()
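# Hedged usage sketch: invoking this command from Python (e.g. from a test)
# instead of cron. call_command() keyword arguments map to the parser dest,
# i.e. print_date here; the date value is made up.
def run_mailer_for(date_string):
    from django.core.management import call_command
    call_command('mailer', print_date=date_string)  # e.g. '24.12.2015'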
|
josephhardinee/PyDisdrometer
|
pydsd/tests/testNasa2DVDReader_mc3e.py
|
import numpy as np
import unittest
from ..aux_readers import NASA_2DVD_reader
class TestNasa2DvdReaderMc3eSubcase(unittest.TestCase):
"Test module for the NASA_2DVD_reader class in pydsd.aux_io.NASA_2DVD_reader for mc3e files"
def setUp(self):
filename = "testdata/nasa_gv_mc3e_2dvd_test.txt"
self.dsd = NASA_2DVD_reader.read_2dvd_dsd_nasa_gv(filename)
def test_can_read_sample_file(self):
self.assertIsNotNone(self.dsd, "File did not read in correctly, returned None")
def test_dsd_nd_exists(self):
self.assertIsNotNone(self.dsd.fields["Nd"], "DSD Object has no Nd field")
def test_dsd_nd_is_dict(self):
self.assertIsInstance(self.dsd.fields["Nd"], dict, "Nd was not a dictionary.")
def test_RR_works(self):
self.dsd.calculate_RR()
self.assertIsNotNone(
self.dsd.fields["rain_rate"],
"Rain Rate is not in fields after calculate_RR()",
)
self.assertEqual(
len(self.dsd.fields["rain_rate"]["data"]),
5,
"Wrong number of time samples in rain rate",
)
def test_can_run_calc_dsd_params(self):
self.dsd.calculate_dsd_parameterization()
self.assertIsNotNone(
self.dsd.fields["D0"],
"The Field D0 did not exist after dsd_parameterization check",
)
self.assertEqual(
len(self.dsd.fields["D0"]["data"]), 5, "Wrong number of samples in D0"
)
def test_time_same_length_as_Nd(self):
self.assertEqual(
len(self.dsd.time["data"]),
self.dsd.fields["Nd"]["data"].shape[0],
"Different number of samples for time and Nd",
)
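# Standard unittest entry point, added for convenience. Because of the
# relative import above, run it through the package, for example:
#   python -m unittest pydsd.tests.testNasa2DVDReader_mc3e
if __name__ == "__main__":
    unittest.main()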
|
x8lucas8x/python-zeroless
|
tests/test_client_server.py
|
import pytest
from zeroless import (Server, Client)
class TestClientServer:
def test_server_port_property(self):
port = 1050
server = Server(port=port)
assert server.port == port
def test_client_addresses_property(self):
client = Client()
addresses = (('10.0.0.1', 1567), ('10.0.0.2', 1568), ('10.0.0.3', 1569))
for ip, port in addresses:
client.connect(ip, port)
assert client.addresses == addresses
|
ProfessorKaos64/openlierox
|
share/gamedir/scripts/tools/army_patterns/hungry_zombie_kittens.py
|
#!/usr/bin/python3 -u
#---- Includes ----#
from ..army_architect import Bot_blueprint, Army_blueprint
#---- General Settings ----#
army_name = "The Hungry Zombie Kittens"
army_description = "Previously these kittens ate cat food. But now they wan't to eat your freakin' soul! (And your body to of course, after they ripped it asunder ;,,,;)"
#---- Bot Patterns ----#
class Zombie_kitten(Bot_blueprint):
name = "Zombie Kitten"
skin = "HK-KittyZombie.png"
ai_diff = 0
ai_diff_dynamic = True
can_use_ninja = 0
can_use_ninja_dynamic = False
shield_factor = 0.6
shield_factor_dynamic = True
damage_factor = 1
damage_factor_dynamic = False
speed_factor = 0.6
speed_factor_dynamic = False
class Franken_kitten(Bot_blueprint):
name = "Franken Kitten"
skin = "HK-FrankenKitty.png"
ai_diff = 0
ai_diff_dynamic = True
can_use_ninja = 1
can_use_ninja_dynamic = True
shield_factor = 0.6
shield_factor_dynamic = False
damage_factor = 1
damage_factor_dynamic = False
speed_factor = 0.8
speed_factor_dynamic = True
class Ghoul_kitten(Bot_blueprint):
name = "Ghoul Kitten"
skin = "HK-KittyMime.png"
ai_diff = 2
ai_diff_dynamic = True
can_use_ninja = 1
can_use_ninja_dynamic = True
shield_factor = 0.5
shield_factor_dynamic = True
damage_factor = 1.0
damage_factor_dynamic = False
speed_factor = 1.6
speed_factor_dynamic = True
def generate_blueprint(challenge_amount, lives, team):
global army_name, army_description
#---- Generate a list of of bot blueprints ----#
blueprints = []
# How many of each bot per challenge amount:
blueprint_weights = {
Zombie_kitten: 2,
Franken_kitten: 1,
Ghoul_kitten: 0.5,
}
for blueprint, weight in blueprint_weights.items():
num_bots = round(weight * challenge_amount)
for x in range(num_bots):
blueprints.append(blueprint(lives, team))
#---- Scale the blueprints ----#
# Sum the current default_challenge_amount
default_challenge_amount_sum = 0
for blueprint in blueprints:
default_challenge_amount_sum += blueprint.default_challenge_amount
scale_factor = challenge_amount / default_challenge_amount_sum
for blueprint in blueprints:
blueprint.scale_challenge_amount_with(scale_factor)
#---- Return the army ----#
return Army_blueprint(army_name, army_description, blueprints)
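#---- Usage sketch ----#
# Illustrative only: how a caller builds this army. The challenge amount,
# lives and team values are made up; with challenge_amount=6 the weights above
# yield roughly 12 zombie, 6 franken and 3 ghoul kittens before scaling.
def _demo_army():
    return generate_blueprint(challenge_amount=6, lives=3, team=1)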
|
jakirkham/volumina
|
volumina/layerwidget_plugin.py
|
###############################################################################
# volumina: volume slicing and editing library
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
from PyQt4.QtDesigner import QPyDesignerCustomWidgetPlugin
from PyQt4.QtGui import QPixmap, QIcon, QColor
from volumina.widgets.layerwidget import LayerWidget
from volumina.layerstack import LayerStackModel, Layer
class PyLayerWidgetPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
QPyDesignerCustomWidgetPlugin.__init__(self)
self.initialized = False
def initialize(self, core):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
model = LayerStackModel()
o1 = Layer()
o1.name = "Fancy Layer"
o1.opacity = 0.5
model.append(o1)
o2 = Layer()
o2.name = "Some other Layer"
o2.opacity = 0.25
model.append(o2)
o3 = Layer()
o3.name = "Invisible Layer"
o3.opacity = 0.15
o3.visible = False
model.append(o3)
o4 = Layer()
o4.name = "Fancy Layer II"
o4.opacity = 0.95
model.append(o4)
o5 = Layer()
o5.name = "Fancy Layer III"
o5.opacity = 0.65
model.append(o5)
view = LayerWidget(parent, model)
view.updateGeometry()
return view
def name(self):
return "LayerWidget"
def group(self):
return "ilastik widgets"
def icon(self):
return QIcon(QPixmap(16,16))
def toolTip(self):
return ""
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return (
'<widget class="LayerWidget" name=\"layerWidget\">\n'
"</widget>\n"
)
def includeFile(self):
return "volumina.widgets.layerwidget"
|
cwmartin/rez
|
src/rez/release_vcs.py
|
from rez.exceptions import ReleaseVCSError
from rez.packages_ import get_developer_package
from rez.util import which
from rez.utils.system import popen
from rez.utils.logging_ import print_debug
from rez.utils.filesystem import walk_up_dirs
from pipes import quote
import subprocess
def get_release_vcs_types():
"""Returns the available VCS implementations - git, hg etc."""
from rez.plugin_managers import plugin_manager
return plugin_manager.get_plugins('release_vcs')
def create_release_vcs(path, vcs_name=None):
"""Return a new release VCS that can release from this source path."""
from rez.plugin_managers import plugin_manager
vcs_types = get_release_vcs_types()
if vcs_name:
if vcs_name not in vcs_types:
raise ReleaseVCSError("Unknown version control system: %r" % vcs_name)
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
return cls(path)
classes_by_level = {}
for vcs_name in vcs_types:
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
result = cls.find_vcs_root(path)
if not result:
continue
vcs_path, levels_up = result
classes_by_level.setdefault(levels_up, []).append((cls, vcs_path))
if not classes_by_level:
raise ReleaseVCSError("No version control system for package "
"releasing is associated with the path %s" % path)
# it's ok to have multiple results, as long as there is only one at the
# "closest" directory up from this dir - ie, if we start at:
# /blah/foo/pkg_root
# and these dirs exist:
# /blah/.hg
# /blah/foo/.git
# ...then this is ok, because /blah/foo/.git is "closer" to the original
# dir, and will be picked. However, if these two directories exist:
# /blah/foo/.git
# /blah/foo/.hg
# ...then we error, because we can't decide which to use
lowest_level = sorted(classes_by_level)[0]
clss = classes_by_level[lowest_level]
if len(clss) > 1:
clss_str = ", ".join(x[0].name() for x in clss)
raise ReleaseVCSError("Several version control systems are associated "
"with the path %s: %s. Use rez-release --vcs to "
"choose." % (path, clss_str))
else:
cls, vcs_root = clss[0]
return cls(pkg_root=path, vcs_root=vcs_root)
class ReleaseVCS(object):
"""A version control system (VCS) used to release Rez packages.
"""
def __init__(self, pkg_root, vcs_root=None):
if vcs_root is None:
result = self.find_vcs_root(pkg_root)
if not result:
raise ReleaseVCSError("Could not find %s repository for the "
"path %s" % (self.name(), pkg_root))
vcs_root = result[0]
else:
assert(self.is_valid_root(vcs_root))
self.vcs_root = vcs_root
self.pkg_root = pkg_root
self.package = get_developer_package(pkg_root)
self.type_settings = self.package.config.plugins.release_vcs
self.settings = self.type_settings.get(self.name())
@classmethod
def name(cls):
"""Return the name of the VCS type, eg 'git'."""
raise NotImplementedError
@classmethod
def find_executable(cls, name):
exe = which(name)
if not exe:
raise ReleaseVCSError("Couldn't find executable '%s' for VCS '%s'"
% (name, cls.name()))
return exe
@classmethod
def is_valid_root(cls, path):
"""Return True if the given path is a valid root directory for this
version control system.
Note that this is different than whether the path is under the
control of this type of vcs; to answer that question,
use find_vcs_root
"""
raise NotImplementedError
@classmethod
def search_parents_for_root(cls):
"""Return True if this vcs type should check parent directories to
find the root directory
"""
raise NotImplementedError
@classmethod
def find_vcs_root(cls, path):
"""Try to find a version control root directory of this type for the
given path.
If successful, returns (vcs_root, levels_up), where vcs_root is the
path to the version control root directory it found, and levels_up is an
integer indicating how many parent directories it had to search through
to find it, where 0 means it was found in the indicated path, 1 means it
was found in that path's parent, etc. If not successful, returns None.
"""
if cls.search_parents_for_root():
valid_dirs = walk_up_dirs(path)
else:
valid_dirs = [path]
for i, current_path in enumerate(valid_dirs):
if cls.is_valid_root(current_path):
return current_path, i
return None
def validate_repostate(self):
"""Ensure that the VCS working copy is up-to-date."""
raise NotImplementedError
def get_current_revision(self):
"""Get the current revision, this can be any type (str, dict etc)
appropriate to your VCS implementation.
Note:
You must ensure that a revision contains enough information to
clone/export/checkout the repo elsewhere - otherwise you will not
be able to implement `export`.
"""
raise NotImplementedError
def get_changelog(self, previous_revision=None, max_revisions=None):
"""Get the changelog text since the given revision.
If previous_revision is not an ancestor (for example, the last release
was from a different branch) you should still return a meaningful
changelog - perhaps include a warning, and give changelog back to the
last common ancestor.
Args:
previous_revision: The revision to give the changelog since. If
None, give the entire changelog.
Returns:
Changelog, as a string.
"""
raise NotImplementedError
def tag_exists(self, tag_name):
"""Test if a tag exists in the repo.
Args:
tag_name (str): Tag name to check for.
Returns:
bool: True if the tag exists, False otherwise.
"""
raise NotImplementedError
def create_release_tag(self, tag_name, message=None):
"""Create a tag in the repo.
Create a tag in the repository representing the release of the
given version.
Args:
tag_name (str): Tag name to write to the repo.
message (str): Message string to associate with the release.
"""
raise NotImplementedError
@classmethod
def export(cls, revision, path):
"""Export the repository to the given path at the given revision.
Note:
The directory at `path` must not exist, but the parent directory
must exist.
Args:
revision (object): Revision to export; current revision if None.
path (str): Directory to export the repository to.
"""
raise NotImplementedError
def _cmd(self, *nargs):
"""Convenience function for executing a program such as 'git' etc."""
cmd_str = ' '.join(map(quote, nargs))
if self.package.config.debug("package_release"):
print_debug("Running command: %s" % cmd_str)
p = popen(nargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=self.pkg_root)
out, err = p.communicate()
if p.returncode:
print_debug("command stdout:")
print_debug(out)
print_debug("command stderr:")
print_debug(err)
raise ReleaseVCSError("command failed: %s\n%s" % (cmd_str, err))
out = out.strip()
if out:
return [x.rstrip() for x in out.split('\n')]
else:
return []
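# --- Illustrative sketch (not part of rez): a minimal ReleaseVCS subclass,
# showing only the classmethods that find_vcs_root() relies on. The plugin
# name "example" and the ".example" marker directory are invented for this
# sketch; a real plugin would also implement the NotImplementedError methods
# above, driving its VCS through self._cmd().
import os

class ExampleVCS(ReleaseVCS):
    @classmethod
    def name(cls):
        return "example"

    @classmethod
    def is_valid_root(cls, path):
        # a root is any directory containing the marker subdirectory
        return os.path.isdir(os.path.join(path, ".example"))

    @classmethod
    def search_parents_for_root(cls):
        # like git, allow find_vcs_root() to walk up parent directories
        return True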
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationradiuspolicy_authenticationvserver_binding.py
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationradiuspolicy_authenticationvserver_binding(base_resource) :
""" Binding class showing the authenticationvserver that can be bound to authenticationradiuspolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._name = ""
self.___count = 0
@property
def name(self) :
ur"""Name of the RADIUS authentication policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the RADIUS authentication policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
ur"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
ur"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationradiuspolicy_authenticationvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationradiuspolicy_authenticationvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch authenticationradiuspolicy_authenticationvserver_binding resources.
"""
try :
obj = authenticationradiuspolicy_authenticationvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of authenticationradiuspolicy_authenticationvserver_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationradiuspolicy_authenticationvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count authenticationradiuspolicy_authenticationvserver_binding resources configued on NetScaler.
"""
try :
obj = authenticationradiuspolicy_authenticationvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of authenticationradiuspolicy_authenticationvserver_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationradiuspolicy_authenticationvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class authenticationradiuspolicy_authenticationvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.authenticationradiuspolicy_authenticationvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationradiuspolicy_authenticationvserver_binding = [authenticationradiuspolicy_authenticationvserver_binding() for _ in range(length)]
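# --- Hypothetical usage sketch (not part of the generated SDK above): fetch
# the vserver bindings of a RADIUS policy through an existing nitro session.
# The address, credentials and policy name are invented for illustration.
# from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
# client = nitro_service("10.0.0.1", "http")
# client.login("nsroot", "nsroot")
# bindings = authenticationradiuspolicy_authenticationvserver_binding.get(
#     client, "radius_pol_1")
# for binding in bindings or []:
#     print(binding.boundto, binding.priority)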
|
YoshikawaMasashi/magenta
|
magenta/music/notebook_utils.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python functions which run only within a Jupyter notebook."""
# internal imports
import IPython
from magenta.music import midi_synth
_DEFAULT_SAMPLE_RATE = 44100
def play_sequence(sequence,
synth=midi_synth.synthesize,
sample_rate=_DEFAULT_SAMPLE_RATE,
**synth_args):
"""Creates an interactive player for a synthesized note sequence.
This function should only be called from a Jupyter notebook.
Args:
sequence: A music_pb2.NoteSequence to synthesize and play.
synth: A synthesis function that takes a sequence and sample rate as input.
sample_rate: The sample rate at which to synthesize.
**synth_args: Additional keyword arguments to pass to the synth function.
"""
array_of_floats = synth(sequence, sample_rate=sample_rate, **synth_args)
IPython.display.display(IPython.display.Audio(array_of_floats,
rate=sample_rate))
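# --- Hypothetical notebook usage (assumes a music_pb2.NoteSequence `seq` is
# already in scope; passing midi_synth.fluidsynth as an alternative synth is
# an assumption about the surrounding package):
# play_sequence(seq)
# play_sequence(seq, synth=midi_synth.fluidsynth)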
|
PawarPawan/h2o-v3
|
h2o-py/tests/testdir_algos/kmeans/pyunit_ozoneKmeans.py
|
import sys
sys.path.insert(1, "../../../")
import h2o
def ozoneKM(ip, port):
# Connect to a pre-existing cluster
# connect to localhost:54321
train = h2o.import_file(path=h2o.locate("smalldata/glm_test/ozone.csv"))
# See that the data is ready
print train.describe()
# Run KMeans
my_km = h2o.kmeans(x=train,
k=10,
init = "PlusPlus",
max_iterations = 100)
my_km.show()
my_km.summary()
my_pred = my_km.predict(train)
my_pred.describe()
if __name__ == "__main__":
h2o.run_test(sys.argv, ozoneKM)
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/astroid/raw_building.py
|
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""this module contains a set of functions to create astroid trees from scratch
(build_* functions) or from living object (object_build_* functions)
"""
import inspect
import logging
import os
import sys
import types
import six
from astroid import bases
from astroid import manager
from astroid import node_classes
from astroid import nodes
MANAGER = manager.AstroidManager()
# the keys of CONST_CLS, e.g. python builtin types
_CONSTANTS = tuple(node_classes.CONST_CLS)
_JYTHON = os.name == 'java'
_BUILTINS = vars(six.moves.builtins)
_LOG = logging.getLogger(__name__)
def _io_discrepancy(member):
# _io module names itself `io`: http://bugs.python.org/issue18602
member_self = getattr(member, '__self__', None)
return (member_self and
inspect.ismodule(member_self) and
member_self.__name__ == '_io' and
member.__module__ == 'io')
def _attach_local_node(parent, node, name):
node.name = name # needed by add_local_node
parent.add_local_node(node)
def _add_dunder_class(func, member):
"""Add a __class__ member to the given func node, if we can determine it."""
python_cls = member.__class__
cls_name = getattr(python_cls, '__name__', None)
if not cls_name:
return
cls_bases = [ancestor.__name__ for ancestor in python_cls.__bases__]
ast_klass = build_class(cls_name, cls_bases, python_cls.__doc__)
func.instance_attrs['__class__'] = [ast_klass]
_marker = object()
def attach_dummy_node(node, name, runtime_object=_marker):
"""create a dummy node and register it in the locals of the given
node with the specified name
"""
enode = nodes.EmptyNode()
enode.object = runtime_object
_attach_local_node(node, enode, name)
def _has_underlying_object(self):
return self.object is not None and self.object is not _marker
nodes.EmptyNode.has_underlying_object = _has_underlying_object
def attach_const_node(node, name, value):
"""create a Const node and register it in the locals of the given
node with the specified name
"""
if name not in node.special_attributes:
_attach_local_node(node, nodes.const_factory(value), name)
def attach_import_node(node, modname, membername):
"""create a ImportFrom node and register it in the locals of the given
node with the specified name
"""
from_node = nodes.ImportFrom(modname, [(membername, None)])
_attach_local_node(node, from_node, membername)
def build_module(name, doc=None):
"""create and initialize a astroid Module node"""
node = nodes.Module(name, doc, pure_python=False)
node.package = False
node.parent = None
return node
def build_class(name, basenames=(), doc=None):
"""create and initialize a astroid ClassDef node"""
node = nodes.ClassDef(name, doc)
for base in basenames:
basenode = nodes.Name()
basenode.name = base
node.bases.append(basenode)
basenode.parent = node
return node
def build_function(name, args=None, defaults=None, doc=None):
"""create and initialize a astroid FunctionDef node"""
args, defaults = args or [], defaults or []
# first argument is now a list of decorators
func = nodes.FunctionDef(name, doc)
func.args = argsnode = nodes.Arguments()
argsnode.args = []
for arg in args:
argsnode.args.append(nodes.Name())
argsnode.args[-1].name = arg
argsnode.args[-1].parent = argsnode
argsnode.defaults = []
for default in defaults:
argsnode.defaults.append(nodes.const_factory(default))
argsnode.defaults[-1].parent = argsnode
argsnode.kwarg = None
argsnode.vararg = None
argsnode.parent = func
if args:
register_arguments(func)
return func
def build_from_import(fromname, names):
"""create and initialize an astroid ImportFrom import statement"""
return nodes.ImportFrom(fromname, [(name, None) for name in names])
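# --- Illustrative sketch: composing the build_* helpers above by hand, the
# same way InspectBuilder does for living objects. The module and class
# names ("mymod", "Point") are invented for this example.
# mod = build_module('mymod')
# klass = build_class('Point', basenames=('object',), doc='A 2D point.')
# _attach_local_node(mod, klass, 'Point')
# func = build_function('origin', args=['self'])
# _attach_local_node(klass, func, 'origin')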
def register_arguments(func, args=None):
"""add given arguments to local
args is a list that may contains nested lists
(i.e. def func(a, (b, c, d)): ...)
"""
if args is None:
args = func.args.args
if func.args.vararg:
func.set_local(func.args.vararg, func.args)
if func.args.kwarg:
func.set_local(func.args.kwarg, func.args)
for arg in args:
if isinstance(arg, nodes.Name):
func.set_local(arg.name, arg)
else:
register_arguments(func, arg.elts)
def object_build_class(node, member, localname):
"""create astroid for a living class object"""
basenames = [base.__name__ for base in member.__bases__]
return _base_class_object_build(node, member, basenames,
localname=localname)
def object_build_function(node, member, localname):
"""create astroid for a living function object"""
# pylint: disable=deprecated-method; completely removed in 2.0
args, varargs, varkw, defaults = inspect.getargspec(member)
if varargs is not None:
args.append(varargs)
if varkw is not None:
args.append(varkw)
func = build_function(getattr(member, '__name__', None) or localname, args,
defaults, member.__doc__)
node.add_local_node(func, localname)
def object_build_datadescriptor(node, member, name):
"""create astroid for a living data descriptor object"""
return _base_class_object_build(node, member, [], name)
def object_build_methoddescriptor(node, member, localname):
"""create astroid for a living method descriptor object"""
# FIXME get arguments ?
func = build_function(getattr(member, '__name__', None) or localname,
doc=member.__doc__)
    # set node's arguments to None to signal that we have no information, not
    # an empty argument list
func.args.args = None
node.add_local_node(func, localname)
_add_dunder_class(func, member)
def _base_class_object_build(node, member, basenames, name=None, localname=None):
"""create astroid for a living class object, with a given set of base names
(e.g. ancestors)
"""
klass = build_class(name or getattr(member, '__name__', None) or localname,
basenames, member.__doc__)
klass._newstyle = isinstance(member, type)
node.add_local_node(klass, localname)
try:
# limit the instantiation trick since it's too dangerous
# (such as infinite test execution...)
# this at least resolves common case such as Exception.args,
# OSError.errno
if issubclass(member, Exception):
instdict = member().__dict__
else:
raise TypeError
except: # pylint: disable=bare-except
pass
else:
for item_name, obj in instdict.items():
valnode = nodes.EmptyNode()
valnode.object = obj
valnode.parent = klass
valnode.lineno = 1
klass.instance_attrs[item_name] = [valnode]
return klass
def _build_from_function(node, name, member, module):
# verify this is not an imported function
try:
code = six.get_function_code(member)
except AttributeError:
# Some implementations don't provide the code object,
# such as Jython.
code = None
filename = getattr(code, 'co_filename', None)
if filename is None:
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif filename != getattr(module, '__file__', None):
attach_dummy_node(node, name, member)
else:
object_build_function(node, member, name)
class InspectBuilder(object):
"""class for building nodes from living object
this is actually a really minimal representation, including only Module,
FunctionDef and ClassDef nodes and some others as guessed.
"""
# astroid from living objects ###############################################
def __init__(self):
self._done = {}
self._module = None
def inspect_build(self, module, modname=None, path=None):
"""build astroid from a living module (i.e. using inspect)
this is used when there is no python source code available (either
because it's a built-in module or because the .py is not available)
"""
self._module = module
if modname is None:
modname = module.__name__
try:
node = build_module(modname, module.__doc__)
except AttributeError:
# in jython, java modules have no __doc__ (see #109562)
node = build_module(modname)
node.file = node.path = os.path.abspath(path) if path else path
node.name = modname
MANAGER.cache_module(node)
node.package = hasattr(module, '__path__')
self._done = {}
self.object_build(node, module)
return node
def object_build(self, node, obj):
"""recursive method which create a partial ast from real objects
(only function, class, and method are handled)
"""
if obj in self._done:
return self._done[obj]
self._done[obj] = node
for name in dir(obj):
try:
member = getattr(obj, name)
except AttributeError:
# damned ExtensionClass.Base, I know you're there !
attach_dummy_node(node, name)
continue
if inspect.ismethod(member):
member = six.get_method_function(member)
if inspect.isfunction(member):
_build_from_function(node, name, member, self._module)
elif inspect.isbuiltin(member):
if (not _io_discrepancy(member) and
self.imported_member(node, member, name)):
continue
object_build_methoddescriptor(node, member, name)
elif inspect.isclass(member):
if self.imported_member(node, member, name):
continue
if member in self._done:
class_node = self._done[member]
if class_node not in node.locals.get(name, ()):
node.add_local_node(class_node, name)
else:
class_node = object_build_class(node, member, name)
# recursion
self.object_build(class_node, member)
if name == '__class__' and class_node.parent is None:
class_node.parent = self._done[self._module]
elif inspect.ismethoddescriptor(member):
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif inspect.isdatadescriptor(member):
assert isinstance(member, object)
object_build_datadescriptor(node, member, name)
elif isinstance(member, _CONSTANTS):
attach_const_node(node, name, member)
elif inspect.isroutine(member):
# This should be called for Jython, where some builtin
# methods aren't caught by isbuiltin branch.
_build_from_function(node, name, member, self._module)
else:
# create an empty node so that the name is actually defined
attach_dummy_node(node, name, member)
def imported_member(self, node, member, name):
"""verify this is not an imported class or handle it"""
        # /!\ some classes like ExtensionClass don't have a __module__
        # attribute! Also, this may trigger an exception on a badly built module
# (see http://www.logilab.org/ticket/57299 for instance)
try:
modname = getattr(member, '__module__', None)
except: # pylint: disable=bare-except
_LOG.exception('unexpected error while building '
'astroid from living object')
modname = None
if modname is None:
if (name in ('__new__', '__subclasshook__')
or (name in _BUILTINS and _JYTHON)):
# Python 2.5.1 (r251:54863, Sep 1 2010, 22:03:14)
# >>> print object.__new__.__module__
# None
modname = six.moves.builtins.__name__
else:
attach_dummy_node(node, name, member)
return True
real_name = {
'gtk': 'gtk_gtk',
'_io': 'io',
}.get(modname, modname)
if real_name != self._module.__name__:
# check if it sounds valid and then add an import node, else use a
# dummy node
try:
getattr(sys.modules[modname], name)
except (KeyError, AttributeError):
attach_dummy_node(node, name, member)
else:
attach_import_node(node, modname, name)
return True
return False
### astroid bootstrapping ######################################################
Astroid_BUILDER = InspectBuilder()
_CONST_PROXY = {}
def _astroid_bootstrapping(astroid_builtin=None):
"""astroid boot strapping the builtins module"""
# this boot strapping is necessary since we need the Const nodes to
# inspect_build builtins, and then we can proxy Const
if astroid_builtin is None:
from six.moves import builtins
astroid_builtin = Astroid_BUILDER.inspect_build(builtins)
# pylint: disable=redefined-outer-name
for cls, node_cls in node_classes.CONST_CLS.items():
if cls is type(None):
proxy = build_class('NoneType')
proxy.parent = astroid_builtin
elif cls is type(NotImplemented):
proxy = build_class('NotImplementedType')
proxy.parent = astroid_builtin
else:
proxy = astroid_builtin.getattr(cls.__name__)[0]
if cls in (dict, list, set, tuple):
node_cls._proxied = proxy
else:
_CONST_PROXY[cls] = proxy
_astroid_bootstrapping()
# TODO : find a nicer way to handle this situation;
# However __proxied introduced an
# infinite recursion (see https://bugs.launchpad.net/pylint/+bug/456870)
def _set_proxied(const):
return _CONST_PROXY[const.value.__class__]
nodes.Const._proxied = property(_set_proxied)
_GeneratorType = nodes.ClassDef(types.GeneratorType.__name__, types.GeneratorType.__doc__)
_GeneratorType.parent = MANAGER.astroid_cache[six.moves.builtins.__name__]
bases.Generator._proxied = _GeneratorType
Astroid_BUILDER.object_build(bases.Generator._proxied, types.GeneratorType)
_builtins = MANAGER.astroid_cache[six.moves.builtins.__name__]
BUILTIN_TYPES = (types.GetSetDescriptorType, types.GeneratorType,
types.MemberDescriptorType, type(None), type(NotImplemented),
types.FunctionType, types.MethodType,
types.BuiltinFunctionType, types.ModuleType, types.TracebackType)
for _type in BUILTIN_TYPES:
if _type.__name__ not in _builtins:
cls = nodes.ClassDef(_type.__name__, _type.__doc__)
cls.parent = MANAGER.astroid_cache[six.moves.builtins.__name__]
Astroid_BUILDER.object_build(cls, _type)
_builtins[_type.__name__] = cls
|
alex/flanker
|
flanker/mime/message/headers/encodedword.py
|
# coding:utf-8
import logging
import regex as re
import email.quoprimime
import email.base64mime
from base64 import b64encode
from flanker.mime.message import charsets, errors
log = logging.getLogger(__name__)
#deal with unfolding
foldingWhiteSpace = re.compile(r"(\n\r?|\r\n?)(\s*)")
def unfold(value):
"""
Unfolding is accomplished by simply removing any CRLF
that is immediately followed by WSP. Each header field should be
treated in its unfolded form for further syntactic and semantic
evaluation.
"""
return re.sub(foldingWhiteSpace, r"\2", value)
def decode(header):
return mime_to_unicode(header)
def mime_to_unicode(header):
"""
Takes a header value and returns a fully decoded unicode string.
It differs from standard Python's mail.header.decode_header() because:
- it is higher level, i.e. returns a unicode string instead of
an array of tuples
- it accepts Unicode and non-ASCII strings as well
    >>> mime_to_unicode("=?UTF-8?B?UmVbMl06INCX0LXQvNC70Y/QutC4?=")
    u"Re[2]:Земляки"
    >>> mime_to_unicode("hello")
    u"hello"
"""
try:
header = unfold(header)
decoded = [] # decoded parts
while header:
match = encodedWord.search(header)
if match:
start = match.start()
if start != 0:
# decodes unencoded ascii part to unicode
value = charsets.convert_to_unicode(ascii, header[0:start])
if value.strip():
decoded.append(value)
                # decode the =?...?= encoded-word part
charset, value = decode_part(
match.group('charset').lower(),
match.group('encoding').lower(),
match.group('encoded'))
decoded.append(charsets.convert_to_unicode(charset, value))
header = header[match.end():]
else:
# no match? append the remainder
# of the string to the list of chunks
decoded.append(charsets.convert_to_unicode(ascii, header))
break
return u"".join(decoded)
except Exception:
try:
log.warning(
u"HEADER-DECODE-FAIL: ({0}) - b64encoded".format(
b64encode(header)))
except Exception:
log.exception("Failed to log exception")
return header
ascii = 'ascii'
#this spec refers to
#http://tools.ietf.org/html/rfc2047
encodedWord = re.compile(r'''(?P<encodedWord>
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
)''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
def decode_part(charset, encoding, value):
"""
Attempts to decode part, understands
'q' - quoted encoding
'b' - base64 mime encoding
Returns (charset, decoded-string)
"""
if encoding == 'q':
return (charset, email.quoprimime.header_decode(str(value)))
elif encoding == 'b':
# Postel's law: add missing padding
paderr = len(value) % 4
if paderr:
value += '==='[:4 - paderr]
return (charset, email.base64mime.decode(value))
elif not encoding:
return (charset, value)
else:
raise errors.DecodingError(
"Unknown encoding: {0}".format(encoding))
|
derekjchow/models
|
research/feelvos/utils/video_input_generator.py
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for providing semantic segmentation video data."""
import tensorflow as tf
from feelvos import input_preprocess
from feelvos import model
from feelvos.utils import mask_damaging
from feelvos.utils import train_utils
slim = tf.contrib.slim
dataset_data_provider = slim.dataset_data_provider
MIN_LABEL_COUNT = 10
def decode_image_sequence(tensor, image_format='jpeg', shape=None,
channels=3, raw_dtype=tf.uint8):
"""Decodes a sequence of images.
Args:
tensor: the tensor of strings to decode, shape: [num_images]
image_format: a string (possibly tensor) with the format of the image.
Options include 'jpeg', 'png', and 'raw'.
shape: a list or tensor of the decoded image shape for a single image.
channels: if 'shape' is None, the third dimension of the image is set to
this value.
raw_dtype: if the image is encoded as raw bytes, this is the method of
decoding the bytes into values.
Returns:
The decoded images with shape [time, height, width, channels].
"""
handler = slim.tfexample_decoder.Image(
shape=shape, channels=channels, dtype=raw_dtype, repeated=True)
return handler.tensors_to_item({'image/encoded': tensor,
'image/format': image_format})
def _get_data(data_provider, dataset_split, video_frames_are_decoded):
"""Gets data from data provider.
Args:
data_provider: An object of slim.data_provider.
dataset_split: Dataset split.
video_frames_are_decoded: Boolean, whether the video frames are already
decoded
Returns:
image: Image Tensor.
label: Label Tensor storing segmentation annotations.
    object_label: An integer referring to the object_label according to the
      labelmap. If the example has more than one object_label, take the first
      one.
image_name: Image name.
height: Image height.
width: Image width.
video_id: String tensor representing the name of the video.
Raises:
ValueError: Failed to find label.
"""
if video_frames_are_decoded:
image, = data_provider.get(['image'])
else:
image, = data_provider.get(['image/encoded'])
# Some datasets do not contain image_name.
if 'image_name' in data_provider.list_items():
image_name, = data_provider.get(['image_name'])
else:
image_name = tf.constant('')
height, width = data_provider.get(['height', 'width'])
label = None
if dataset_split != 'test':
if video_frames_are_decoded:
if 'labels_class' not in data_provider.list_items():
raise ValueError('Failed to find labels.')
label, = data_provider.get(['labels_class'])
else:
key = 'segmentation/object/encoded'
if key not in data_provider.list_items():
raise ValueError('Failed to find labels.')
label, = data_provider.get([key])
object_label = None
video_id, = data_provider.get(['video_id'])
return image, label, object_label, image_name, height, width, video_id
def _has_foreground_and_background_in_first_frame(label, subsampling_factor):
"""Checks if the labels have foreground and background in the first frame.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
subsampling_factor: Integer, the subsampling factor.
Returns:
Boolean, whether the labels have foreground and background in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis],
[h // subsampling_factor,
w // subsampling_factor],
align_corners=True),
axis=0)
is_bg = tf.equal(label_downscaled, 0)
is_fg = tf.logical_not(is_bg)
  # Just using reduce_any was not robust enough, so let's make sure the count
  # is above MIN_LABEL_COUNT.
  fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
  bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
  has_fg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
  has_bg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
return tf.logical_and(has_bg, has_fg)
def _has_foreground_and_background_in_first_frame_2(label,
decoder_output_stride):
"""Checks if the labels have foreground and background in the first frame.
Second attempt, this time we use the actual output dimension for resizing.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
decoder_output_stride: Integer, the stride of the decoder output.
Returns:
Boolean, whether the labels have foreground and background in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
align_corners=True), axis=0)
is_bg = tf.equal(label_downscaled, 0)
is_fg = tf.logical_not(is_bg)
  # Just using reduce_any was not robust enough, so let's make sure the count
  # is above MIN_LABEL_COUNT.
  fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
  bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
  has_fg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
  has_bg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
return tf.logical_and(has_bg, has_fg)
def _has_enough_pixels_of_each_object_in_first_frame(
label, decoder_output_stride):
"""Checks if for each object (incl. background) enough pixels are visible.
During test time, we will usually not see a reference frame in which only
very few pixels of one object are visible. These cases can be problematic
during training, especially if more than the 1-nearest neighbor is used.
That's why this function can be used to detect and filter these cases.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
decoder_output_stride: Integer, the stride of the decoder output.
Returns:
Boolean, whether the labels have enough pixels of each object in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
align_corners=True), axis=0)
_, _, counts = tf.unique_with_counts(
tf.reshape(label_downscaled, [-1]))
has_enough_pixels_per_object = tf.reduce_all(
tf.greater_equal(counts, MIN_LABEL_COUNT))
return has_enough_pixels_per_object
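# --- Worked example for the check above (numbers invented, and assuming the
# usual deeplab scale_dimension convention of int((dim - 1) * scale + 1)):
# with decoder_output_stride=4, a 65x65 label map is resized to 17x17 = 289
# pixels, and every object id visible in the first frame must then cover at
# least MIN_LABEL_COUNT (10) of those pixels for the sample to be kept.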
def get(dataset,
num_frames_per_video,
crop_size,
batch_size,
min_resize_value=None,
max_resize_value=None,
resize_factor=None,
min_scale_factor=1.,
max_scale_factor=1.,
scale_factor_step_size=0,
preprocess_image_and_label=True,
num_readers=1,
num_threads=1,
dataset_split=None,
is_training=True,
model_variant=None,
batch_capacity_factor=32,
video_frames_are_decoded=False,
decoder_output_stride=None,
first_frame_finetuning=False,
sample_only_first_frame_for_finetuning=False,
sample_adjacent_and_consistent_query_frames=False,
remap_labels_to_reference_frame=True,
generate_prev_frame_mask_by_mask_damaging=False,
three_frame_dataset=False,
add_prev_frame_label=True):
"""Gets the dataset split for semantic segmentation.
  This function gets the dataset split for semantic segmentation. In
  particular, it is a wrapper of (1) dataset_data_provider, which returns the
  raw dataset split, (2) input_preprocess, which preprocesses the raw data, and
  (3) the TensorFlow operation that batches the preprocessed data. The output
  can then be used directly for training, evaluation or visualization.
Args:
dataset: An instance of slim Dataset.
num_frames_per_video: The number of frames used per video
crop_size: Image crop size [height, width].
batch_size: Batch size.
min_resize_value: Desired size of the smaller image side.
max_resize_value: Maximum allowed size of the larger image side.
resize_factor: Resized dimensions are multiple of factor plus one.
min_scale_factor: Minimum scale factor value.
max_scale_factor: Maximum scale factor value.
scale_factor_step_size: The step size from min scale factor to max scale
factor. The input is randomly scaled based on the value of
(min_scale_factor, max_scale_factor, scale_factor_step_size).
preprocess_image_and_label: Boolean variable specifies if preprocessing of
image and label will be performed or not.
num_readers: Number of readers for data provider.
num_threads: Number of threads for batching data.
dataset_split: Dataset split.
is_training: Is training or not.
model_variant: Model variant (string) for choosing how to mean-subtract the
images. See feature_extractor.network_map for supported model variants.
batch_capacity_factor: Batch capacity factor affecting the training queue
batch capacity.
video_frames_are_decoded: Boolean, whether the video frames are already
decoded
decoder_output_stride: Integer, the stride of the decoder output.
first_frame_finetuning: Boolean, whether to only sample the first frame
for fine-tuning.
sample_only_first_frame_for_finetuning: Boolean, whether to only sample the
first frame during fine-tuning. This should be False when using lucid or
wonderland data, but true when fine-tuning on the first frame only.
Only has an effect if first_frame_finetuning is True.
sample_adjacent_and_consistent_query_frames: Boolean, if true, the query
frames (all but the first frame which is the reference frame) will be
sampled such that they are adjacent video frames and have the same
crop coordinates and flip augmentation.
remap_labels_to_reference_frame: Boolean, whether to remap the labels of
the query frames to match the labels of the (downscaled) reference frame.
If a query frame contains a label which is not present in the reference,
it will be mapped to background.
generate_prev_frame_mask_by_mask_damaging: Boolean, whether to generate
the masks used as guidance from the previous frame by damaging the
ground truth mask.
three_frame_dataset: Boolean, whether the dataset has exactly three frames
per video of which the first is to be used as reference and the two
others are consecutive frames to be used as query frames.
add_prev_frame_label: Boolean, whether to sample one more frame before the
first query frame to obtain a previous frame label. Only has an effect,
if sample_adjacent_and_consistent_query_frames is True and
generate_prev_frame_mask_by_mask_damaging is False.
Returns:
A dictionary of batched Tensors for semantic segmentation.
Raises:
ValueError: dataset_split is None, or Failed to find labels.
"""
if dataset_split is None:
raise ValueError('Unknown dataset split.')
if model_variant is None:
tf.logging.warning('Please specify a model_variant. See '
'feature_extractor.network_map for supported model '
'variants.')
data_provider = dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=num_readers,
num_epochs=None if is_training else 1,
shuffle=is_training)
image, label, object_label, image_name, height, width, video_id = _get_data(
data_provider, dataset_split, video_frames_are_decoded)
sampling_is_valid = tf.constant(True)
if num_frames_per_video is not None:
total_num_frames = tf.shape(image)[0]
if first_frame_finetuning or three_frame_dataset:
if sample_only_first_frame_for_finetuning:
assert not sample_adjacent_and_consistent_query_frames, (
'this option does not make sense for sampling only first frame.')
# Sample the first frame num_frames_per_video times.
sel_indices = tf.tile(tf.constant(0, dtype=tf.int32)[tf.newaxis],
multiples=[num_frames_per_video])
else:
if sample_adjacent_and_consistent_query_frames:
if add_prev_frame_label:
num_frames_per_video += 1
# Since this is first frame fine-tuning, we'll for now assume that
# each sequence has exactly 3 images: the ref frame and 2 adjacent
# query frames.
assert num_frames_per_video == 3
with tf.control_dependencies([tf.assert_equal(total_num_frames, 3)]):
sel_indices = tf.constant([1, 2], dtype=tf.int32)
else:
# Sample num_frames_per_video - 1 query frames which are not the
# first frame.
sel_indices = tf.random_shuffle(
tf.range(1, total_num_frames))[:(num_frames_per_video - 1)]
# Concat first frame as reference frame to the front.
sel_indices = tf.concat([tf.constant(0, dtype=tf.int32)[tf.newaxis],
sel_indices], axis=0)
else:
if sample_adjacent_and_consistent_query_frames:
if add_prev_frame_label:
# Sample one more frame which we can use to provide initial softmax
# feedback.
num_frames_per_video += 1
ref_idx = tf.random_shuffle(tf.range(total_num_frames))[0]
sampling_is_valid = tf.greater_equal(total_num_frames,
num_frames_per_video)
def sample_query_start_idx():
return tf.random_shuffle(
tf.range(total_num_frames - num_frames_per_video + 1))[0]
query_start_idx = tf.cond(sampling_is_valid, sample_query_start_idx,
lambda: tf.constant(0, dtype=tf.int32))
def sample_sel_indices():
return tf.concat(
[ref_idx[tf.newaxis],
tf.range(
query_start_idx,
query_start_idx + (num_frames_per_video - 1))], axis=0)
sel_indices = tf.cond(
sampling_is_valid, sample_sel_indices,
lambda: tf.zeros((num_frames_per_video,), dtype=tf.int32))
else:
# Randomly sample some frames from the video.
sel_indices = tf.random_shuffle(
tf.range(total_num_frames))[:num_frames_per_video]
image = tf.gather(image, sel_indices, axis=0)
if not video_frames_are_decoded:
image = decode_image_sequence(image)
if label is not None:
if num_frames_per_video is not None:
label = tf.gather(label, sel_indices, axis=0)
if not video_frames_are_decoded:
label = decode_image_sequence(label, image_format='png', channels=1)
# Sometimes, label is saved as [num_frames_per_video, height, width] or
# [num_frames_per_video, height, width, 1]. We change it to be
# [num_frames_per_video, height, width, 1].
if label.shape.ndims == 3:
label = tf.expand_dims(label, 3)
elif label.shape.ndims == 4 and label.shape.dims[3] == 1:
pass
else:
raise ValueError('Input label shape must be '
'[num_frames_per_video, height, width],'
' or [num_frames, height, width, 1]. '
'Got {}'.format(label.shape.ndims))
label.set_shape([None, None, None, 1])
# Add size of first dimension since tf can't figure it out automatically.
image.set_shape((num_frames_per_video, None, None, None))
if label is not None:
label.set_shape((num_frames_per_video, None, None, None))
preceding_frame_label = None
if preprocess_image_and_label:
if num_frames_per_video is None:
raise ValueError('num_frame_per_video must be specified for preproc.')
original_images = []
images = []
labels = []
if sample_adjacent_and_consistent_query_frames:
num_frames_individual_preproc = 1
else:
num_frames_individual_preproc = num_frames_per_video
for frame_idx in range(num_frames_individual_preproc):
original_image_t, image_t, label_t = (
input_preprocess.preprocess_image_and_label(
image[frame_idx],
label[frame_idx],
crop_height=crop_size[0] if crop_size is not None else None,
crop_width=crop_size[1] if crop_size is not None else None,
min_resize_value=min_resize_value,
max_resize_value=max_resize_value,
resize_factor=resize_factor,
min_scale_factor=min_scale_factor,
max_scale_factor=max_scale_factor,
scale_factor_step_size=scale_factor_step_size,
ignore_label=dataset.ignore_label,
is_training=is_training,
model_variant=model_variant))
original_images.append(original_image_t)
images.append(image_t)
labels.append(label_t)
if sample_adjacent_and_consistent_query_frames:
imgs_for_preproc = [image[frame_idx] for frame_idx in
range(1, num_frames_per_video)]
labels_for_preproc = [label[frame_idx] for frame_idx in
range(1, num_frames_per_video)]
original_image_rest, image_rest, label_rest = (
input_preprocess.preprocess_images_and_labels_consistently(
imgs_for_preproc,
labels_for_preproc,
crop_height=crop_size[0] if crop_size is not None else None,
crop_width=crop_size[1] if crop_size is not None else None,
min_resize_value=min_resize_value,
max_resize_value=max_resize_value,
resize_factor=resize_factor,
min_scale_factor=min_scale_factor,
max_scale_factor=max_scale_factor,
scale_factor_step_size=scale_factor_step_size,
ignore_label=dataset.ignore_label,
is_training=is_training,
model_variant=model_variant))
original_images.extend(original_image_rest)
images.extend(image_rest)
labels.extend(label_rest)
assert len(original_images) == num_frames_per_video
assert len(images) == num_frames_per_video
assert len(labels) == num_frames_per_video
if remap_labels_to_reference_frame:
# Remap labels to indices into the labels of the (downscaled) reference
# frame, or 0, i.e. background, for labels which are not present
# in the reference.
reference_labels = labels[0][tf.newaxis]
h, w = train_utils.resolve_shape(reference_labels)[1:3]
embedding_height = model.scale_dimension(
h, 1.0 / decoder_output_stride)
embedding_width = model.scale_dimension(
w, 1.0 / decoder_output_stride)
reference_labels_embedding_size = tf.squeeze(
tf.image.resize_nearest_neighbor(
reference_labels, tf.stack([embedding_height, embedding_width]),
align_corners=True),
axis=0)
# Get sorted unique labels in the reference frame.
labels_in_ref_frame, _ = tf.unique(
tf.reshape(reference_labels_embedding_size, [-1]))
labels_in_ref_frame = tf.contrib.framework.sort(labels_in_ref_frame)
for idx in range(1, len(labels)):
ref_label_mask = tf.equal(
labels[idx],
labels_in_ref_frame[tf.newaxis, tf.newaxis, :])
remapped = tf.argmax(tf.cast(ref_label_mask, tf.uint8), axis=-1,
output_type=tf.int32)
# Set to 0 if label is not present
is_in_ref = tf.reduce_any(ref_label_mask, axis=-1)
remapped *= tf.cast(is_in_ref, tf.int32)
labels[idx] = remapped[..., tf.newaxis]
if sample_adjacent_and_consistent_query_frames:
if first_frame_finetuning and generate_prev_frame_mask_by_mask_damaging:
preceding_frame_label = mask_damaging.damage_masks(labels[1])
elif add_prev_frame_label:
# Discard the image of the additional frame and take the label as
# initialization for softmax feedback.
original_images = [original_images[0]] + original_images[2:]
preceding_frame_label = labels[1]
images = [images[0]] + images[2:]
labels = [labels[0]] + labels[2:]
num_frames_per_video -= 1
original_image = tf.stack(original_images, axis=0)
image = tf.stack(images, axis=0)
label = tf.stack(labels, axis=0)
else:
if label is not None:
# Need to set label shape due to batching.
label.set_shape([num_frames_per_video,
None if crop_size is None else crop_size[0],
None if crop_size is None else crop_size[1],
1])
original_image = tf.to_float(tf.zeros_like(label))
if crop_size is None:
height = tf.shape(image)[1]
width = tf.shape(image)[2]
else:
height = crop_size[0]
width = crop_size[1]
sample = {'image': image,
'image_name': image_name,
'height': height,
'width': width,
'video_id': video_id}
if label is not None:
sample['label'] = label
if object_label is not None:
sample['object_label'] = object_label
if preceding_frame_label is not None:
sample['preceding_frame_label'] = preceding_frame_label
if not is_training:
# Original image is only used during visualization.
sample['original_image'] = original_image
if is_training:
if first_frame_finetuning:
keep_input = tf.constant(True)
else:
keep_input = tf.logical_and(sampling_is_valid, tf.logical_and(
_has_enough_pixels_of_each_object_in_first_frame(
label, decoder_output_stride),
_has_foreground_and_background_in_first_frame_2(
label, decoder_output_stride)))
batched = tf.train.maybe_batch(sample,
keep_input=keep_input,
batch_size=batch_size,
num_threads=num_threads,
capacity=batch_capacity_factor * batch_size,
dynamic_pad=True)
else:
batched = tf.train.batch(sample,
batch_size=batch_size,
num_threads=num_threads,
capacity=batch_capacity_factor * batch_size,
dynamic_pad=True)
# Flatten from [batch, num_frames_per_video, ...] to
  # [batch * num_frames_per_video, ...].
cropped_height = train_utils.resolve_shape(batched['image'])[2]
cropped_width = train_utils.resolve_shape(batched['image'])[3]
if num_frames_per_video is None:
first_dim = -1
else:
first_dim = batch_size * num_frames_per_video
batched['image'] = tf.reshape(batched['image'],
[first_dim, cropped_height, cropped_width, 3])
if label is not None:
batched['label'] = tf.reshape(batched['label'],
[first_dim, cropped_height, cropped_width, 1])
return batched
|
dakiri/splunk-app-twitter
|
twitter2/bin/twython/streaming/types.py
|
# -*- coding: utf-8 -*-
"""
twython.streaming.types
~~~~~~~~~~~~~~~~~~~~~~~
This module contains classes and methods for :class:`TwythonStreamer` to use.
"""
class TwythonStreamerTypes(object):
"""Class for different stream endpoints
Not all streaming endpoints have nested endpoints.
User Streams and Site Streams are single streams with no nested endpoints
Status Streams include filter, sample and firehose endpoints
"""
def __init__(self, streamer):
self.streamer = streamer
self.statuses = TwythonStreamerTypesStatuses(streamer)
def user(self, **params):
"""Stream user
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/user
"""
url = 'https://userstream.twitter.com/%s/user.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
def site(self, **params):
"""Stream site
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/site
"""
url = 'https://sitestream.twitter.com/%s/site.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
class TwythonStreamerTypesStatuses(object):
"""Class for different statuses endpoints
    Provided so that TwythonStreamer.statuses.filter() is available.
Just a bit cleaner than TwythonStreamer.statuses_filter(),
statuses_sample(), etc. all being single methods in TwythonStreamer
"""
def __init__(self, streamer):
self.streamer = streamer
def filter(self, **params):
"""Stream statuses/filter
        :param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/post/statuses/filter
"""
url = 'https://stream.twitter.com/%s/statuses/filter.json' \
% self.streamer.api_version
self.streamer._request(url, 'POST', params=params)
def sample(self, **params):
"""Stream statuses/sample
        :param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/sample
"""
url = 'https://stream.twitter.com/%s/statuses/sample.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
def firehose(self, **params):
"""Stream statuses/firehose
        :param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/firehose
"""
url = 'https://stream.twitter.com/%s/statuses/firehose.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
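# --- Hypothetical usage sketch (the track keyword value is invented): given
# a TwythonStreamer subclass instance `stream`, the classes above enable the
# nested call style:
# stream.statuses.filter(track='python')
# stream.statuses.sample()
# stream.user()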
|
jawilson/home-assistant
|
homeassistant/components/hive/light.py
|
"""Support for Hive light devices."""
from datetime import timedelta
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.helpers.entity import DeviceInfo
import homeassistant.util.color as color_util
from . import HiveEntity, refresh_system
from .const import ATTR_MODE, DOMAIN
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=15)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Hive thermostat based on a config entry."""
hive = hass.data[DOMAIN][entry.entry_id]
devices = hive.session.deviceList.get("light")
entities = []
if devices:
for dev in devices:
entities.append(HiveDeviceLight(hive, dev))
async_add_entities(entities, True)
class HiveDeviceLight(HiveEntity, LightEntity):
"""Hive Active Light Device."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self) -> DeviceInfo:
"""Return device information."""
return DeviceInfo(
identifiers={(DOMAIN, self.device["device_id"])},
manufacturer=self.device["deviceData"]["manufacturer"],
model=self.device["deviceData"]["model"],
name=self.device["device_name"],
sw_version=self.device["deviceData"]["version"],
via_device=(DOMAIN, self.device["parentDevice"]),
)
@property
def name(self):
"""Return the display name of this light."""
return self.device["haName"]
@property
def available(self):
"""Return if the device is available."""
return self.device["deviceData"]["online"]
@property
def extra_state_attributes(self):
"""Show Device Attributes."""
return {
ATTR_MODE: self.attributes.get(ATTR_MODE),
}
@property
def brightness(self):
"""Brightness of the light (an integer in the range 1-255)."""
return self.device["status"]["brightness"]
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self.device.get("min_mireds")
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self.device.get("max_mireds")
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self.device["status"].get("color_temp")
@property
def hs_color(self):
"""Return the hs color value."""
if self.device["status"]["mode"] == "COLOUR":
rgb = self.device["status"].get("hs_color")
return color_util.color_RGB_to_hs(*rgb)
return None
@property
def is_on(self):
"""Return true if light is on."""
return self.device["status"]["state"]
@refresh_system
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
new_brightness = None
new_color_temp = None
new_color = None
if ATTR_BRIGHTNESS in kwargs:
tmp_new_brightness = kwargs.get(ATTR_BRIGHTNESS)
percentage_brightness = (tmp_new_brightness / 255) * 100
new_brightness = int(round(percentage_brightness / 5.0) * 5.0)
if new_brightness == 0:
new_brightness = 5
if ATTR_COLOR_TEMP in kwargs:
tmp_new_color_temp = kwargs.get(ATTR_COLOR_TEMP)
new_color_temp = round(1000000 / tmp_new_color_temp)
if ATTR_HS_COLOR in kwargs:
get_new_color = kwargs.get(ATTR_HS_COLOR)
hue = int(get_new_color[0])
saturation = int(get_new_color[1])
new_color = (hue, saturation, 100)
await self.hive.light.turnOn(
self.device, new_brightness, new_color_temp, new_color
)
@refresh_system
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self.hive.light.turnOff(self.device)
@property
def supported_features(self):
"""Flag supported features."""
supported_features = None
if self.device["hiveType"] == "warmwhitelight":
supported_features = SUPPORT_BRIGHTNESS
elif self.device["hiveType"] == "tuneablelight":
supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
elif self.device["hiveType"] == "colourtuneablelight":
supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR
return supported_features
async def async_update(self):
"""Update all Node data from Hive."""
await self.hive.session.updateData(self.device)
self.device = await self.hive.light.getLight(self.device)
self.attributes.update(self.device.get("attributes", {}))
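# --- Worked example of the brightness mapping in async_turn_on (values
# invented): ATTR_BRIGHTNESS=128 -> 128 / 255 * 100 = 50.2%, rounded to the
# nearest 5 -> 50; ATTR_BRIGHTNESS=3 -> 1.2%, which rounds to 0 and is then
# bumped up to the 5% minimum.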
|
joelagnel/trappy
|
trappy/plotter/ILinePlot.py
|
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module contains the class for plotting and customizing
Line/Linear Plots with :mod:`trappy.trace.BareTrace` or derived
classes. This plot only works when run from an IPython notebook
"""
from collections import OrderedDict
import matplotlib.pyplot as plt
from trappy.plotter import AttrConf
from trappy.plotter import Utils
from trappy.plotter.Constraint import ConstraintManager
from trappy.plotter.ILinePlotGen import ILinePlotGen
from trappy.plotter.AbstractDataPlotter import AbstractDataPlotter
from trappy.plotter.ColorMap import ColorMap
from trappy.plotter import IPythonConf
from trappy.utils import handle_duplicate_index
import pandas as pd
if not IPythonConf.check_ipython():
raise ImportError("Ipython Environment not Found")
class ILinePlot(AbstractDataPlotter):
"""
This class uses :mod:`trappy.plotter.Constraint.Constraint` to
represent different permutations of input parameters. These
constraints are generated by creating an instance of
:mod:`trappy.plotter.Constraint.ConstraintManager`.
:param traces: The input data
:type traces: a list of :mod:`trappy.trace.FTrace`,
:mod:`trappy.trace.SysTrace`, :mod:`trappy.trace.BareTrace`
or :mod:`pandas.DataFrame` or a single instance of them.
:param column: specifies the name of the column to
be plotted.
:type column: (str, list(str))
:param templates: TRAPpy events
.. note::
This is not required if a :mod:`pandas.DataFrame` is
used
:type templates: :mod:`trappy.base.Base`
:param filters: Filter the column to be plotted as per the
specified criteria. For Example:
::
filters =
{
"pid": [ 3338 ],
"cpu": [0, 2, 4],
}
:type filters: dict
:param per_line: Used to control the number of graphs
in each graph subplot row
:type per_line: int
:param concat: Draw all the pivots on a single graph
:type concat: bool
:param permute: Draw one plot for each of the traces specified
:type permute: bool
:param fill: Fill the area under the plots
:type fill: bool
:param fill_alpha: Opacity of filled area under the plots.
Implies fill=True.
:type fill_alpha: float
:param xlim: A tuple representing the upper and lower xlimits
:type xlim: tuple
:param ylim: A tuple representing the upper and lower ylimits
:type ylim: tuple
:param drawstyle: Set the drawstyle to a matplotlib compatible
drawing style.
.. note::
Only "steps-post" is supported as a valid value for
the drawstyle. This creates a step plot.
:type drawstyle: str
:param sync_zoom: Synchronize the zoom of a group of plots.
Zooming in one plot of a group (see below) will zoom in every
plot of that group. Defaults to False.
:type sync_zoom: boolean
:param group: Name given to the plots created by this ILinePlot
instance. This name is only used for synchronized zoom. If
you zoom on any plot in a group all plots will zoom at the
same time.
:type group: string
:param signals: A string of the type event_name:column to indicate
the value that needs to be plotted. You can add an additional
        parameter to specify the color of the line in rgb:
"event_name:column:color". The color is specified as a comma
separated list of rgb values, from 0 to 255 or from 0x0 to
0xff. E.g. 0xff,0x0,0x0 is red and 100,40,32 is brown.
.. note::
- Only one of `signals` or both `templates` and
`columns` should be specified
- Signals format won't work for :mod:`pandas.DataFrame`
input
:type signals: str
"""
def __init__(self, traces, templates=None, **kwargs):
# Default keys, each can be overridden in kwargs
self._layout = None
super(ILinePlot, self).__init__(traces=traces,
templates=templates)
self.set_defaults()
for key in kwargs:
self._attr[key] = kwargs[key]
if "signals" in self._attr:
self._describe_signals()
self._check_data()
if "column" not in self._attr:
raise RuntimeError("Value Column not specified")
if self._attr["drawstyle"] and self._attr["drawstyle"].startswith("steps"):
self._attr["step_plot"] = True
zip_constraints = not self._attr["permute"]
window = self._attr["xlim"] if "xlim" in self._attr else None
self.c_mgr = ConstraintManager(traces, self._attr["column"], self.templates,
self._attr["pivot"],
self._attr["filters"],
window=window,
zip_constraints=zip_constraints)
def savefig(self, *args, **kwargs):
raise NotImplementedError("Not Available for ILinePlot")
def view(self, max_datapoints=75000, test=False):
"""Displays the graph
:param max_datapoints: Maximum number of datapoints to plot.
Dygraph can make the browser unresponsive if it tries to plot
too many datapoints. Chrome 50 chokes at around 75000 on an
i7-4770 @ 3.4GHz, Firefox 47 can handle up to 200000 before
becoming too slow on the same machine. You can increase this
number if you know what you're doing and are happy to wait for
the plot to render.
:type max_datapoints: int
:param test: For testing purposes. Only set to true if run
from the testsuite.
:type test: boolean
"""
# Defer installation of IPython components
# to the .view call to avoid any errors
# when importing the module. This allows
# importing the module from outside
# an IPython notebook
if not test:
IPythonConf.iplot_install("ILinePlot")
self._attr["max_datapoints"] = max_datapoints
if self._attr["concat"]:
self._plot_concat()
else:
self._plot(self._attr["permute"], test)
def set_defaults(self):
"""Sets the default attrs"""
self._attr["per_line"] = AttrConf.PER_LINE
self._attr["concat"] = AttrConf.CONCAT
self._attr["filters"] = {}
self._attr["pivot"] = AttrConf.PIVOT
self._attr["permute"] = False
self._attr["drawstyle"] = None
self._attr["step_plot"] = False
self._attr["fill"] = AttrConf.FILL
self._attr["scatter"] = AttrConf.PLOT_SCATTER
self._attr["point_size"] = AttrConf.POINT_SIZE
self._attr["map_label"] = {}
self._attr["title"] = AttrConf.TITLE
def _plot(self, permute, test):
"""Internal Method called to draw the plot"""
pivot_vals, len_pivots = self.c_mgr.generate_pivots(permute)
self._layout = ILinePlotGen(len_pivots, **self._attr)
plot_index = 0
for p_val in pivot_vals:
data_dict = OrderedDict()
for constraint in self.c_mgr:
if permute:
trace_idx, pivot = p_val
if constraint.trace_index != trace_idx:
continue
legend = constraint._template.name + ":" + constraint.column
else:
pivot = p_val
legend = str(constraint)
result = constraint.result
if pivot in result:
data_dict[legend] = result[pivot]
if permute:
title = self.traces[plot_index].name
elif pivot != AttrConf.PIVOT_VAL:
title = "{0}: {1}".format(self._attr["pivot"], self._attr["map_label"].get(pivot, pivot))
else:
title = ""
if len(data_dict) > 1:
data_frame = self._fix_indexes(data_dict)
else:
data_frame = pd.DataFrame(data_dict)
self._layout.add_plot(plot_index, data_frame, title, test=test)
plot_index += 1
self._layout.finish()
def _plot_concat(self):
"""Plot all lines on a single figure"""
pivot_vals, _ = self.c_mgr.generate_pivots()
plot_index = 0
self._layout = ILinePlotGen(len(self.c_mgr), **self._attr)
for constraint in self.c_mgr:
result = constraint.result
title = str(constraint)
data_dict = OrderedDict()
for pivot in pivot_vals:
if pivot in result:
if pivot == AttrConf.PIVOT_VAL:
key = ",".join(self._attr["column"])
else:
key = "{0}: {1}".format(self._attr["pivot"], self._attr["map_label"].get(pivot, pivot))
data_dict[key] = result[pivot]
if len(data_dict) > 1:
data_frame = self._fix_indexes(data_dict)
else:
data_frame = pd.DataFrame(data_dict)
self._layout.add_plot(plot_index, data_frame, title)
plot_index += 1
self._layout.finish()
def _fix_indexes(self, data_dict):
"""
In case of multiple traces with different indexes (i.e. x-axis values),
create new ones with same indexes
"""
# 1) Check if we are processing multiple traces
if len(data_dict) <= 1:
raise ValueError("Cannot fix indexes for single trace. "\
"Expecting multiple traces!")
# 2) Merge the data frames to obtain common indexes
df_columns = list(data_dict.keys())
dedup_data = [handle_duplicate_index(s) for s in data_dict.values()]
ret = pd.Series(dedup_data, index=df_columns)
merged_df = pd.concat(ret.get_values(), axis=1)
merged_df.columns = df_columns
# 3) Fill NaN values depending on drawstyle
if self._attr["drawstyle"] == "steps-post":
merged_df = merged_df.ffill()
elif self._attr["drawstyle"] == "steps-pre":
merged_df = merged_df.bfill()
elif self._attr["drawstyle"] == "steps-mid":
merged_df = merged_df.ffill()
else:
# default
merged_df = merged_df.interpolate()
return merged_df
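# Sketch of what _fix_indexes does (illustrative values): two series sampled
# at different times are merged on the union of their indexes and the NaN
# holes are filled according to drawstyle, e.g. with "steps-post" (ffill):
#   s1: {0.0: 1, 0.2: 2}   s2: {0.1: 5}
#   merged index [0.0, 0.1, 0.2] -> s1 = [1, 1, 2], s2 = [NaN, 5, 5]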
|
mdrumond/tensorflow
|
tensorflow/contrib/eager/python/network_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.eager.python import network
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.layers import core
# pylint: disable=not-callable
class MyNetwork(network.Network):
def __init__(self):
super(MyNetwork, self).__init__(name="abcd")
self.l1 = self.add_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.l1(x)
class NetworkTest(test.TestCase):
def testTrainableAttribute(self):
net = network.Network()
self.assertTrue(net.trainable)
with self.assertRaises(AttributeError):
net.trainable = False
self.assertTrue(net.trainable)
def testNetworkCall(self):
net = MyNetwork()
net(constant_op.constant([[2.0]])) # Force variables to be created.
self.assertEqual(1, len(net.trainable_variables))
net.trainable_variables[0].assign([[17.0]])
# TODO(josh11b): Support passing Python values to networks.
result = net(constant_op.constant([[2.0]]))
self.assertEqual(34.0, result.numpy())
def testNetworkAsAGraph(self):
self.skipTest("TODO(ashankar,josh11b): FIX THIS")
# Verify that we're using ResourceVariables
def testNetworkVariablesDoNotInterfere(self):
self.skipTest("TODO: FIX THIS")
net1 = MyNetwork()
net2 = MyNetwork()
one = constant_op.constant([[1.]])
print(type(net1(one)))
net2(one)
net1.trainable_weights[0].assign(constant_op.constant([[1.]]))
net2.trainable_weights[0].assign(constant_op.constant([[2.]]))
print("NET1")
print(net1.name)
print(net1.variables)
print(net1(one))
print("NET2")
print(net2.name)
print(net2.variables)
print(net2(one))
class SequentialTest(test.TestCase):
def testTwoLayers(self):
# Create a sequential network with one layer.
net = network.Sequential([core.Dense(1, use_bias=False)])
# Set that layer's weights so it multiplies by 3
l1 = net.get_layer(index=0)
net(constant_op.constant([[2.0]])) # Create l1's variables
self.assertEqual(1, len(l1.trainable_variables))
l1.trainable_variables[0].assign([[3.0]])
self.assertEqual(21.0, net(constant_op.constant([[7.0]])).numpy())
# Add a second layer to the network.
l2 = core.Dense(1, use_bias=False)
net.add_layer(l2)
# Set the second layer's weights so it multiplies by 11
net(constant_op.constant([[2.0]])) # Create l2's variables
self.assertEqual(1, len(l2.trainable_variables))
l2.trainable_variables[0].assign([[11.0]])
self.assertEqual(231.0, net(constant_op.constant([[7.0]])).numpy())
if __name__ == "__main__":
test.main()
|
gunan/tensorflow
|
tensorflow/python/keras/engine/training_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class BatchCounterCallback(callbacks.Callback):
def __init__(self):
self.batch_begin_count = 0
self.batch_end_count = 0
def on_batch_begin(self, *args, **kwargs):
self.batch_begin_count += 1
def on_batch_end(self, *args, **kwargs):
self.batch_end_count += 1
class TestTrainingWithDataset(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_calling_model_on_same_dataset(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_dataset(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat() # Infinite dataset.
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
# Test with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
# Test with validation split
with self.assertRaises(ValueError):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, r'`sample_weight` argument is not supported .+dataset'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
with self.assertRaisesRegexp(
ValueError, '(you should not specify a target)|'
'(`y` argument is not supported when using dataset as input.)'):
model.fit(dataset, dataset,
epochs=1, steps_per_epoch=2, verbose=0)
# With an infinite dataset, `steps_per_epoch`/`steps` argument is required.
with self.assertRaises(ValueError):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaises(ValueError):
model.evaluate(dataset, verbose=0)
with self.assertRaises(ValueError):
model.predict(dataset, verbose=0)
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_multi_input_output_dataset(self):
input_a = keras.layers.Input(shape=(3,), name='input_1')
input_b = keras.layers.Input(shape=(3,), name='input_2')
dense = keras.layers.Dense(4, name='dense')
dropout = keras.layers.Dropout(0.5, name='dropout')
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
input_a_np = np.random.random((10, 3)).astype(dtype=np.float32)
input_b_np = np.random.random((10, 3)).astype(dtype=np.float32)
output_d_np = np.random.random((10, 4)).astype(dtype=np.float32)
output_e_np = np.random.random((10, 4)).astype(dtype=np.float32)
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset_tuple, steps=2, verbose=1)
# Test with dict
input_dict = {'input_1': input_a_np, 'input_2': input_b_np}
if testing_utils.get_model_type() == 'subclass':
output_dict = {'output_1': output_d_np, 'output_2': output_e_np}
else:
output_dict = {'dense': output_d_np, 'dropout': output_e_np}
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
input_dict, output_dict))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset_dict, steps=2, verbose=1)
predict_dataset_dict = dataset_ops.Dataset.from_tensor_slices(
input_dict)
predict_dataset_dict = predict_dataset_dict.repeat(100)
predict_dataset_dict = predict_dataset_dict.batch(10)
model.predict(predict_dataset_dict, steps=1)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sample_weights(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sample_weights_correctness(self):
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(
1, kernel_initializer='ones', bias_initializer='zeros', name='dense')(x)
model = keras.Model(x, y)
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights)).batch(2)
result = model.evaluate(ds, verbose=1)
# The per-sample loss is multiplied by the corresponding sample weight. The
# average of these weighted losses is the return value of the `evaluate`
# call. For example, in the test above the average weighted loss is
# calculated in the following manner:
# ((2-0)^2 * 0.25) + ((4-1)^2 * 0.5) + ((6-2)^2 * 0.75) + ((8-3)^2 * 1)
# equals 42.5, and 42.5 / 4 = 10.625
self.assertEqual(result, 10.625)
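# Sanity check of the arithmetic above (illustrative, not part of the test):
#   squared errors: [2, 3, 4, 5] ** 2              -> [4, 9, 16, 25]
#   weighted:       [4*0.25, 9*0.5, 16*0.75, 25*1] -> [1, 4.5, 12, 25]
#   mean:           (1 + 4.5 + 12 + 25) / 4 = 42.5 / 4 = 10.625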
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sparse_labels(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
model.compile(
optimizer,
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=10, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
@keras_parameterized.run_all_keras_modes
def test_dataset_fit_correctness(self):
class SumLayer(keras.layers.Layer):
def build(self, _):
self.w = self.add_weight('w', ())
def call(self, inputs):
return keras.backend.sum(inputs, axis=1, keepdims=True) + self.w * 0
model = keras.Sequential([SumLayer(input_shape=(2,))])
model.compile(
'rmsprop',
loss='mae',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((40, 2), dtype=np.float32)
inputs[10:20, :] = 2
inputs[20:30, :] = 1
inputs[30:, :] = 4
targets = np.zeros((40, 1), dtype=np.float32)
# Test correctness with `steps_per_epoch`.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
history = model.fit(train_dataset,
epochs=2, steps_per_epoch=2, verbose=1,
validation_data=val_dataset, validation_steps=2)
self.assertAllClose(history.history['loss'],
[inputs[:20].sum() / 20, inputs[20:].sum() / 20])
# The validation dataset will be reset at the end of each validation run.
self.assertAllClose(history.history['val_loss'],
[inputs[:20].sum() / 20, inputs[:20].sum() / 20])
# Test correctness with dataset reset.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
history = model.fit(train_dataset,
epochs=2, verbose=1, validation_data=val_dataset)
self.assertAllClose(
history.history['loss'],
[inputs.sum() / 40, inputs.sum() / 40])
self.assertAllClose(
history.history['val_loss'],
[inputs.sum() / 40, inputs.sum() / 40])
def test_dataset_input_shape_validation(self):
with ops.get_default_graph().as_default(), self.cached_session():
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile(optimizer='rmsprop', loss='mse')
# User forgets to batch the dataset
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(
ValueError,
r'expected (.*?) to have shape \(3,\) but got array with shape \(1,\)'
):
model.train_on_batch(dataset)
# Wrong input shape
inputs = np.zeros((10, 5))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
r'expected (.*?) to have shape \(3,\)'):
model.train_on_batch(dataset)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_known_cardinality_no_steps_arg(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.batch(10)
batch_counter = BatchCounterCallback()
history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])
self.assertLen(history.history['loss'], 2)
self.assertEqual(batch_counter.batch_end_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_unknown_cardinality_no_steps_arg(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
batch_counter = BatchCounterCallback()
history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])
self.assertLen(history.history['loss'], 2)
self.assertEqual(batch_counter.batch_end_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_finite_dataset_unknown_cardinality_no_step_with_train_and_val(self):
class CaptureStdout(object):
def __enter__(self):
self._stdout = sys.stdout
string_io = six.StringIO()
sys.stdout = string_io
self._stringio = string_io
return self
def __exit__(self, *args):
self.output = self._stringio.getvalue()
sys.stdout = self._stdout
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
batch_counter = BatchCounterCallback()
with CaptureStdout() as capture:
history = model.fit(
dataset,
epochs=2,
callbacks=[batch_counter],
validation_data=dataset.take(3))
lines = capture.output.splitlines()
self.assertIn('10/10', lines[-1])
self.assertLen(history.history['loss'], 2)
self.assertEqual(batch_counter.batch_begin_count, 21)
self.assertEqual(batch_counter.batch_end_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_unknown_cardinality_out_of_data(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
batch_counter = BatchCounterCallback()
with test.mock.patch.object(logging, 'warning') as mock_log:
# steps_per_epoch (200) is greater than the number of batches the dataset
# can produce (100 samples / batch size 10 = 10 batches). As this is
# unexpected, training will stop and not make it to the second epoch.
history = model.fit(
dataset,
epochs=2,
verbose=1,
callbacks=[batch_counter],
steps_per_epoch=200)
self.assertIn(
'ran out of data; interrupting training.', str(mock_log.call_args))
self.assertIn(
'can generate at least '
'`steps_per_epoch * epochs` batches (in this case, 400 batches). '
'You may need to use the repeat() function when '
'building your dataset.', str(mock_log.call_args))
self.assertLen(history.history['loss'], 1)
self.assertEqual(batch_counter.batch_end_count, 10)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
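# A hypothetical fix for the scenario above (not exercised by this test):
# repeating the dataset lets it serve steps_per_epoch * epochs batches, e.g.
#   dataset = dataset.repeat()  # infinite; steps_per_epoch bounds each epoch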
@keras_parameterized.run_all_keras_modes
def test_with_external_loss(self):
inp = keras.Input(shape=(4,), name='inp1')
out = keras.layers.Dense(2)(inp)
model = keras.Model(inp, out)
model.add_loss(math_ops.reduce_mean(out))
model.compile('rmsprop')
x = np.ones((10, 4))
# dataset contains only features, no labels.
dataset = dataset_ops.Dataset.from_tensor_slices(x).repeat(10).batch(10)
model.fit(dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_train_eval_with_steps(self):
# See b/142880049 for more details.
inp = keras.Input(shape=(4,), name='inp1')
out = keras.layers.Dense(2)(inp)
model = keras.Model(inp, out)
model.compile(
'rmsprop', loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 4), dtype=np.float32)
targets = np.random.randint(0, 2, size=100, dtype=np.int32)
training_ds = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).repeat().batch(10)
# Create eval dataset with generator, so that dataset won't contain the
# overall size metadata. Without eval_steps, we expect to run through all
# the data in this dataset every epoch.
def gen():
for _ in range(100):
yield (np.zeros(4, dtype=np.float32),
np.random.randint(0, 2, size=1, dtype=np.int32))
eval_ds = dataset_ops.Dataset.from_generator(
generator=gen,
output_types=('float64', 'int32'),
output_shapes=([4], [1])).batch(100)
batch_counter = BatchCounterCallback()
model.fit(
training_ds,
steps_per_epoch=10,
epochs=10,
validation_data=eval_ds,
callbacks=[batch_counter]
)
# Expect 10 training batches per epoch, i.e. 100 batches over 10 epochs.
self.assertEqual(batch_counter.batch_end_count, 100)
class TestMetricsWithDatasets(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_metrics_correctness_with_dataset(self):
layers = [
keras.layers.Dense(8, activation='relu', input_dim=4,
kernel_initializer='ones'),
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(layers, (4,))
model.compile(
loss='binary_crossentropy',
metrics=['accuracy', metrics_module.BinaryAccuracy()],
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
np.random.seed(123)
x = np.random.randint(10, size=(100, 4)).astype(np.float32)
y = np.random.randint(2, size=(100, 1)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(10)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(np.around(outs[1], decimals=1), 0.5)
self.assertEqual(np.around(outs[2], decimals=1), 0.5)
y = np.zeros((100, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
outs = model.evaluate(dataset, steps=10)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
if __name__ == '__main__':
test.main()
|
dshlai/oyprojectmanager
|
oyProjectManager/db/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
"""
Database Module
===============
This is where all the magic happens.
.. versionadded:: 0.2.0
SQLite3 Database:
To hold the information about all the data created
:class:`~oyProjectManager.models.project.Project`\ s,
:class:`~oyProjectManager.models.sequence.Sequence`\ s,
:class:`~oyProjectManager.models.shot.Shot`\ s,
:class:`~oyProjectManager.models.asset.Asset`\ s and
:class:`~oyProjectManager.models.version.VersionType`\ s
, there is a ".metadata.db" file in the repository root. This SQLite3
database has all the information about everything.
With this new extension it is much faster to query any data needed.
Querying data is very simple and fun: just call ``db.setup()`` and then use
``db.query`` to get the data.
For a simple example, let's get all the shots for a Sequence called
"TEST_SEQ" in the "TEST_PROJECT"::
from oyProjectManager import db
from oyProjectManager import Project, Sequence, Shot
# setup the database session
db.setup()
all_shots = Shot.query().join(Sequence).join(Project).\
filter(Project.name=="TEST_PROJECT").\
filter(Sequence.name=="TEST_SEQ").all()
that's it.
"""
import os
import logging
import sqlalchemy
import oyProjectManager
from oyProjectManager.db.declarative import Base
# SQLAlchemy database engine
engine = None
# SQLAlchemy session manager
session = None
query = None
# SQLAlchemy metadata
metadata = None
database_url = None
# create a logger
logger = logging.getLogger(__name__)
#logger.setLevel(logging.WARNING)
logger.setLevel(logging.DEBUG)
def setup(database_url_in=None):
"""Utility function that helps to connect the system to the given database.
Returns the created session
:param database_url_in: The database address, default is None. If the
database_url is skipped or given as None, the default database url
from the :mod:`oyProjectManager.config` will be used. This is good,
just call ``db.setup()`` and then use ``db.session`` and ``db.query``
to get the data.
:returns: sqlalchemy.orm.session
"""
global engine
global session
global query
global metadata
global database_url
# create engine
# TODO: create tests for this
if database_url_in is None:
logger.debug("using the default database_url from the config file")
# use the default database
conf = oyProjectManager.conf
database_url_in = conf.database_url
# expand user and env variables if any
# TODO: because the dialect part and the address part are now coming
# from one source, it is not possible to expand any variables in the path;
# try to use SQLAlchemy to separate the dialect and the address part,
# expand any variables, and then merge them again
#database_url_in = os.path.expanduser(
# os.path.expandvars(
# os.path.expandvars(
# database_url_in
# )
# )
#)
while "$" in database_url_in or "~" in database_url_in:
database_url_in = os.path.expanduser(
os.path.expandvars(
database_url_in
)
)
database_url = database_url_in
logger.debug("setting up database in %s" % database_url)
engine = sqlalchemy.create_engine(database_url, echo=False)
# create the tables
metadata = Base.metadata
metadata.create_all(engine)
# create the Session class
Session = sqlalchemy.orm.sessionmaker(bind=engine)
# create and save session object to session
session = Session()
query = session.query
# initialize the db
__init_db__()
# TODO: create a test to check if the returned session is session
return session
def __init_db__():
"""initializes the just setup database
It adds:
- Users
- VersionTypes
to the database.
"""
logger.debug("db is newly created, initializing the db")
global query
global session
# get the users from the config
from oyProjectManager import conf
# ------------------------------------------------------
# create the users
from oyProjectManager.models.auth import User
# get all users from db
users_from_db = query(User).all()
for user_data in conf.users_data:
name = user_data.get("name")
initials = user_data.get("initials")
email = user_data.get("email")
user_from_config = User(name, initials, email)
if user_from_config not in users_from_db:
session.add(user_from_config)
# ------------------------------------------------------
# add the VersionTypes
from oyProjectManager.models.version import VersionType
version_types_from_db = query(VersionType).all()
for version_type in conf.version_types:
version_type_from_conf = VersionType(**version_type)
if version_type_from_conf not in version_types_from_db:
session.add(version_type_from_conf)
session.commit()
logger.debug("finished initialization of the db")
|
shidasan/konoha2
|
src/vm/pygenlib2.py
|
import os, sys
###########
def verbose_print(msg):
print msg
###########
def linetrim(s):
return s.replace('\r', '').replace('\n','')
###
def sublast(s, s2):
return s[s.find(s2)+len(s2):]
def parse_package(fpath):
#p = fpath[:fpath.rfind('/')]
p = fpath
package = 'konoha'
if p.find('/konoha/') != -1:
return 'konoha'
elif p.find('/class/') != -1:
package = sublast(p, '/class/')
elif p.find('/package/') != -1:
package = '+' + sublast(p, '/package/')
elif p.find('/api/') != -1:
package = sublast(p, '/api/')
elif p.find('/driver/') != -1:
package = '#' + sublast(p, '/driver/')
if package.find('_.') > 0: return 'konoha'
if package.find('/') > 0:
return package.split('/')[0]
return package
# p = fpath.split('/')
# if p[-1].find('.') == -1: return p[-1]
# return p[-2]
###
def fpath_shortname(fpath):
p = fpath.split('/')
return p[-1].replace('.c', '')
###
def safedict(d, key, defv):
if d.has_key(key): return d[key]
d[key] = defv
return defv
###
###
def list_topair(list):
t1 = list[0]
t2 = list[1]
return t1, t2, list[2:]
def parse_funcparams(functype):
if not functype.endswith(')'):
debug_print('Invalid functype: %s' % functype)
t = functype.replace('(', ' ').replace(',', ' ').replace(')', '').split()
params = []
while len(t) > 1:
tt, tn, t = list_topair(t)
params.append(nz_cparam(tt, tn))
return params
###########
# ---------------------------------------------------------------------------
LINE = '''
/* ------------------------------------------------------------------------ */
'''
DLINE = '''
/* ======================================================================== */
'''
# ---------------------------------------------------------------------------
def write_println(f, msg = ''):
f.write(msg+'\n')
def write_line(f):
f.write(LINE)
def write_dline(f):
f.write(DLINE)
def write_comment(f, msg):
f.write('/* %s */\n' % msg)
def write_chapter(f, msg):
f.write(DLINE)
write_comment(f, msg)
def write_section(f, msg):
f.write(LINE)
write_comment(f, msg)
def write_define(f, name, value='', n=40):
s = '#define %s ' % name
while(len(s) < n) : s+=' '
f.write(s)
f.write(value)
f.write('\n')
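# Example (illustrative): write_define(f, 'K_PAGESIZE', '4096', n=24)
# emits "#define K_PAGESIZE" padded with spaces to column 24, then "4096".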
###
def write_ifndefine(f, name, value='', n=40):
f.write('#ifndef %s\n' % name)
write_define(f, name, value, n)
f.write('#endif\n')
###
def write_ifndef(f, name, value='', n=40):
f.write('#ifndef %s\n' % name)
write_define(f, name, value, n)
f.write('#endif\n')
def write_ifdef(f, n):
f.write('''
#ifdef %s''' % n.upper())
def write_else(f, n):
f.write('''
#else /*%s*/
''' % n.upper())
def write_endif(f, n):
f.write('''
#endif/*%s*/
''' % n.upper())
# ---------------------------------------------------------------------------
def write_BOM(f):
f.write("%c%c%c" % (0xef, 0xbb, 0xbf))
def write_license(f):
f.write('''/****************************************************************************
* KONOHA2 COPYRIGHT, LICENSE NOTICE, AND DISCLAIMER
*
* Copyright (c) 2006-2012, Kimio Kuramitsu <kimio at ynu.ac.jp>
* (c) 2008- Konoha Team konohaken@googlegroups.com
* All rights reserved.
*
* You may choose one of the following two licenses when you use konoha.
* If you want to use the latter license, please contact us.
*
* (1) GNU General Public License 3.0 (with K_UNDER_GPL)
* (2) Konoha Non-Disclosure License 1.0
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
''')
def write_begin_c(f):
f.write('''
#ifdef __cplusplus
extern "C" {
#endif
''')
def write_end_c(f):
f.write('''
#ifdef __cplusplus
}
#endif
''')
# ---------------------------------------------------------------------------
def getdict(d, n, defv):
if d.has_key(n): return d[n]
return defv
def read_settings(fn):
KNH_DATA = {}
try:
f = open(fn)
exec(f)
f.close()
return KNH_DATA
except (IOError, OSError), e:
print e
return KNH_DATA
# ---------------------------------------------------------------------------
def nz_fname(fname):
if fname.rfind('/') > 0: return fname[fname.rfind('/')+1:]
return fname
def open_h(fname, lists):
f = open(fname, 'w')
write_license(f)
d = nz_fname(fname).replace('.', '_').upper()
f.write('''
#ifndef %s
#define %s
''' % (d, d))
for i in lists:
f.write('''
#include%s''' % i)
if len(lists) > 0: f.write('\n\n')
write_begin_c(f)
write_dline(f)
return f
def open_h2(fname, lists):
f = open(fname, 'w')
write_license(f)
d = nz_fname(fname).replace('.', '_').upper()
f.write('''
#ifndef %s
#define %s
''' % (d, d))
for i in lists:
f.write('''
#include%s''' % i)
if len(lists) > 0: f.write('\n\n')
return f
# ---------------------------------------------------------------------------
def close_h(f, fname):
d = nz_fname(fname).replace('.', '_').upper()
write_end_c(f)
write_dline(f)
f.write('''
#endif/*%s*/
''' % d)
f.close()
# ---------------------------------------------------------------------------
def open_c(fname, lists, bom = None):
f = open(fname, 'w')
if bom == 'BOM': write_BOM(f)
write_license(f)
for i in lists:
f.write('''
#include%s''' % i)
if len(lists) > 0: f.write('\n\n')
write_begin_c(f)
write_dline(f)
return f
def close_c(f, fname):
write_end_c(f)
f.close()
def get_serial_number():
f = open('SERIAL_NUMBER')
n = int(f.readline())
f.close()
n += 1
f = open('SERIAL_NUMBER', 'w')
f.write('%d\n' % n)
f.close()
return n
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
def parse_options(option):
d = {}
if option is None: return d
for t in option.split():
if t.find('(') > 0:
t = t.replace('(', ' ').replace(')', '')
t = t.split()
d[t[0]] = t[1]
else:
d[t] = 1
return d
# ---------------------------------------------------------------------------
def check_ifdef(d):
ifdef = ''
endif = ''
if d.has_key('@ifdef'):
ifdef = '#ifdef KNH_IMPORT_%s_\n' % d['@ifdef']
endif = '#endif/*KNH_IMPORT_%s_*/\n' %d['@ifdef']
return ifdef, endif
# ---------------------------------------------------------------------------
def alias_lname(cname):
if cname.find('_') > 0:
return cname.split('_')[1]
return cname
def STRUCT_cname(cname):
return 'STRUCT_%s' % cname
def STRUCT_sname(cname):
return 'STRUCT_%s' % cname
def SAFE_cname(t) :
t = t.replace('..', '')
t = t.replace('!', '')
t = t.replace('[]', '')
t = t.replace('::', '__')
t = t.replace(':', '__')
return t
def CLASS_cname(cname) :
prefix = ''
if cname.endswith('[]'): prefix = 'A'
if cname.endswith('..'): prefix = 'I'
return '%sCLASS_%s' % (prefix, SAFE_cname(cname))
def T_cname(t) :
prefix = ''
if t.endswith("[]!"): prefix = 'NNA'
elif t.endswith("!") : prefix = 'NN'
if t.endswith('[]'): prefix = 'A'
if t.endswith('..'): prefix = 'NNI'
return '%sT_%s' % (prefix, SAFE_cname(t))
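# Illustrative name-mangling examples for the helpers above:
#   CLASS_cname('String[]') -> 'ACLASS_String'  (array prefix 'A')
#   T_cname('String!')      -> 'NNT_String'     (not-null prefix 'NN')
#   T_cname('String[]!')    -> 'NNAT_String'    (not-null array)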
def DEBUG_cname(cname):
return 'DEBUG_%s' % cname
def FN_name(fn):
return 'FN_%s' % fn
def SAFE_mname(mname):
return mname.replace('::', '__').replace(':', '__').replace('%', '_')
def MN_mname(mname):
return 'MN_%s' % SAFE_mname(mname)
# ---------------------------------------------------------------------------
DEBUG = None
def debug_print(msg):
if not DEBUG: print msg
def nz_dir(dir):
if dir.endswith('/'): return dir[:len(dir)-1]
return dir
#------------------------------------------------------------------------------------
FUNCMAP = {}
def FUNCMAP_found(funcname):
FUNCMAP[funcname] = funcname
def FUNCMAP_exists(funcname):
return FUNCMAP.has_key(funcname)
|
spreg-git/pysal
|
pysal/esda/mapclassify.py
|
"""
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = ['Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval',
'Fisher_Jenks', 'Fisher_Jenks_Sampled', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers']
from pysal.common import *
K = 5 # default number of classes in any map scheme with this as an argument
def quantile(y, k=4):
"""
Calculates the quantiles for an array
Parameters
----------
y : array
(n,1), values to classify
k : int
number of quantiles
Returns
-------
implicit : array
(n,1), quantile values
Examples
--------
>>> x = np.arange(1000)
>>> quantile(x)
array([ 249.75, 499.5 , 749.25, 999. ])
>>> quantile(x, k = 3)
array([ 333., 666., 999.])
>>>
Note that if there are enough ties that the quantile values repeat, we
collapse to pseudo quantiles, in which case the number of classes will be
less than k
>>> x = [1.0] * 100
>>> x.extend([3.0] * 40)
>>> len(x)
140
>>> y = np.array(x)
>>> quantile(y)
array([ 1., 3.])
"""
w = 100. / k
p = np.arange(w, 100 + w, w)
if p[-1] > 100.0:
p[-1] = 100.0
q = np.array([stats.scoreatpercentile(y, pct) for pct in p])
return np.unique(q)
def binC(y, bins):
"""
Bin categorical/qualitative data
Parameters
----------
y : array
(n,q), categorical values
bins : array
(k,1), unique values associated with each bin
Returns
-------
b : array
(n,q), bin membership, values between 0 and k-1
Examples
--------
>>> np.random.seed(1)
>>> x = np.random.randint(2, 8, (10, 3))
>>> bins = range(2, 8)
>>> x
array([[7, 5, 6],
[2, 3, 5],
[7, 2, 2],
[3, 6, 7],
[6, 3, 4],
[6, 7, 4],
[6, 5, 6],
[4, 6, 7],
[4, 6, 3],
[3, 2, 7]])
>>> y = binC(x, bins)
>>> y
array([[5, 3, 4],
[0, 1, 3],
[5, 0, 0],
[1, 4, 5],
[4, 1, 2],
[4, 5, 2],
[4, 3, 4],
[2, 4, 5],
[2, 4, 1],
[1, 0, 5]])
>>>
"""
if np.rank(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
for i, bin in enumerate(bins):
b[np.nonzero(y == bin)] = i
# check for non-binned items and print a warning if needed
vals = set(y.flatten())
for val in vals:
if val not in bins:
print 'warning: value not in bin: ', val
print 'bins: ', bins
return b
def bin(y, bins):
"""
bin interval/ratio data
Parameters
----------
y : array
(n,q), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
b : array
(n,q), bin ids, values between 0 and k-1
Examples
--------
>>> np.random.seed(1)
>>> x = np.random.randint(2, 20, (10, 3))
>>> bins = [10, 15, 20]
>>> b = bin(x, bins)
>>> x
array([[ 7, 13, 14],
[10, 11, 13],
[ 7, 17, 2],
[18, 3, 14],
[ 9, 15, 8],
[ 7, 13, 12],
[16, 6, 11],
[19, 2, 15],
[11, 11, 9],
[ 3, 2, 19]])
>>> b
array([[0, 1, 1],
[0, 1, 1],
[0, 2, 0],
[2, 0, 1],
[0, 1, 0],
[0, 1, 1],
[2, 0, 1],
[2, 0, 1],
[1, 1, 0],
[0, 0, 2]])
>>>
"""
if np.rank(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
i = len(bins)
if type(bins) != list:
bins = bins.tolist()
binsc = copy.copy(bins)
while binsc:
i -= 1
c = binsc.pop(-1)
b[np.nonzero(y <= c)] = i
return b
def bin1d(x, bins):
"""
place values of a 1-d array into bins and determine counts of values in
each bin
Parameters
----------
x : array
(n, 1), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
binIds : array
1-d array of integer bin Ids
counts: int
number of elements of x falling in each bin
Examples
--------
>>> x = np.arange(100, dtype = 'float')
>>> bins = [25, 74, 100]
>>> binIds, counts = bin1d(x, bins)
>>> binIds
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2])
>>> counts
array([26, 49, 25])
"""
left = [-sys.maxint]
left.extend(bins[0:-1])
right = bins
cuts = zip(left, right)
k = len(bins)
binIds = np.zeros(x.shape, dtype='int')
while cuts:
k -= 1
l, r = cuts.pop(-1)
binIds += (x > l) * (x <= r) * k
counts = np.bincount(binIds)
return (binIds, counts)
def load_example():
"""
Helper function for doc tests
"""
import pysal
np.random.seed(10)
dat = pysal.open(pysal.examples.get_path('calempdensity.csv'))
cal = np.array([record[-1] for record in dat])
return cal
def natural_breaks(values, k=5, itmax=100):
"""
natural breaks helper function
"""
values = np.array(values)
n = len(values)
uv = np.unique(values)
uvk = len(uv)
if uvk < k:
print 'Warning: Not enough unique values in array to form k classes'
print "Warning: setting k to %d" % uvk
k = uvk
sids = np.random.permutation(range(len(uv)))[0:k]
seeds = uv[sids]
seeds.sort()
diffs = abs(np.matrix([values - seed for seed in seeds]))
c0 = diffs.argmin(axis=0)
c0 = np.array(c0)[0]
solving = True
solved = False
rk = range(k)
it = 0
while solving:
# get centroids of clusters
seeds = [np.median(values[c0 == c]) for c in rk]
seeds.sort()
# for each value find closest centroid
diffs = abs(np.matrix([values - seed for seed in seeds]))
# assign value to that centroid
c1 = diffs.argmin(axis=0)
c1 = np.array(c1)[0]
#compare new classids to previous
d = abs(c1 - c0)
if d.sum() == 0:
solving = False
solved = True
else:
c0 = c1
it += 1
if it == itmax:
solving = False
class_ids = c1
cuts = [max(values[c1 == c]) for c in rk]
return sids, seeds, diffs, class_ids, solved, it, cuts
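# Illustrative call (hypothetical data): two tight clusters separate cleanly,
# e.g. natural_breaks([1, 2, 3, 12, 13, 14], k=2) converges with `cuts`
# (the last element of the returned tuple) equal to [3, 14].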
def _fisher_jenks_means(values, classes=5, sort=True):
"""
Jenks Optimal (Natural Breaks) algorithm implemented in Python.
The original Python code comes from here:
http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
and is based on a JAVA and Fortran code available here:
https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html
Returns class breaks such that classes are internally homogeneous while
assuring heterogeneity among classes.
"""
if sort:
values.sort()
mat1 = []
for i in range(0, len(values) + 1):
temp = []
for j in range(0, classes + 1):
temp.append(0)
mat1.append(temp)
mat2 = []
for i in range(0, len(values) + 1):
temp = []
for j in range(0, classes + 1):
temp.append(0)
mat2.append(temp)
for i in range(1, classes + 1):
mat1[1][i] = 1
mat2[1][i] = 0
for j in range(2, len(values) + 1):
mat2[j][i] = float('inf')
v = 0.0
for l in range(2, len(values) + 1):
s1 = 0.0
s2 = 0.0
w = 0.0
for m in range(1, l + 1):
i3 = l - m + 1
val = float(values[i3 - 1])
s2 += val * val
s1 += val
w += 1
v = s2 - (s1 * s1) / w
i4 = i3 - 1
if i4 != 0:
for j in range(2, classes + 1):
if mat2[l][j] >= (v + mat2[i4][j - 1]):
mat1[l][j] = i3
mat2[l][j] = v + mat2[i4][j - 1]
mat1[l][1] = 1
mat2[l][1] = v
k = len(values)
kclass = []
for i in range(0, classes + 1):
kclass.append(0)
kclass[classes] = float(values[len(values) - 1])
kclass[0] = float(values[0])
countNum = classes
while countNum >= 2:
pivot = mat1[k][countNum]
id = int(pivot - 2)
kclass[countNum - 1] = values[id]
k = int(pivot - 1)
countNum -= 1
return kclass
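# Illustrative call (hypothetical data): for two well-separated clusters the
# optimal break falls between them, e.g.
#   _fisher_jenks_means([1, 2, 3, 12, 13, 14], classes=2) -> [1.0, 3, 14.0]
# (lower bound of the first class, break value, upper bound of the last).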
class Map_Classifier:
"""
Abstract class for all map classifications
For an array :math:`y` of :math:`n` values, a map classifier places each value
:math:`y_i` into one of :math:`k` mutually exclusive and exhaustive classes.
Each classifer defines the classes based on different criteria, but in all
cases the following hold for the classifiers in PySAL:
.. math::
C_j^l < y_i \le C_j^u \quad \forall y_i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound :math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`~pysal.esda.mapclassify.Box_Plot`
* :class:`~pysal.esda.mapclassify.Equal_Interval`
* :class:`~pysal.esda.mapclassify.Fisher_Jenks`
* :class:`~pysal.esda.mapclassify.Fisher_Jenks_Sampled`
* :class:`~pysal.esda.mapclassify.Jenks_Caspall`
* :class:`~pysal.esda.mapclassify.Jenks_Caspall_Forced`
* :class:`~pysal.esda.mapclassify.Jenks_Caspall_Sampled`
* :class:`~pysal.esda.mapclassify.Max_P_Classifier`
* :class:`~pysal.esda.mapclassify.Maximum_Breaks`
* :class:`~pysal.esda.mapclassify.Natural_Breaks`
* :class:`~pysal.esda.mapclassify.Quantiles`
* :class:`~pysal.esda.mapclassify.Percentiles`
* :class:`~pysal.esda.mapclassify.Std_Mean`
* :class:`~pysal.esda.mapclassify.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that can be used to evaluate the properties of a specific classifier for different parameter values, or for automatic selection of a classifier and number of classes.
* :func:`~pysal.esda.mapclassify.gadf`
* :class:`~pysal.esda.mapclassify.K_classifiers`
References
----------
Slocum, T.A., R.B. McMaster, F.C. Kessler and H.H. Howard (2009) *Thematic Cartography and Geovisualization*. Pearson Prentice Hall, Upper Saddle River.
"""
def __init__(self, y):
self.name = 'Map Classifier'
if hasattr(y, 'values'):
y = y.values # fix for pandas
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
k1 = self.k - 1
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
rows = []
for i, up in enumerate(self.bins):
if i == 0:
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
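# Minimal sketch of a custom classifier (illustrative, not part of PySAL):
# a subclass only needs to set self.k and implement _set_bins so that
# self.bins holds k monotonically increasing upper bounds, e.g.
#   class Two_Bins(Map_Classifier):
#       def __init__(self, y, cut):
#           self.k = 2
#           self._cut = cut
#           Map_Classifier.__init__(self, y)
#           self.name = 'Two_Bins'
#       def _set_bins(self):
#           self.bins = np.array([self._cut, max(self.y)])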
class Equal_Interval(Map_Classifier):
"""
Equal Interval Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
each value is the id of the class the observation belongs to
yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> ei = Equal_Interval(cal, k = 5)
>>> ei.k
5
>>> ei.counts
array([57, 0, 0, 0, 1])
>>> ei.bins
array([ 822.394, 1644.658, 2466.922, 3289.186, 4111.45 ])
>>>
Notes
-----
Intervals defined to have equal width:
.. math::
bins_j = min(y)+w*(j+1)
with :math:`w=\\frac{\\max(y)-\\min(y)}{k}`
"""
def __init__(self, y, k=K):
"""
see class docstring
"""
self.k = k
Map_Classifier.__init__(self, y)
self.name = 'Equal Interval'
def _set_bins(self):
y = self.y
k = self.k
max_y = max(y)
min_y = min(y)
rg = max_y - min_y
width = rg * 1. / k
cuts = np.arange(min_y + width, max_y + width, width)
if len(cuts) > self.k: # handle overshooting
cuts = cuts[0:k]
cuts[-1] = max_y
bins = cuts.copy()
self.bins = bins
class Percentiles(Map_Classifier):
"""
Percentiles Map Classification
Parameters
----------
y : array
attribute to classify
pct : array
percentiles default=[1,10,50,90,99,100]
Attributes
----------
yb : array
bin ids for observations (numpy array n x 1)
bins : array
the upper bounds of each class (numpy array k x 1)
k : int
the number of classes
counts : int
the number of observations falling in each class (numpy array k x 1)
Examples
--------
>>> cal = load_example()
>>> p = Percentiles(cal)
>>> p.bins
array([ 1.35700000e-01, 5.53000000e-01, 9.36500000e+00,
2.13914000e+02, 2.17994800e+03, 4.11145000e+03])
>>> p.counts
array([ 1, 5, 23, 23, 5, 1])
>>> p2 = Percentiles(cal, pct = [50, 100])
>>> p2.bins
array([ 9.365, 4111.45 ])
>>> p2.counts
array([29, 29])
>>> p2.k
2
"""
def __init__(self, y, pct=[1, 10, 50, 90, 99, 100]):
self.pct = pct
Map_Classifier.__init__(self, y)
self.name = 'Percentiles'
def _set_bins(self):
y = self.y
pct = self.pct
self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
self.k = len(self.bins)
class Box_Plot(Map_Classifier):
"""
Box_Plot Map Classification
Parameters
----------
y : array
attribute to classify
hinge : float
multiplier for IQR
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(n,1), the upper bounds of each class (monotonic)
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
low_outlier_ids : array
indices of observations that are low outliers
high_outlier_ids : array
indices of observations that are high outliers
Notes
-----
The bins are set as follows::
bins[0] = q[0]-hinge*IQR
bins[1] = q[0]
bins[2] = q[1]
bins[3] = q[2]
bins[4] = q[2]+hinge*IQR
bins[5] = inf (see Notes)
where q is an array of the first three quartiles of y and
IQR=q[2]-q[0]
If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high outliers,
otherwise, there will be 6 classes and at least one high outlier.
Examples
--------
>>> cal = load_example()
>>> bp = Box_Plot(cal)
>>> bp.bins
array([ -5.28762500e+01, 2.56750000e+00, 9.36500000e+00,
3.95300000e+01, 9.49737500e+01, 4.11145000e+03])
>>> bp.counts
array([ 0, 15, 14, 14, 6, 9])
>>> bp.high_outlier_ids
array([ 0, 6, 18, 29, 33, 36, 37, 40, 42])
>>> cal[bp.high_outlier_ids]
array([ 329.92, 181.27, 370.5 , 722.85, 192.05, 110.74,
4111.45, 317.11, 264.93])
>>> bx = Box_Plot(np.arange(100))
>>> bx.bins
array([ -49.5 , 24.75, 49.5 , 74.25, 148.5 ])
"""
def __init__(self, y, hinge=1.5):
"""
Parameters
----------
y : array (n,1)
attribute to classify
hinge : float
multiple of inter-quartile range (default=1.5)
"""
self.hinge = hinge
Map_Classifier.__init__(self, y)
self.name = 'Box Plot'
def _set_bins(self):
y = self.y
pct = [25, 50, 75, 100]
bins = [stats.scoreatpercentile(y, p) for p in pct]
iqr = bins[-2] - bins[0]
self.iqr = iqr
pivot = self.hinge * iqr
left_fence = bins[0] - pivot
right_fence = bins[-2] + pivot
if right_fence < bins[-1]:
bins.insert(-1, right_fence)
else:
bins[-1] = right_fence
bins.insert(0, left_fence)
self.bins = np.array(bins)
self.k = len(pct)
def _classify(self):
Map_Classifier._classify(self)
self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
self.high_outlier_ids = np.nonzero(self.yb == 5)[0]
class Quantiles(Map_Classifier):
"""
Quantile Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
each value is the id of the class the observation belongs to
yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> q = Quantiles(cal, k = 5)
>>> q.bins
array([ 1.46400000e+00, 5.79800000e+00, 1.32780000e+01,
5.46160000e+01, 4.11145000e+03])
>>> q.counts
array([12, 11, 12, 11, 12])
>>>
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = 'Quantiles'
def _set_bins(self):
y = self.y
k = self.k
self.bins = quantile(y, k=k)
class Std_Mean(Map_Classifier):
"""
Standard Deviation and Mean Map Classification
Parameters
----------
y : array
(n,1), values to classify
multiples : array
the multiples of the standard deviation to add/subtract from
the sample mean to define the bins, default=[-2,-1,1,2]
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> st = Std_Mean(cal)
>>> st.k
5
>>> st.bins
array([ -967.36235382, -420.71712519, 672.57333208, 1219.21856072,
4111.45 ])
>>> st.counts
array([ 0, 0, 56, 1, 1])
>>>
>>> st3 = Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
>>> st3.bins
array([-1514.00758246, -694.03973951, 945.8959464 , 1765.86378936,
4111.45 ])
>>> st3.counts
array([ 0, 0, 57, 0, 1])
>>>
"""
def __init__(self, y, multiples=[-2, -1, 1, 2]):
self.multiples = multiples
Map_Classifier.__init__(self, y)
self.name = 'Std_Mean'
def _set_bins(self):
y = self.y
s = y.std(ddof=1)
m = y.mean()
cuts = [m + s * w for w in self.multiples]
y_max = y.max()
if cuts[-1] < y_max:
cuts.append(y_max)
self.bins = np.array(cuts)
self.k = len(cuts)
class Maximum_Breaks(Map_Classifier):
"""
Maximum Breaks Map Classification
Parameters
----------
y : array
(n, 1), values to classify
k : int
number of classes required
mindiff : float
The minimum difference between class breaks
Attributes
----------
yb : array
(n, 1), bin ids for observations
bins : array
(k, 1), the upper bounds of each class
k : int
the number of classes
counts : array
        (k, 1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> mb = Maximum_Breaks(cal, k = 5)
>>> mb.k
5
>>> mb.bins
array([ 146.005, 228.49 , 546.675, 2417.15 , 4111.45 ])
>>> mb.counts
array([50, 2, 4, 1, 1])
>>>
"""
def __init__(self, y, k=5, mindiff=0):
self.k = k
self.mindiff = mindiff
Map_Classifier.__init__(self, y)
self.name = 'Maximum_Breaks'
    def _set_bins(self):
        xs = self.y.copy()
k = self.k
xs.sort()
min_diff = self.mindiff
d = xs[1:] - xs[:-1]
diffs = d[np.nonzero(d > min_diff)]
diffs = sp.unique(diffs)
k1 = k - 1
if len(diffs) > k1:
diffs = diffs[-k1:]
mp = []
self.cids = []
for diff in diffs:
ids = np.nonzero(d == diff)
for id in ids:
self.cids.append(id[0])
cp = ((xs[id] + xs[id + 1]) / 2.)
mp.append(cp[0])
mp.append(xs[-1])
mp.sort()
self.bins = np.array(mp)
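# Worked sketch of Maximum_Breaks: for xs = [1, 2, 10, 11, 30] and k = 3,
# the two largest gaps are 8 (between 2 and 10) and 19 (between 11 and 30),
# so the breaks fall at the midpoints 6.0 and 20.5, plus the maximum 30.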
class Natural_Breaks(Map_Classifier):
"""
Natural Breaks Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
initial : int
number of initial solutions to generate, (default=100)
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import numpy as np
>>> np.random.seed(10)
>>> cal = load_example()
>>> nb = Natural_Breaks(cal, k = 5)
>>> nb.k
5
>>> nb.counts
array([14, 13, 14, 10, 7])
>>> nb.bins
array([ 1.81000000e+00, 7.60000000e+00, 2.98200000e+01,
1.81270000e+02, 4.11145000e+03])
>>> x = np.array([1] * 50)
>>> x[-1] = 20
>>> nb = Natural_Breaks(x, k = 5, initial = 0)
Warning: Not enough unique values in array to form k classes
Warning: setting k to 2
>>> nb.bins
array([ 1, 20])
>>> nb.counts
array([49, 1])
Notes
-----
    There is a tradeoff here between speed and consistency of the
    classification. If you want more speed, set initial to a smaller value
    (0 would result in the best speed). If you want more consistent classes
    in multiple runs of Natural_Breaks on the same data, set initial to a
    higher value.
"""
def __init__(self, y, k=K, initial=100):
self.k = k
self.initial = initial
Map_Classifier.__init__(self, y)
self.name = 'Natural_Breaks'
def _set_bins(self):
x = self.y.copy()
k = self.k
res0 = natural_breaks(x, k)
fit = res0[2].sum()
for i in xrange(self.initial):
res = natural_breaks(x, k)
fit_i = res[2].sum()
if fit_i < fit:
res0 = res
self.bins = np.array(res0[-1])
self.k = len(self.bins)
self.iterations = res0[-2]
class Fisher_Jenks(Map_Classifier):
"""
Fisher Jenks optimal classifier - mean based
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> fj = Fisher_Jenks(cal)
>>> fj.adcm
799.24000000000001
>>> fj.bins
array([ 75.29, 192.05, 370.5 , 722.85, 4111.45])
>>> fj.counts
array([49, 3, 4, 1, 1])
>>>
"""
def __init__(self, y, k=K):
nu = len(np.unique(y))
if nu < k:
raise ValueError("Fewer unique values than specified classes.")
self.k = k
Map_Classifier.__init__(self, y)
self.name = "Fisher_Jenks"
def _set_bins(self):
x = self.y.copy()
self.bins = np.array(_fisher_jenks_means(x, classes=self.k)[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
"""
Fisher Jenks optimal classifier - mean based using random sample
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
pct : float
The percentage of n that should form the sample
If pct is specified such that n*pct > 1000, then
pct = 1000./n, unless truncate is False
truncate : boolean
        truncate pct in cases where pct * n > 1000 (default True)
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
(Turned off due to timing being different across hardware)
"""
def __init__(self, y, k=K, pct=0.10, truncate=True):
self.k = k
n = y.size
if (pct * n > 1000) and truncate:
pct = 1000. / n
        ids = np.random.random_integers(0, n - 1, int(n * pct))
yr = y[ids]
yr[-1] = max(y) # make sure we have the upper bound
yr[0] = min(y) # make sure we have the min
self.original_y = y
self.pct = pct
self.yr = yr
self.yr_n = yr.size
Map_Classifier.__init__(self, yr)
self.yb, self.counts = bin1d(y, self.bins)
self.name = "Fisher_Jenks_Sampled"
self.y = y
self._summary() # have to recalculate summary stats
def _set_bins(self):
fj = Fisher_Jenks(self.y, self.k)
self.bins = fj.bins
class Jenks_Caspall(Map_Classifier):
"""
Jenks Caspall Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> jc = Jenks_Caspall(cal, k = 5)
>>> jc.bins
array([ 1.81000000e+00, 7.60000000e+00, 2.98200000e+01,
1.81270000e+02, 4.11145000e+03])
>>> jc.counts
array([14, 13, 14, 10, 7])
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = "Jenks_Caspall"
def _set_bins(self):
x = self.y.copy()
k = self.k
# start with quantiles
q = quantile(x, k)
solving = True
xb, cnts = bin1d(x, q)
#class means
if x.ndim == 1:
x.shape = (x.size, 1)
n, k = x.shape
xm = [np.median(x[xb == i]) for i in np.unique(xb)]
xb0 = xb.copy()
q = xm
it = 0
rk = range(self.k)
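        # What follows is a 1-d analogue of k-medians: each observation is
        # reassigned to the class with the nearest median and the medians
        # are recomputed, until the assignment stabilises.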
while solving:
xb = np.zeros(xb0.shape, int)
d = abs(x - q)
xb = d.argmin(axis=1)
if (xb0 == xb).all():
solving = False
else:
xb0 = xb
it += 1
q = np.array([np.median(x[xb == i]) for i in rk])
cuts = np.array([max(x[xb == i]) for i in sp.unique(xb)])
cuts.shape = (len(cuts),)
self.bins = cuts
self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
"""
Jenks Caspall Map Classification using a random sample
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
pct : float
The percentage of n that should form the sample
If pct is specified such that n*pct > 1000, then pct = 1000./n
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> x = np.random.random(100000)
>>> jc = Jenks_Caspall(x)
>>> jcs = Jenks_Caspall_Sampled(x)
>>> jc.bins
array([ 0.19770952, 0.39695769, 0.59588617, 0.79716865, 0.99999425])
>>> jcs.bins
array([ 0.18877882, 0.39341638, 0.6028286 , 0.80070925, 0.99999425])
>>> jc.counts
array([19804, 20005, 19925, 20178, 20088])
>>> jcs.counts
array([18922, 20521, 20980, 19826, 19751])
>>>
# not for testing since we get different times on different hardware
# just included for documentation of likely speed gains
#>>> t1 = time.time(); jc = Jenks_Caspall(x); t2 = time.time()
#>>> t1s = time.time(); jcs = Jenks_Caspall_Sampled(x); t2s = time.time()
#>>> t2 - t1; t2s - t1s
#1.8292930126190186
#0.061631917953491211
Notes
-----
This is intended for large n problems. The logic is to apply
Jenks_Caspall to a random subset of the y space and then bin the
complete vector y on the bins obtained from the subset. This would
trade off some "accuracy" for a gain in speed.
"""
def __init__(self, y, k=K, pct=0.10):
self.k = k
n = y.size
if pct * n > 1000:
pct = 1000. / n
        ids = np.random.random_integers(0, n - 1, int(n * pct))
yr = y[ids]
yr[0] = max(y) # make sure we have the upper bound
self.original_y = y
self.pct = pct
self.yr = yr
self.yr_n = yr.size
Map_Classifier.__init__(self, yr)
self.yb, self.counts = bin1d(y, self.bins)
self.name = "Jenks_Caspall_Sampled"
self.y = y
self._summary() # have to recalculate summary stats
def _set_bins(self):
jc = Jenks_Caspall(self.y, self.k)
self.bins = jc.bins
self.iterations = jc.iterations
class Jenks_Caspall_Forced(Map_Classifier):
"""
Jenks Caspall Map Classification with forced movements
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> jcf = Jenks_Caspall_Forced(cal, k = 5)
>>> jcf.k
5
>>> jcf.bins
array([[ 1.34000000e+00],
[ 5.90000000e+00],
[ 1.67000000e+01],
[ 5.06500000e+01],
[ 4.11145000e+03]])
>>> jcf.counts
array([12, 12, 13, 9, 12])
>>> jcf4 = Jenks_Caspall_Forced(cal, k = 4)
>>> jcf4.k
4
>>> jcf4.bins
array([[ 2.51000000e+00],
[ 8.70000000e+00],
[ 3.66800000e+01],
[ 4.11145000e+03]])
>>> jcf4.counts
array([15, 14, 14, 15])
>>>
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = "Jenks_Caspall_Forced"
def _set_bins(self):
x = self.y.copy()
k = self.k
q = quantile(x, k)
solving = True
xb, cnt = bin1d(x, q)
#class means
if x.ndim == 1:
x.shape = (x.size, 1)
n, tmp = x.shape
xm = [x[xb == i].mean() for i in np.unique(xb)]
xb0 = xb.copy()
q = xm
xbar = np.array([xm[xbi] for xbi in xb])
xbar.shape = (n, 1)
ss = x - xbar
ss *= ss
ss = sum(ss)
maxk = k - 1
down_moves = up_moves = 0
solving = True
it = 0
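        # Greedy refinement: repeatedly try moving the boundary observation
        # of each class up, then down, one class at a time, keeping any move
        # that lowers the within-class sum of squares; stop when neither
        # direction improves the fit.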
while solving:
# try upward moves first
moving_up = True
while moving_up:
class_ids = sp.unique(xb)
nk = [sum(xb == j) for j in class_ids]
candidates = nk[:-1]
i = 0
up_moves = 0
while candidates:
nki = candidates.pop(0)
if nki > 1:
ids = np.nonzero(xb == class_ids[i])
mover = max(ids[0])
tmp = xb.copy()
tmp[mover] = xb[mover] + 1
tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
txbar = np.array([tm[xbi] for xbi in tmp])
txbar.shape = (n, 1)
tss = x - txbar
tss *= tss
tss = sum(tss)
if tss < ss:
xb = tmp
ss = tss
candidates = []
up_moves += 1
i += 1
if not up_moves:
moving_up = False
moving_down = True
while moving_down:
class_ids = sp.unique(xb)
nk = [sum(xb == j) for j in class_ids]
candidates = nk[1:]
i = 1
down_moves = 0
while candidates:
nki = candidates.pop(0)
if nki > 1:
ids = np.nonzero(xb == class_ids[i])
mover = min(ids[0])
mover_class = xb[mover]
target_class = mover_class - 1
tmp = xb.copy()
tmp[mover] = target_class
tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
txbar = np.array([tm[xbi] for xbi in tmp])
txbar.shape = (n, 1)
tss = x - txbar
tss *= tss
tss = sum(tss)
if tss < ss:
xb = tmp
ss = tss
candidates = []
down_moves += 1
i += 1
if not down_moves:
moving_down = False
if not up_moves and not down_moves:
solving = False
it += 1
cuts = [max(x[xb == i]) for i in sp.unique(xb)]
self.bins = np.array(cuts)
self.iterations = it
class User_Defined(Map_Classifier):
"""
User Specified Binning
Parameters
----------
y : array
(n,1), values to classify
bins : array
        (k,1), upper bounds of classes (have to be monotonically increasing)
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> bins = [20, max(cal)]
>>> bins
[20, 4111.4499999999998]
>>> ud = User_Defined(cal, bins)
>>> ud.bins
array([ 20. , 4111.45])
>>> ud.counts
array([37, 21])
>>> bins = [20, 30]
>>> ud = User_Defined(cal, bins)
>>> ud.bins
array([ 20. , 30. , 4111.45])
>>> ud.counts
array([37, 4, 17])
>>>
Notes
-----
If upper bound of user bins does not exceed max(y) we append an
additional bin.
"""
def __init__(self, y, bins):
if bins[-1] < max(y):
bins.append(max(y))
self.k = len(bins)
self.bins = np.array(bins)
self.y = y
Map_Classifier.__init__(self, y)
self.name = 'User Defined'
def _set_bins(self):
pass
class Max_P_Classifier(Map_Classifier):
"""
Max_P Map Classification
Based on Max_p regionalization algorithm
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
initial : int
number of initial solutions to use prior to swapping
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import pysal
>>> cal = pysal.esda.mapclassify.load_example()
>>> mp = pysal.Max_P_Classifier(cal)
>>> mp.bins
array([ 8.7 , 16.7 , 20.47, 66.26, 4111.45])
>>> mp.counts
array([29, 8, 1, 10, 10])
"""
def __init__(self, y, k=K, initial=1000):
self.k = k
self.initial = initial
Map_Classifier.__init__(self, y)
self.name = "Max_P"
def _set_bins(self):
x = self.y.copy()
k = self.k
q = quantile(x, k)
if x.ndim == 1:
x.shape = (x.size, 1)
n, tmp = x.shape
x.sort(axis=0)
# find best of initial solutions
solution = 0
best_tss = x.var() * x.shape[0]
tss_all = np.zeros((self.initial, 1))
while solution < self.initial:
remaining = range(n)
seeds = [np.nonzero(di == min(
di))[0][0] for di in [np.abs(x - qi) for qi in q]]
rseeds = np.random.permutation(range(k)).tolist()
tmp = [remaining.remove(seed) for seed in seeds]
self.classes = classes = []
tmp = [classes.append([seed]) for seed in seeds]
while rseeds:
seed_id = rseeds.pop()
current = classes[seed_id]
growing = True
while growing:
current = classes[seed_id]
low = current[0]
high = current[-1]
left = low - 1
right = high + 1
move_made = False
if left in remaining:
current.insert(0, left)
remaining.remove(left)
move_made = True
if right in remaining:
current.append(right)
remaining.remove(right)
move_made = True
if move_made:
classes[seed_id] = current
else:
growing = False
tss = _fit(self.y, classes)
tss_all[solution] = tss
if tss < best_tss:
best_solution = classes
best_it = solution
best_tss = tss
solution += 1
classes = best_solution
self.best_it = best_it
self.tss = best_tss
self.a2c = a2c = {}
self.tss_all = tss_all
for r, cl in enumerate(classes):
for a in cl:
a2c[a] = r
swapping = True
it = 0
while swapping:
rseeds = np.random.permutation(range(k)).tolist()
total_moves = 0
while rseeds:
id = rseeds.pop()
growing = True
total_moves = 0
while growing:
target = classes[id]
left = target[0] - 1
right = target[-1] + 1
n_moves = 0
if left in a2c:
left_class = classes[a2c[left]]
if len(left_class) > 1:
a = left_class[-1]
if self._swap(left_class, target, a):
target.insert(0, a)
left_class.remove(a)
a2c[a] = id
n_moves += 1
if right in a2c:
right_class = classes[a2c[right]]
if len(right_class) > 1:
a = right_class[0]
if self._swap(right_class, target, a):
target.append(a)
right_class.remove(a)
n_moves += 1
a2c[a] = id
if not n_moves:
growing = False
total_moves += n_moves
if not total_moves:
swapping = False
xs = self.y.copy()
xs.sort()
self.bins = np.array([xs[cl][-1] for cl in classes])
def _ss(self, class_def):
"""calculates sum of squares for a class"""
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css)
def _swap(self, class1, class2, a):
"""evaluate cost of moving a from class1 to class2"""
ss1 = self._ss(class1)
ss2 = self._ss(class2)
tss1 = ss1 + ss2
class1c = copy.copy(class1)
class2c = copy.copy(class2)
class1c.remove(a)
class2c.append(a)
ss1 = self._ss(class1c)
ss2 = self._ss(class2c)
tss2 = ss1 + ss2
if tss1 < tss2:
return False
else:
return True
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
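# Example of _fit (hypothetical values): for y = np.array([1., 2., 10.]) and
# classes = [[0, 1], [2]], the within-class sums of squares are 0.5 and 0.0,
# so _fit returns 0.5.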
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
"""
Evaluate the Goodness of Absolute Deviation Fit of a Classifier
Finds the minimum value of k for which gadf>pct
Parameters
----------
y : array
(n, 1) values to be classified
    method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
maxk : int
maximum value of k to evaluate
pct : float
The percentage of GADF to exceed
Returns
-------
k : int
number of classes
cl : object
instance of the classifier at k
gadf : float
goodness of absolute deviation fit
Examples
--------
>>> cal = load_example()
>>> qgadf = gadf(cal)
>>> qgadf[0]
15
>>> qgadf[-1]
0.37402575909092828
Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to
0.2 we see quintiles as a result
>>> qgadf2 = gadf(cal, pct = 0.2)
>>> qgadf2[0]
5
>>> qgadf2[-1]
0.21710231966462412
>>>
Notes
-----
The GADF is defined as:
.. math::
GADF = 1 - \sum_c \sum_{i \in c} |y_i - y_{c,med}| / \sum_i |y_i - y_{med}|
where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
the median for class :math:`c`.
See Also
--------
K_classifiers
"""
y = np.array(y)
adam = (np.abs(y - np.median(y))).sum()
for k in range(2, maxk + 1):
cl = kmethods[method](y, k)
gadf = 1 - cl.adcm / adam
if gadf > pct:
break
return (k, cl, gadf)
class K_classifiers:
"""
    Evaluate all k-classifiers and pick the optimal one based on k and GADF
Parameters
----------
y : array
(n,1), values to be classified
pct : float
The percentage of GADF to exceed
Attributes
----------
best : object
instance of the optimal Map_Classifier
results : dictionary
        keys are classifier names, values are the Map_Classifier instances with the best pct for each classifier
Examples
--------
>>> cal = load_example()
>>> ks = K_classifiers(cal)
>>> ks.best.name
'Fisher_Jenks'
>>> ks.best.k
4
>>> ks.best.gadf
0.84810327199081048
>>>
Notes
-----
This can be used to suggest a classification scheme.
See Also
--------
gadf
"""
def __init__(self, y, pct=0.8):
results = {}
c = 0
best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
        k0 = best[0]      # gadf returns (k, cl, gadf)
        pct0 = best[-1]
keys = kmethods.keys()
keys.remove("Fisher_Jenks")
results["Fisher_Jenks"] = best
for method in keys:
results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
k1 = results[method][0]
pct1 = results[method][-1]
if (k1 < k0) or (k1 == k0 and pct0 < pct1):
best = results[method]
k0 = k1
pct0 = pct1
self.results = results
self.best = best[1]
def fj(x, k=5):
y = x.copy()
y.sort()
d = {}
initial = opt_part(y)
# d has key = number of groups
# value: list of ids, list of group tss, group size
split_id = [initial[0]]
tss = initial[1:] # left and right within tss
    sizes = [initial[0], len(y) - initial[0]]  # x[:i] has i values, x[i:] the rest
d[2] = [split_id, tss, sizes]
return d
def opt_part(x):
"""
Find optimal bi-partition of x values
Parameters
-----------
x : array
(n,1), Array of attribute values
Returns
-------
opt_i : int
partition index
tss : float
        total sum of squares
left_min : float
variance to the left of the break (including the break)
right_min : float
variance to the right of the break
"""
n = len(x)
tss = np.inf
opt_i = -999
    for i in xrange(1, n):
left = x[:i].var() * i
right = x[i:].var() * (n - i)
tss_i = left + right
if tss_i < tss:
opt_i = i
tss = tss_i
left_min = left
right_min = right
return (opt_i, tss, left_min, right_min)
|
sharad1126/owtf
|
tests/test_cases/framework/plugin/plugin_params_tests.py
|
from tests.testing_framework.base_test_cases import BaseTestCase
from flexmock import flexmock
from hamcrest import *
from framework.plugin.plugin_params import PluginParams
import re
from hamcrest.library.text.stringmatches import matches_regexp
class PluginParamsTests(BaseTestCase):
def before(self):
self.core_mock = flexmock()
self.plugin_params = PluginParams(self.core_mock, {'Args': ['arg1=val1', "arg2=val2"]})
def test_ProcessArgs(self):
assert_that(self.plugin_params.ProcessArgs(), is_(True))
assert_that(self.plugin_params.Args["arg1"], is_("val1"))
assert_that(self.plugin_params.Args["arg2"], is_("val2"))
def test_ListArgs_should_print_the_args_to_the_stdout(self):
args = {"arg_name": "arg_value"}
self.init_stdout_recording()
self.plugin_params.ListArgs(args)
output = self.get_recorded_stdout_and_close()
assert_that(output is not None)
def test_ShowParamInfo_should_print_the_params_to_the_stdout(self):
args = {"Description": "plugin description",
"Mandatory": {"arg_name": "arg_value"},
"Optional": {"arg_name": "arg_value"}}
plugin = self._get_plugin_example()
self.core_mock.Error = flexmock()
self.core_mock.Error.should_receive("FrameworkAbort").once()
self.init_stdout_recording()
self.plugin_params.ShowParamInfo(args, plugin)
output = self.get_recorded_stdout_and_close()
assert_that(output is not None)
def test_CheckArgList_should_be_ok(self):
plugin = self._get_plugin_example()
args = {"Mandatory": [], "Optional": [], "Description": ""}
assert_that(self.plugin_params.CheckArgList(args, plugin))
def test_CheckArgList_with_missing_Mandatory_and_Optional_args(self):
self.core_mock.Error = flexmock()
self.core_mock.Error.should_receive("Add").with_args(re.compile(".*Mandatory.*Optional")).once()
plugin = self._get_plugin_example()
self.plugin_params.CheckArgList({}, plugin)
def test_CheckArgList_with_missing_description_arg(self):
self.core_mock.Error = flexmock()
self.core_mock.Error.should_receive("Add").with_args(re.compile(".*requires.*Description")).once()
plugin = self._get_plugin_example()
args = {"Mandatory": [], "Optional": []}
self.plugin_params.CheckArgList(args, plugin)
def test_SetArgsBasic_sets_the_args_to_the_plugin(self):
plugin = self._get_plugin_example()
args = {"arg1": "val1", "arg2": "val2"}
self.plugin_params.Args = args
assert_that(self.plugin_params.SetArgsBasic(args, plugin), equal_to([args]))
assert_that(plugin["Args"], matches_regexp(".*arg1=val1.*"))
assert_that(plugin["Args"], matches_regexp(".*arg2=val2.*"))
def test_SetConfig_is_a_wrapper(self):
self.core_mock.Config = flexmock()
self.core_mock.Config.should_receive("Set").with_args("_arg1", "val1").once()
args = {"arg1": "val1"}
self.plugin_params.SetConfig(args)
def test_GetArgList_returns_the_args_we_ask_for(self):
arg_list = ["arg1", "arg2"]
plugin = self._get_plugin_example()
result = self.plugin_params.GetArgList(arg_list, plugin)
assert_that(result["arg1"], is_("val1"))
assert_that(result["arg2"], is_("val2"))
    def test_GetArgList_registers_an_error_for_not_found_args(self):
self.core_mock.Error = flexmock()
self.core_mock.Error.should_receive("Add").once()
self.core_mock.Config = flexmock()
self.core_mock.Config.should_receive("IsSet").and_return(False)
arg_list = ["non_existent_arg"]
plugin = self._get_plugin_example()
result = self.plugin_params.GetArgList(arg_list, plugin)
assert_that(result, is_({}))
assert_that(plugin["ArgError"], is_(True))
def test_GetArgs(self):
args = {"Mandatory": ["arg1"],
"Optional": ["arg2"],
"Description": "description"}
plugin = self._get_plugin_example()
self.core_mock.Config = flexmock()
self.core_mock.Config.should_receive("IsSet").and_return(False)
result = self.plugin_params.GetArgs(args, plugin)
assert_that(result[0]["arg1"], is_("val1"))
assert_that(result[0]["arg2"], is_("val2"))
    def _get_plugin_example(self):
        return {'Args': '',
                'Code': 'OWASP-IG-005',
                'Group': 'web',
                'Name': 'Application_Discovery',
                'File': 'Application_Discovery@OWASP-IG-005.py',
                'Title': 'Application Discovery',
                'Descrip': '',
                'Type': 'passive'}
|
toontownfunserver/Panda3D-1.9.0
|
direct/tkwidgets/Floater.py
|
"""
Floater Class: Velocity style controller for floating point values with
a label, entry (validated), and scale
"""
__all__ = ['Floater', 'FloaterWidget', 'FloaterGroup']
from direct.showbase.TkGlobal import *
from Tkinter import *
from Valuator import Valuator, VALUATOR_MINI, VALUATOR_FULL
from direct.task import Task
import math, sys, string, Pmw
FLOATER_WIDTH = 22
FLOATER_HEIGHT = 18
class Floater(Valuator):
def __init__(self, parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('style', VALUATOR_MINI, INITOPT),
)
self.defineoptions(kw, optiondefs)
# Initialize the superclass
Valuator.__init__(self, parent)
self.initialiseoptions(Floater)
def createValuator(self):
self._valuator = self.createcomponent('valuator',
(('floater', 'valuator'),),
None,
FloaterWidget,
(self.interior(),),
command = self.setEntry,
value = self['value'])
self._valuator._widget.bind('<Double-ButtonPress-1>', self.mouseReset)
def packValuator(self):
# Position components
if self._label:
self._label.grid(row=0, column=0, sticky = EW)
self._entry.grid(row=0, column=1, sticky = EW)
self._valuator.grid(row=0, column=2, padx = 2, pady = 2)
self.interior().columnconfigure(0, weight = 1)
class FloaterWidget(Pmw.MegaWidget):
def __init__(self, parent = None, **kw):
#define the megawidget options
INITOPT = Pmw.INITOPT
optiondefs = (
# Appearance
('width', FLOATER_WIDTH, INITOPT),
('height', FLOATER_HEIGHT, INITOPT),
('relief', RAISED, self.setRelief),
('borderwidth', 2, self.setBorderwidth),
('background', 'grey75', self.setBackground),
# Behavior
# Initial value of floater, use self.set to change value
('value', 0.0, INITOPT),
('numDigits', 2, self.setNumDigits),
# Command to execute on floater updates
('command', None, None),
# Extra data to be passed to command function
('commandData', [], None),
# Callback's to execute during mouse interaction
('preCallback', None, None),
('postCallback', None, None),
# Extra data to be passed to callback function, needs to be a list
('callbackData', [], None),
)
self.defineoptions(kw, optiondefs)
# Initialize the superclass
Pmw.MegaWidget.__init__(self, parent)
# Set up some local and instance variables
# Create the components
interior = self.interior()
# Current value
self.value = self['value']
# The canvas
width = self['width']
height = self['height']
self._widget = self.createcomponent('canvas', (), None,
Canvas, (interior,),
width = width,
height = height,
background = self['background'],
highlightthickness = 0,
scrollregion = (-width/2.0,
-height/2.0,
width/2.0,
height/2.0))
self._widget.pack(expand = 1, fill = BOTH)
# The floater icon
self._widget.create_polygon(-width/2.0, 0, -2.0, -height/2.0,
-2.0, height/2.0,
fill = 'grey50',
tags = ('floater',))
self._widget.create_polygon(width/2.0, 0, 2.0, height/2.0,
2.0, -height/2.0,
fill = 'grey50',
tags = ('floater',))
# Add event bindings
self._widget.bind('<ButtonPress-1>', self.mouseDown)
self._widget.bind('<B1-Motion>', self.updateFloaterSF)
self._widget.bind('<ButtonRelease-1>', self.mouseUp)
self._widget.bind('<Enter>', self.highlightWidget)
self._widget.bind('<Leave>', self.restoreWidget)
# Make sure input variables processed
self.initialiseoptions(FloaterWidget)
def set(self, value, fCommand = 1):
"""
self.set(value, fCommand = 1)
Set floater to new value, execute command if fCommand == 1
"""
# Send command if any
if fCommand and (self['command'] != None):
apply(self['command'], [value] + self['commandData'])
# Record value
self.value = value
def updateIndicator(self, value):
# Nothing visible to update on this type of widget
pass
def get(self):
"""
self.get()
Get current floater value
"""
return self.value
## Canvas callback functions
# Floater velocity controller
def mouseDown(self, event):
""" Begin mouse interaction """
        # Execute user redefinable callback function (if any)
self['relief'] = SUNKEN
if self['preCallback']:
apply(self['preCallback'], self['callbackData'])
self.velocitySF = 0.0
self.updateTask = taskMgr.add(self.updateFloaterTask,
'updateFloater')
self.updateTask.lastTime = globalClock.getFrameTime()
def updateFloaterTask(self, state):
"""
Update floaterWidget value based on current scaleFactor
Adjust for time to compensate for fluctuating frame rates
"""
currT = globalClock.getFrameTime()
dt = currT - state.lastTime
self.set(self.value + self.velocitySF * dt)
state.lastTime = currT
return Task.cont
def updateFloaterSF(self, event):
"""
        Update velocity scale factor based on mouse distance from origin
"""
x = self._widget.canvasx(event.x)
y = self._widget.canvasy(event.y)
offset = max(0, abs(x) - Valuator.deadband)
if offset == 0:
return 0
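        # The scale factor below grows exponentially with drag distance
        # (hypothetical illustration: if sfBase were 10, each sfDist pixels
        # past the deadband would multiply the velocity by 10).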
sf = math.pow(Valuator.sfBase,
self.minExp + offset/Valuator.sfDist)
if x > 0:
self.velocitySF = sf
else:
self.velocitySF = -sf
def mouseUp(self, event):
taskMgr.remove(self.updateTask)
self.velocitySF = 0.0
# Execute user redefinable callback function (if any)
if self['postCallback']:
apply(self['postCallback'], self['callbackData'])
self['relief'] = RAISED
def setNumDigits(self):
"""
Adjust minimum exponent to use in velocity task based
upon the number of digits to be displayed in the result
"""
self.minExp = math.floor(-self['numDigits']/
math.log10(Valuator.sfBase))
# Methods to modify floater characteristics
def setRelief(self):
self.interior()['relief'] = self['relief']
def setBorderwidth(self):
self.interior()['borderwidth'] = self['borderwidth']
def setBackground(self):
self._widget['background'] = self['background']
def highlightWidget(self, event):
self._widget.itemconfigure('floater', fill = 'black')
def restoreWidget(self, event):
self._widget.itemconfigure('floater', fill = 'grey50')
class FloaterGroup(Pmw.MegaToplevel):
def __init__(self, parent = None, **kw):
# Default group size
DEFAULT_DIM = 1
# Default value depends on *actual* group size, test for user input
DEFAULT_VALUE = [0.0] * kw.get('dim', DEFAULT_DIM)
DEFAULT_LABELS = ['v[%d]' % x for x in range(kw.get('dim', DEFAULT_DIM))]
#define the megawidget options
INITOPT = Pmw.INITOPT
optiondefs = (
('dim', DEFAULT_DIM, INITOPT),
('side', TOP, INITOPT),
('title', 'Floater Group', None),
# A tuple of initial values, one for each floater
('value', DEFAULT_VALUE, INITOPT),
# The command to be executed any time one of the floaters is updated
('command', None, None),
# A tuple of labels, one for each floater
('labels', DEFAULT_LABELS, self._updateLabels),
)
self.defineoptions(kw, optiondefs)
# Initialize the toplevel widget
Pmw.MegaToplevel.__init__(self, parent)
# Create the components
interior = self.interior()
        # Get a copy of the initial value (making sure it's a list)
self._value = list(self['value'])
# The Menu Bar
self.balloon = Pmw.Balloon()
menubar = self.createcomponent('menubar', (), None,
Pmw.MenuBar, (interior,),
balloon = self.balloon)
menubar.pack(fill=X)
# FloaterGroup Menu
menubar.addmenu('Floater Group', 'Floater Group Operations')
menubar.addmenuitem(
'Floater Group', 'command', 'Reset the Floater Group panel',
label = 'Reset',
command = lambda s = self: s.reset())
menubar.addmenuitem(
'Floater Group', 'command', 'Dismiss Floater Group panel',
label = 'Dismiss', command = self.withdraw)
menubar.addmenu('Help', 'Floater Group Help Operations')
self.toggleBalloonVar = IntVar()
self.toggleBalloonVar.set(0)
menubar.addmenuitem('Help', 'checkbutton',
'Toggle balloon help',
label = 'Balloon Help',
variable = self.toggleBalloonVar,
command = self.toggleBalloon)
self.floaterList = []
for index in range(self['dim']):
# Add a group alias so you can configure the floaters via:
# fg.configure(Valuator_XXX = YYY)
f = self.createcomponent(
'floater%d' % index, (), 'Valuator', Floater,
(interior,), value = self._value[index],
text = self['labels'][index])
# Do this separately so command doesn't get executed during construction
f['command'] = lambda val, s=self, i=index: s._floaterSetAt(i, val)
f.pack(side = self['side'], expand = 1, fill = X)
self.floaterList.append(f)
# Make sure floaters are initialized
self.set(self['value'])
# Make sure input variables processed
self.initialiseoptions(FloaterGroup)
def _updateLabels(self):
if self['labels']:
for index in range(self['dim']):
self.floaterList[index]['text'] = self['labels'][index]
def toggleBalloon(self):
if self.toggleBalloonVar.get():
self.balloon.configure(state = 'balloon')
else:
self.balloon.configure(state = 'none')
def get(self):
return self._value
def getAt(self, index):
return self._value[index]
    # This is the command used to set the group's value
def set(self, value, fCommand = 1):
for i in range(self['dim']):
self._value[i] = value[i]
# Update floater, but don't execute its command
self.floaterList[i].set(value[i], 0)
if fCommand and (self['command'] is not None):
self['command'](self._value)
def setAt(self, index, value):
# Update floater and execute its command
self.floaterList[index].set(value)
# This is the command used by the floater
def _floaterSetAt(self, index, value):
self._value[index] = value
if self['command']:
self['command'](self._value)
def reset(self):
self.set(self['value'])
## SAMPLE CODE
if __name__ == '__main__':
# Initialise Tkinter and Pmw.
root = Toplevel()
root.title('Pmw Floater demonstration')
# Dummy command
def printVal(val):
print val
# Create and pack a Floater megawidget.
mega1 = Floater(root, command = printVal)
mega1.pack(side = 'left', expand = 1, fill = 'x')
"""
# These are things you can set/configure
# Starting value for floater
mega1['value'] = 123.456
mega1['text'] = 'Drive delta X'
# To change the color of the label:
mega1.label['foreground'] = 'Red'
# Max change/update, default is 100
# To have really fine control, for example
# mega1['maxVelocity'] = 0.1
# Number of digits to the right of the decimal point, default = 2
# mega1['numDigits'] = 5
"""
# To create a floater group to set an RGBA value:
group1 = FloaterGroup(root, dim = 4,
title = 'Simple RGBA Panel',
labels = ('R', 'G', 'B', 'A'),
Valuator_min = 0.0,
Valuator_max = 255.0,
Valuator_resolution = 1.0,
command = printVal)
# Uncomment this if you aren't running in IDLE
#root.mainloop()
|
QInfer/python-qinfer
|
src/qinfer/domains.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# domains.py: module for domains of model outcomes
##
# © 2017, Chris Ferrie (csferrie@gmail.com) and
# Christopher Granade (cgranade@cgranade.com).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
## IMPORTS ###################################################################
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future.utils import with_metaclass
from functools import reduce
from operator import mul
from scipy.special import binom
from math import factorial
from itertools import combinations_with_replacement, product
import numpy as np
from .utils import join_struct_arrays, separate_struct_array
import abc
import warnings
## EXPORTS ###################################################################
__all__ = [
'Domain',
'ProductDomain',
'RealDomain',
'IntegerDomain',
'MultinomialDomain'
]
## FUNCTIONS #################################################################
## ABSTRACT CLASSES AND MIXINS ###############################################
class Domain(with_metaclass(abc.ABCMeta, object)):
"""
Abstract base class for domains of outcomes of models.
"""
## ABSTRACT PROPERTIES ##
@abc.abstractproperty
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
pass
@abc.abstractproperty
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
pass
@abc.abstractproperty
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
pass
@abc.abstractproperty
def n_members(self):
"""
Returns the number of members in the domain if it
`is_finite`, otherwise, returns `np.inf`.
:type: ``int`` or ``np.inf``
"""
pass
@abc.abstractproperty
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type `dtype`.
:type: ``np.ndarray``
"""
pass
@abc.abstractproperty
def values(self):
"""
Returns an `np.array` of type `dtype` containing
some values from the domain.
For domains where `is_finite` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
pass
## CONCRETE PROPERTIES ##
@property
def is_discrete(self):
"""
Whether or not the domain has a countable number of values.
:type: `bool`
"""
return not self.is_continuous
## ABSTRACT METHODS ##
@abc.abstractmethod
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
pass
class ProductDomain(Domain):
"""
A domain made from the cartesian product of other domains.
:param Domain domains: ``Domain`` instances as separate arguments,
        or as a single list of ``Domain`` instances.
"""
    def __init__(self, *domains):
        if len(domains) == 1:
            try:
                # a single argument may itself be a list of domains
                self._domains = list(domains[0])
            except TypeError:
                self._domains = domains
        else:
            self._domains = domains
self._dtypes = [domain.example_point.dtype for domain in self._domains]
self._example_point = join_struct_arrays(
[np.array(domain.example_point) for domain in self._domains]
)
self._dtype = self._example_point.dtype
@property
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
return any([domain.is_continuous for domain in self._domains])
@property
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
return all([domain.is_finite for domain in self._domains])
@property
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
return self._dtype
@property
def n_members(self):
"""
Returns the number of members in the domain if it
`is_finite`, otherwise, returns `np.inf`.
:type: ``int`` or ``np.inf``
"""
if self.is_finite:
return reduce(mul, [domain.n_members for domain in self._domains], 1)
else:
return np.inf
@property
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type `dtype`.
:type: ``np.ndarray``
"""
return self._example_point
@property
def values(self):
"""
Returns an `np.array` of type `dtype` containing
some values from the domain.
For domains where `is_finite` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
separate_values = [domain.values for domain in self._domains]
return np.concatenate([
join_struct_arrays(list(map(np.array, value)))
for value in product(*separate_values)
])
## METHODS ##
def _mytype(self, array):
# astype does weird stuff with struct names, and possibly
# depends on numpy version; hopefully
# the following is a bit more predictable since it passes through
# uint8
return separate_struct_array(array, self.dtype)[0]
def to_regular_arrays(self, array):
"""
Expands from an array of type `self.dtype` into a list of
arrays with dtypes corresponding to the factor domains.
:param np.ndarray array: An `np.array` of type `self.dtype`.
:rtype: ``list``
"""
return separate_struct_array(self._mytype(array), self._dtypes)
def from_regular_arrays(self, arrays):
"""
Merges a list of arrays (of the same shape) of dtypes
corresponding to the factor domains into a single array
with the dtype of the ``ProductDomain``.
:param list array: A list with each element of type ``np.ndarray``
:rtype: `np.ndarray`
"""
return self._mytype(join_struct_arrays([
array.astype(dtype)
for dtype, array in zip(self._dtypes, arrays)
]))
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
return all([
domain.in_domain(array)
for domain, array in
zip(self._domains, separate_struct_array(points, self._dtypes))
])
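# Usage sketch (assumed values): ProductDomain(IntegerDomain(0, 1),
# IntegerDomain(0, 2)) has 2 * 3 = 6 members, one structured record per
# pair of factor values.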
## CLASSES ###################################################################
class RealDomain(Domain):
"""
A domain specifying a contiguous (and possibly open ended) subset
of the real numbers.
:param float min: A number specifying the lowest possible value of the
domain.
:param float max: A number specifying the largest possible value of the
domain.
"""
def __init__(self, min=-np.inf, max=np.inf):
self._min = min
self._max = max
## PROPERTIES ##
@property
def min(self):
"""
Returns the minimum value of the domain.
:rtype: `float`
"""
return self._min
@property
def max(self):
"""
Returns the maximum value of the domain.
:rtype: `float`
"""
return self._max
@property
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
return True
@property
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
return False
@property
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
return np.float
@property
def n_members(self):
"""
Returns the number of members in the domain if it
        `is_finite`, otherwise, returns `np.inf`.
:type: ``np.inf``
"""
return np.inf
@property
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type ``dtype``.
:type: ``np.ndarray``
"""
if not np.isinf(self.min):
return np.array([self.min], dtype=self.dtype)
if not np.isinf(self.max):
return np.array([self.max], dtype=self.dtype)
else:
return np.array([0], dtype=self.dtype)
@property
def values(self):
"""
Returns an `np.array` of type `self.dtype` containing
some values from the domain.
For domains where ``is_finite`` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
return self.example_point
## METHODS ##
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
if np.all(np.isreal(points)):
are_greater = np.all(np.greater_equal(points, self._min))
are_smaller = np.all(np.less_equal(points, self._max))
return are_greater and are_smaller
else:
return False
class IntegerDomain(Domain):
"""
A domain specifying a contiguous (and possibly open ended) subset
of the integers.
Internally minimum and maximum are represented as
    floats in order to handle the case of infinite maximums and minimums. The
integer conversion function will be applied to the min and max values.
:param int min: A number specifying the lowest possible value of the
domain.
:param int max: A number specifying the largest possible value of the
domain.
Note: Yes, it is slightly unpythonic to specify `max` instead of `max`+1.
"""
def __init__(self, min=0, max=np.inf):
self._min = int(min) if not np.isinf(min) else min
self._max = int(max) if not np.isinf(max) else max
## PROPERTIES ##
@property
def min(self):
"""
Returns the minimum value of the domain.
        :rtype: `int` or `np.inf`
"""
return int(self._min) if not np.isinf(self._min) else self._min
@property
def max(self):
"""
Returns the maximum value of the domain.
        :rtype: `int` or `np.inf`
"""
return int(self._max) if not np.isinf(self._max) else self._max
@property
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
return False
@property
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
return not np.isinf(self.min) and not np.isinf(self.max)
@property
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
return np.int
@property
def n_members(self):
"""
Returns the number of members in the domain if it
`is_finite`, otherwise, returns `np.inf`.
:type: ``int`` or ``np.inf``
"""
if self.is_finite:
return int(self.max - self.min + 1)
else:
return np.inf
@property
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type ``dtype``.
:type: ``np.ndarray``
"""
if not np.isinf(self.min):
return np.array([self._min], dtype=self.dtype)
if not np.isinf(self.max):
return np.array([self._max], dtype=self.dtype)
else:
return np.array([0], dtype=self.dtype)
@property
def values(self):
"""
Returns an `np.array` of type `self.dtype` containing
some values from the domain.
For domains where ``is_finite`` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
if self.is_finite:
return np.arange(self.min, self.max + 1, dtype = self.dtype)
else:
return self.example_point
## METHODS ##
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
if np.all(np.isreal(points)):
try:
are_integer = np.all(np.mod(points, 1) == 0)
except TypeError:
are_integer = False
are_greater = np.all(np.greater_equal(points, self._min))
are_smaller = np.all(np.less_equal(points, self._max))
return are_integer and are_greater and are_smaller
else:
return False
class MultinomialDomain(Domain):
"""
A domain specifying k-tuples of non-negative integers which
sum to a specific value.
:param int n_meas: The sum of any tuple in the domain.
:param int n_elements: The number of elements in a tuple.
"""
def __init__(self, n_meas, n_elements=2):
self._n_elements = n_elements
self._n_meas = n_meas
## PROPERTIES ##
@property
def n_meas(self):
"""
Returns the sum of any tuple in the domain.
:rtype: `int`
"""
return self._n_meas
@property
def n_elements(self):
"""
Returns the number of elements of a tuple in the domain.
:rtype: `int`
"""
return self._n_elements
@property
def is_continuous(self):
"""
Whether or not the domain has an uncountable number of values.
:type: `bool`
"""
return False
@property
def is_finite(self):
"""
Whether or not the domain contains a finite number of points.
:type: `bool`
"""
return True
@property
def dtype(self):
"""
The numpy dtype of a single element of the domain.
:type: `np.dtype`
"""
return np.dtype([('k', np.int, self.n_elements)])
@property
def n_members(self):
"""
        Returns the number of members in the domain; this domain is
        always finite.
:type: ``int``
"""
return int(binom(self.n_meas + self.n_elements -1, self.n_elements - 1))
@property
def example_point(self):
"""
Returns any single point guaranteed to be in the domain, but
no other guarantees; useful for testing purposes.
This is given as a size 1 ``np.array`` of type ``dtype``.
:type: ``np.ndarray``
"""
return np.array([([self.n_meas] + [0] * (self.n_elements-1),)], dtype=self.dtype)
@property
def values(self):
"""
Returns an `np.array` of type `self.dtype` containing
some values from the domain.
For domains where ``is_finite`` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
# This code comes from Jared Goguen at http://stackoverflow.com/a/37712597/1082565
partition_array = np.empty((self.n_members, self.n_elements), dtype=int)
masks = np.identity(self.n_elements, dtype=int)
for i, c in enumerate(combinations_with_replacement(masks, self.n_meas)):
partition_array[i,:] = sum(c)
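        # e.g. for n_meas=2, n_elements=2 this yields (2,0), (1,1), (0,2):
        # binom(3, 1) = 3 members, by stars and bars.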
# Convert to dtype before returning
return self.from_regular_array(partition_array)
## METHODS ##
def to_regular_array(self, A):
"""
Converts from an array of type `self.dtype` to an array
of type `int` with an additional index labeling the
        tuple indices.
:param np.ndarray A: An `np.array` of type `self.dtype`.
:rtype: `np.ndarray`
"""
# this could be a static method, but we choose to be consistent with
# from_regular_array
return A.view((int, len(A.dtype.names))).reshape(A.shape + (-1,))
def from_regular_array(self, A):
"""
Converts from an array of type `int` where the last index
is assumed to have length `self.n_elements` to an array
        of type `self.dtype` with one fewer index.
:param np.ndarray A: An `np.array` of type `int`.
:rtype: `np.ndarray`
"""
dims = A.shape[:-1]
return A.reshape((np.prod(dims),-1)).view(dtype=self.dtype).squeeze(-1).reshape(dims)
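    # Round-trip sketch: for MultinomialDomain(2), to_regular_array turns the
    # record ([2, 0],) into the plain int row [2, 0]; from_regular_array
    # inverts this, restoring the structured dtype.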
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
array_view = self.to_regular_array(points)
non_negative = np.all(np.greater_equal(array_view, 0))
correct_sum = np.all(np.sum(array_view, axis=-1) == self.n_meas)
return non_negative and correct_sum
|
fboers/jumeg
|
examples/connectivity/plot_degree_circle.py
|
#!/usr/bin/env python
'''
Plot degree values for a given set of nodes in a simple circle plot.
'''
import numpy as np
import matplotlib.pyplot as plt
import mne
from jumeg import get_jumeg_path
from jumeg.connectivity import plot_degree_circle
import bct
orig_labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
yaml_fname = get_jumeg_path() + '/data/desikan_aparc_cortex_based_grouping.yaml'
con_fname = get_jumeg_path() + '/data/sample,aparc-con.npy'
con = np.load(con_fname)
con_ = con[0, :, :, 2] + con[0, :, :, 2].T
# compute the degree
degrees = mne.connectivity.degree(con_, threshold_prop=0.2)
fig, ax = plot_degree_circle(degrees, yaml_fname, orig_labels_fname)
|
suziesparkle/wagtail
|
wagtail/vendor/django-treebeard/treebeard/tests/test_treebeard.py
|
# -*- coding: utf-8 -*-
"""Unit/Functional tests"""
from __future__ import with_statement, unicode_literals
import datetime
import os
import sys
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.db.models import Q
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
import pytest
from treebeard import numconv
from treebeard.admin import admin_factory
from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant,\
PathOverflow, MissingNodeOrderBy
from treebeard.forms import movenodeform_factory
from treebeard.templatetags.admin_tree import get_static_url
from treebeard.tests import models
BASE_DATA = [
{'data': {'desc': '1'}},
{'data': {'desc': '2'}, 'children': [
{'data': {'desc': '21'}},
{'data': {'desc': '22'}},
{'data': {'desc': '23'}, 'children': [
{'data': {'desc': '231'}},
]},
{'data': {'desc': '24'}},
]},
{'data': {'desc': '3'}},
{'data': {'desc': '4'}, 'children': [
{'data': {'desc': '41'}},
]}]
UNCHANGED = [
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
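# Each UNCHANGED entry is (desc, depth, children_count), the same tuples
# produced by TestTreeBase.got() below.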
def _prepare_db_test(request):
case = TestCase(methodName='__init__')
case._pre_setup()
request.addfinalizer(case._post_teardown)
return request.param
@pytest.fixture(scope='function',
params=models.BASE_MODELS + models.PROXY_MODELS)
def model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.BASE_MODELS)
def model_without_proxy(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.UNICODE_MODELS)
def model_with_unicode(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.SORTED_MODELS)
def sorted_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.RELATED_MODELS)
def related_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=models.MP_SHORTPATH_MODELS)
def mpshort_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeShortPath])
def mpshortnotsorted_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeAlphabet])
def mpalphabet_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeSortedAutoNow])
def mpsortedautonow_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestNodeSmallStep])
def mpsmallstep_model(request):
return _prepare_db_test(request)
@pytest.fixture(scope='function', params=[models.MP_TestManyToManyWithUser])
def mpm2muser_model(request):
return _prepare_db_test(request)
class TestTreeBase(object):
def got(self, model):
if model in [models.NS_TestNode, models.NS_TestNode_Proxy]:
# this slows down nested sets tests quite a bit, but it has the
# advantage that we'll check the node edges are correct
d = {}
for tree_id, lft, rgt in model.objects.values_list('tree_id',
'lft',
'rgt'):
d.setdefault(tree_id, []).extend([lft, rgt])
for tree_id, got_edges in d.items():
assert len(got_edges) == max(got_edges)
good_edges = list(range(1, len(got_edges) + 1))
assert sorted(got_edges) == good_edges
return [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
def _assert_get_annotated_list(self, model, expected, parent=None):
got = [
(obj[0].desc, obj[1]['open'], obj[1]['close'], obj[1]['level'])
for obj in model.get_annotated_list(parent)
]
assert expected == got
class TestEmptyTree(TestTreeBase):
def test_load_bulk_empty(self, model):
ids = model.load_bulk(BASE_DATA)
got_descs = [obj.desc
for obj in model.objects.filter(id__in=ids)]
expected_descs = [x[0] for x in UNCHANGED]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model) == UNCHANGED
def test_dump_bulk_empty(self, model):
assert model.dump_bulk() == []
def test_add_root_empty(self, model):
model.add_root(desc='1')
expected = [('1', 1, 0)]
assert self.got(model) == expected
def test_get_root_nodes_empty(self, model):
got = model.get_root_nodes()
expected = []
assert [node.desc for node in got] == expected
def test_get_first_root_node_empty(self, model):
got = model.get_first_root_node()
assert got is None
def test_get_last_root_node_empty(self, model):
got = model.get_last_root_node()
assert got is None
def test_get_tree(self, model):
got = list(model.get_tree())
assert got == []
def test_get_annotated_list(self, model):
expected = []
self._assert_get_annotated_list(model, expected)
class TestNonEmptyTree(TestTreeBase):
@classmethod
def setup_class(cls):
for model in models.BASE_MODELS:
model.load_bulk(BASE_DATA)
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.BASE_MODELS)
class TestClassMethods(TestNonEmptyTree):
def test_load_bulk_existing(self, model):
# inserting on an existing node
node = model.objects.get(desc='231')
ids = model.load_bulk(BASE_DATA, node)
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 4),
('1', 4, 0),
('2', 4, 4),
('21', 5, 0),
('22', 5, 0),
('23', 5, 1),
('231', 6, 0),
('24', 5, 0),
('3', 4, 0),
('4', 4, 1),
('41', 5, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
expected_descs = ['1', '2', '21', '22', '23', '231', '24',
'3', '4', '41']
got_descs = [obj.desc for obj in model.objects.filter(id__in=ids)]
assert sorted(got_descs) == sorted(expected_descs)
assert self.got(model) == expected
def test_get_tree_all(self, model):
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
assert got == UNCHANGED
def test_dump_bulk_all(self, model):
assert model.dump_bulk(keep_ids=False) == BASE_DATA
def test_get_tree_node(self, model):
node = model.objects.get(desc='231')
model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our node object
node = model.objects.get(pk=node.pk)
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree(node)]
expected = [('231', 3, 4),
('1', 4, 0),
('2', 4, 4),
('21', 5, 0),
('22', 5, 0),
('23', 5, 1),
('231', 6, 0),
('24', 5, 0),
('3', 4, 0),
('4', 4, 1),
('41', 5, 0)]
assert got == expected
def test_get_tree_leaf(self, model):
node = model.objects.get(desc='1')
assert 0 == node.get_children_count()
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree(node)]
expected = [('1', 1, 0)]
assert got == expected
def test_get_annotated_list_all(self, model):
expected = [('1', True, [], 0), ('2', False, [], 0),
('21', True, [], 1), ('22', False, [], 1),
('23', False, [], 1), ('231', True, [0], 2),
('24', False, [0], 1), ('3', False, [], 0),
('4', False, [], 0), ('41', True, [0, 1], 1)]
self._assert_get_annotated_list(model, expected)
def test_get_annotated_list_node(self, model):
node = model.objects.get(desc='2')
expected = [('2', True, [], 0), ('21', True, [], 1),
('22', False, [], 1), ('23', False, [], 1),
('231', True, [0], 2), ('24', False, [0, 1], 1)]
self._assert_get_annotated_list(model, expected, node)
def test_get_annotated_list_leaf(self, model):
node = model.objects.get(desc='1')
expected = [('1', True, [0], 0)]
self._assert_get_annotated_list(model, expected, node)
def test_dump_bulk_node(self, model):
node = model.objects.get(desc='231')
model.load_bulk(BASE_DATA, node)
# the tree was modified by load_bulk, so we reload our node object
node = model.objects.get(pk=node.pk)
got = model.dump_bulk(node, False)
expected = [{'data': {'desc': '231'}, 'children': BASE_DATA}]
assert got == expected
def test_load_and_dump_bulk_keeping_ids(self, model):
exp = model.dump_bulk(keep_ids=True)
model.objects.all().delete()
model.load_bulk(exp, None, True)
got = model.dump_bulk(keep_ids=True)
assert got == exp
        # do we really have an unchanged tree after the dump/delete/load?
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
assert got == UNCHANGED
def test_load_and_dump_bulk_with_fk(self, related_model):
# https://bitbucket.org/tabo/django-treebeard/issue/48/
related_model.objects.all().delete()
related, created = models.RelatedModel.objects.get_or_create(
desc="Test %s" % related_model.__name__)
related_data = [
{'data': {'desc': '1', 'related': related.pk}},
{'data': {'desc': '2', 'related': related.pk}, 'children': [
{'data': {'desc': '21', 'related': related.pk}},
{'data': {'desc': '22', 'related': related.pk}},
{'data': {'desc': '23', 'related': related.pk}, 'children': [
{'data': {'desc': '231', 'related': related.pk}},
]},
{'data': {'desc': '24', 'related': related.pk}},
]},
{'data': {'desc': '3', 'related': related.pk}},
{'data': {'desc': '4', 'related': related.pk}, 'children': [
{'data': {'desc': '41', 'related': related.pk}},
]}]
related_model.load_bulk(related_data)
got = related_model.dump_bulk(keep_ids=False)
assert got == related_data
def test_get_root_nodes(self, model):
got = model.get_root_nodes()
expected = ['1', '2', '3', '4']
assert [node.desc for node in got] == expected
def test_get_first_root_node(self, model):
got = model.get_first_root_node()
assert got.desc == '1'
def test_get_last_root_node(self, model):
got = model.get_last_root_node()
assert got.desc == '4'
def test_add_root(self, model):
obj = model.add_root(desc='5')
assert obj.get_depth() == 1
assert model.get_last_root_node().desc == '5'
class TestSimpleNodeMethods(TestNonEmptyTree):
def test_is_root(self, model):
data = [
('2', True),
('1', True),
('4', True),
('21', False),
('24', False),
('22', False),
('231', False),
]
for desc, expected in data:
got = model.objects.get(desc=desc).is_root()
assert got == expected
def test_is_leaf(self, model):
data = [
('2', False),
('23', False),
('231', True),
]
for desc, expected in data:
got = model.objects.get(desc=desc).is_leaf()
assert got == expected
def test_get_root(self, model):
data = [
('2', '2'),
('1', '1'),
('4', '4'),
('21', '2'),
('24', '2'),
('22', '2'),
('231', '2'),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_root()
assert node.desc == expected
def test_get_parent(self, model):
data = [
('2', None),
('1', None),
('4', None),
('21', '2'),
('24', '2'),
('22', '2'),
('231', '23'),
]
data = dict(data)
objs = {}
for desc, expected in data.items():
node = model.objects.get(desc=desc)
parent = node.get_parent()
if expected:
assert parent.desc == expected
else:
assert parent is None
objs[desc] = node
# corrupt the objects' parent cache
node._parent_obj = 'CORRUPTED!!!'
for desc, expected in data.items():
node = objs[desc]
# asking get_parent to not use the parent cache (since we
# corrupted it in the previous loop)
parent = node.get_parent(True)
if expected:
assert parent.desc == expected
else:
assert parent is None
def test_get_children(self, model):
data = [
('2', ['21', '22', '23', '24']),
('23', ['231']),
('231', []),
]
for desc, expected in data:
children = model.objects.get(desc=desc).get_children()
assert [node.desc for node in children] == expected
def test_get_children_count(self, model):
data = [
('2', 4),
('23', 1),
('231', 0),
]
for desc, expected in data:
got = model.objects.get(desc=desc).get_children_count()
assert got == expected
def test_get_siblings(self, model):
data = [
('2', ['1', '2', '3', '4']),
('21', ['21', '22', '23', '24']),
('231', ['231']),
]
for desc, expected in data:
siblings = model.objects.get(desc=desc).get_siblings()
assert [node.desc for node in siblings] == expected
def test_get_first_sibling(self, model):
data = [
('2', '1'),
('1', '1'),
('4', '1'),
('21', '21'),
('24', '21'),
('22', '21'),
('231', '231'),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_first_sibling()
assert node.desc == expected
def test_get_prev_sibling(self, model):
data = [
('2', '1'),
('1', None),
('4', '3'),
('21', None),
('24', '23'),
('22', '21'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_prev_sibling()
if expected is None:
assert node is None
else:
assert node.desc == expected
def test_get_next_sibling(self, model):
data = [
('2', '3'),
('1', '2'),
('4', None),
('21', '22'),
('24', None),
('22', '23'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_next_sibling()
if expected is None:
assert node is None
else:
assert node.desc == expected
def test_get_last_sibling(self, model):
data = [
('2', '4'),
('1', '4'),
('4', '4'),
('21', '24'),
('24', '24'),
('22', '24'),
('231', '231'),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_last_sibling()
assert node.desc == expected
def test_get_first_child(self, model):
data = [
('2', '21'),
('21', None),
('23', '231'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_first_child()
if expected is None:
assert node is None
else:
assert node.desc == expected
def test_get_last_child(self, model):
data = [
('2', '24'),
('21', None),
('23', '231'),
('231', None),
]
for desc, expected in data:
node = model.objects.get(desc=desc).get_last_child()
if expected is None:
assert node is None
else:
assert node.desc == expected
def test_get_ancestors(self, model):
data = [
('2', []),
('21', ['2']),
('231', ['2', '23']),
]
for desc, expected in data:
nodes = model.objects.get(desc=desc).get_ancestors()
assert [node.desc for node in nodes] == expected
def test_get_descendants(self, model):
data = [
('2', ['21', '22', '23', '231', '24']),
('23', ['231']),
('231', []),
('1', []),
('4', ['41']),
]
for desc, expected in data:
nodes = model.objects.get(desc=desc).get_descendants()
assert [node.desc for node in nodes] == expected
def test_get_descendant_count(self, model):
data = [
('2', 5),
('23', 1),
('231', 0),
('1', 0),
('4', 1),
]
for desc, expected in data:
got = model.objects.get(desc=desc).get_descendant_count()
assert got == expected
def test_is_sibling_of(self, model):
data = [
('2', '2', True),
('2', '1', True),
('21', '2', False),
('231', '2', False),
('22', '23', True),
('231', '23', False),
('231', '231', True),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_sibling_of(node2) == expected
def test_is_child_of(self, model):
data = [
('2', '2', False),
('2', '1', False),
('21', '2', True),
('231', '2', False),
('231', '23', True),
('231', '231', False),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_child_of(node2) == expected
def test_is_descendant_of(self, model):
data = [
('2', '2', False),
('2', '1', False),
('21', '2', True),
('231', '2', True),
('231', '23', True),
('231', '231', False),
]
for desc1, desc2, expected in data:
node1 = model.objects.get(desc=desc1)
node2 = model.objects.get(desc=desc2)
assert node1.is_descendant_of(node2) == expected
class TestAddChild(TestNonEmptyTree):
def test_add_child_to_leaf(self, model):
model.objects.get(desc='231').add_child(desc='2311')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 1),
('2311', 4, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_child_to_node(self, model):
model.objects.get(desc='2').add_child(desc='25')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('25', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestAddSibling(TestNonEmptyTree):
def test_add_sibling_invalid_pos(self, model):
with pytest.raises(InvalidPosition):
model.objects.get(desc='231').add_sibling('invalid_pos')
def test_add_sibling_missing_nodeorderby(self, model):
node_wchildren = model.objects.get(desc='2')
with pytest.raises(MissingNodeOrderBy):
node_wchildren.add_sibling('sorted-sibling', desc='aaa')
def test_add_sibling_last_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('last-sibling', desc='5')
assert obj.get_depth() == 1
assert node_wchildren.get_last_sibling().desc == '5'
def test_add_sibling_last(self, model):
node = model.objects.get(desc='231')
obj = node.add_sibling('last-sibling', desc='232')
assert obj.get_depth() == 3
assert node.get_last_sibling().desc == '232'
def test_add_sibling_first_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('first-sibling', desc='new')
assert obj.get_depth() == 1
expected = [('new', 1, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_first(self, model):
node_wchildren = model.objects.get(desc='23')
obj = node_wchildren.add_sibling('first-sibling', desc='new')
assert obj.get_depth() == 2
expected = [('1', 1, 0),
('2', 1, 5),
('new', 2, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('left', desc='new')
assert obj.get_depth() == 1
expected = [('1', 1, 0),
('new', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left(self, model):
node_wchildren = model.objects.get(desc='23')
obj = node_wchildren.add_sibling('left', desc='new')
assert obj.get_depth() == 2
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('new', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left_noleft_root(self, model):
node = model.objects.get(desc='1')
obj = node.add_sibling('left', desc='new')
assert obj.get_depth() == 1
expected = [('new', 1, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_left_noleft(self, model):
node = model.objects.get(desc='231')
obj = node.add_sibling('left', desc='new')
assert obj.get_depth() == 3
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('new', 3, 0),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_right_root(self, model):
node_wchildren = model.objects.get(desc='2')
obj = node_wchildren.add_sibling('right', desc='new')
assert obj.get_depth() == 1
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('new', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_right(self, model):
node_wchildren = model.objects.get(desc='23')
obj = node_wchildren.add_sibling('right', desc='new')
assert obj.get_depth() == 2
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('new', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_add_sibling_right_noright_root(self, model):
node = model.objects.get(desc='4')
obj = node.add_sibling('right', desc='new')
assert obj.get_depth() == 1
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0),
('new', 1, 0)]
assert self.got(model) == expected
def test_add_sibling_right_noright(self, model):
node = model.objects.get(desc='231')
obj = node.add_sibling('right', desc='new')
assert obj.get_depth() == 3
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('231', 3, 0),
('new', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestDelete(TestNonEmptyTree):
@classmethod
def setup_class(cls):
TestNonEmptyTree.setup_class()
for model, dep_model in zip(models.BASE_MODELS, models.DEP_MODELS):
for node in model.objects.all():
dep_model(node=node).save()
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.DEP_MODELS + models.BASE_MODELS)
def test_delete_leaf(self, model):
model.objects.get(desc='231').delete()
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_node(self, model):
model.objects.get(desc='23').delete()
expected = [('1', 1, 0),
('2', 1, 3),
('21', 2, 0),
('22', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_root(self, model):
model.objects.get(desc='2').delete()
expected = [('1', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_filter_root_nodes(self, model):
model.objects.filter(desc__in=('2', '3')).delete()
expected = [('1', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_filter_children(self, model):
model.objects.filter(desc__in=('2', '23', '231')).delete()
expected = [('1', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
    def test_delete_nonexistent_nodes(self, model):
model.objects.filter(desc__in=('ZZZ', 'XXX')).delete()
assert self.got(model) == UNCHANGED
def test_delete_same_node_twice(self, model):
model.objects.filter(desc__in=('2', '2')).delete()
expected = [('1', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_delete_all_root_nodes(self, model):
model.get_root_nodes().delete()
count = model.objects.count()
assert count == 0
def test_delete_all_nodes(self, model):
model.objects.all().delete()
count = model.objects.count()
assert count == 0
class TestMoveErrors(TestNonEmptyTree):
def test_move_invalid_pos(self, model):
node = model.objects.get(desc='231')
with pytest.raises(InvalidPosition):
node.move(node, 'invalid_pos')
def test_move_to_descendant(self, model):
node = model.objects.get(desc='2')
target = model.objects.get(desc='231')
with pytest.raises(InvalidMoveToDescendant):
node.move(target, 'first-sibling')
def test_move_missing_nodeorderby(self, model):
node = model.objects.get(desc='231')
with pytest.raises(MissingNodeOrderBy):
node.move(node, 'sorted-child')
with pytest.raises(MissingNodeOrderBy):
node.move(node, 'sorted-sibling')
class TestMoveSortedErrors(TestTreeBase):
def test_nonsorted_move_in_sorted(self, sorted_model):
node = sorted_model.add_root(val1=3, val2=3, desc='zxy')
with pytest.raises(InvalidPosition):
node.move(node, 'left')
class TestMoveLeafRoot(TestNonEmptyTree):
def test_move_leaf_last_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0),
('231', 1, 0)]
assert self.got(model) == expected
def test_move_leaf_first_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'first-sibling')
expected = [('231', 1, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_left_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'left')
expected = [('1', 1, 0),
('231', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_right_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 1, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_last_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_first_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='231').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 5),
('231', 2, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestMoveLeaf(TestNonEmptyTree):
def test_move_leaf_last_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_first_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'first-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('231', 2, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_left_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'left')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('231', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_right_sibling(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('231', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_left_sibling_itself(self, model):
target = model.objects.get(desc='231')
model.objects.get(desc='231').move(target, 'left')
assert self.got(model) == UNCHANGED
def test_move_leaf_last_child(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 1),
('231', 3, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_leaf_first_child(self, model):
target = model.objects.get(desc='22')
model.objects.get(desc='231').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 1),
('231', 3, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
class TestMoveBranchRoot(TestNonEmptyTree):
def test_move_branch_first_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'first-sibling')
expected = [('4', 1, 1),
('41', 2, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_branch_left_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'left')
expected = [('1', 1, 0),
('4', 1, 1),
('41', 2, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_sibling_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 1, 1),
('41', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_noleft_sibling_root(self, model):
target = model.objects.get(desc='2').get_first_sibling()
model.objects.get(desc='4').move(target, 'left')
expected = [('4', 1, 1),
('41', 2, 0),
('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_noright_sibling_root(self, model):
target = model.objects.get(desc='2').get_last_sibling()
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_branch_first_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 5),
('4', 2, 1),
('41', 3, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_child_root(self, model):
target = model.objects.get(desc='2')
model.objects.get(desc='4').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 2, 1),
('41', 3, 0),
('3', 1, 0)]
assert self.got(model) == expected
class TestMoveBranch(TestNonEmptyTree):
def test_move_branch_first_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'first-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('4', 2, 1),
('41', 3, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'last-sibling')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 2, 1),
('41', 3, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'left')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('4', 2, 1),
('41', 3, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_sibling(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('4', 2, 1),
('41', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_noleft_sibling(self, model):
target = model.objects.get(desc='23').get_first_sibling()
model.objects.get(desc='4').move(target, 'left')
expected = [('1', 1, 0),
('2', 1, 5),
('4', 2, 1),
('41', 3, 0),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_right_noright_sibling(self, model):
target = model.objects.get(desc='23').get_last_sibling()
model.objects.get(desc='4').move(target, 'right')
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 1),
('231', 3, 0),
('24', 2, 0),
('4', 2, 1),
('41', 3, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_left_itself_sibling(self, model):
target = model.objects.get(desc='4')
model.objects.get(desc='4').move(target, 'left')
assert self.got(model) == UNCHANGED
def test_move_branch_first_child(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'first-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('4', 3, 1),
('41', 4, 0),
('231', 3, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
def test_move_branch_last_child(self, model):
target = model.objects.get(desc='23')
model.objects.get(desc='4').move(target, 'last-child')
expected = [('1', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 2),
('231', 3, 0),
('4', 3, 1),
('41', 4, 0),
('24', 2, 0),
('3', 1, 0)]
assert self.got(model) == expected
class TestTreeSorted(TestTreeBase):
def got(self, sorted_model):
return [(o.val1, o.val2, o.desc, o.get_depth(), o.get_children_count())
for o in sorted_model.get_tree()]
def test_add_root_sorted(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
expected = [(1, 4, 'bcd', 1, 0),
(2, 2, 'qwe', 1, 0),
(2, 5, 'zxy', 1, 0),
(3, 2, 'vcx', 1, 0),
(3, 3, 'abc', 1, 0),
(3, 3, 'abc', 1, 0),
(3, 3, 'zxy', 1, 0),
(4, 1, 'fgh', 1, 0)]
assert self.got(sorted_model) == expected
def test_add_child_root_sorted(self, sorted_model):
root = sorted_model.add_root(val1=0, val2=0, desc='aaa')
root.add_child(val1=3, val2=3, desc='zxy')
root.add_child(val1=1, val2=4, desc='bcd')
root.add_child(val1=2, val2=5, desc='zxy')
root.add_child(val1=3, val2=3, desc='abc')
root.add_child(val1=4, val2=1, desc='fgh')
root.add_child(val1=3, val2=3, desc='abc')
root.add_child(val1=2, val2=2, desc='qwe')
root.add_child(val1=3, val2=2, desc='vcx')
expected = [(0, 0, 'aaa', 1, 8),
(1, 4, 'bcd', 2, 0),
(2, 2, 'qwe', 2, 0),
(2, 5, 'zxy', 2, 0),
(3, 2, 'vcx', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'zxy', 2, 0),
(4, 1, 'fgh', 2, 0)]
assert self.got(sorted_model) == expected
def test_add_child_nonroot_sorted(self, sorted_model):
        def get_node(node_id):
            return sorted_model.objects.get(pk=node_id)
root_id = sorted_model.add_root(val1=0, val2=0, desc='a').pk
node_id = get_node(root_id).add_child(val1=0, val2=0, desc='ac').pk
get_node(root_id).add_child(val1=0, val2=0, desc='aa')
get_node(root_id).add_child(val1=0, val2=0, desc='av')
get_node(node_id).add_child(val1=0, val2=0, desc='aca')
get_node(node_id).add_child(val1=0, val2=0, desc='acc')
get_node(node_id).add_child(val1=0, val2=0, desc='acb')
expected = [(0, 0, 'a', 1, 3),
(0, 0, 'aa', 2, 0),
(0, 0, 'ac', 2, 3),
(0, 0, 'aca', 3, 0),
(0, 0, 'acb', 3, 0),
(0, 0, 'acc', 3, 0),
(0, 0, 'av', 2, 0)]
assert self.got(sorted_model) == expected
def test_move_sorted(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
root_nodes = sorted_model.get_root_nodes()
target = root_nodes[0]
for node in root_nodes[1:]:
# because raw queries don't update django objects
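            # (treebeard moves nodes with raw SQL updates, so the path and
            # depth held by the in-memory instances would otherwise be
            # stale)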
node = sorted_model.objects.get(pk=node.pk)
target = sorted_model.objects.get(pk=target.pk)
node.move(target, 'sorted-child')
expected = [(1, 4, 'bcd', 1, 7),
(2, 2, 'qwe', 2, 0),
(2, 5, 'zxy', 2, 0),
(3, 2, 'vcx', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'abc', 2, 0),
(3, 3, 'zxy', 2, 0),
(4, 1, 'fgh', 2, 0)]
assert self.got(sorted_model) == expected
def test_move_sortedsibling(self, sorted_model):
# https://bitbucket.org/tabo/django-treebeard/issue/27
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
root_nodes = sorted_model.get_root_nodes()
target = root_nodes[0]
for node in root_nodes[1:]:
# because raw queries don't update django objects
node = sorted_model.objects.get(pk=node.pk)
target = sorted_model.objects.get(pk=target.pk)
node.val1 -= 2
node.save()
node.move(target, 'sorted-sibling')
expected = [(0, 2, 'qwe', 1, 0),
(0, 5, 'zxy', 1, 0),
(1, 2, 'vcx', 1, 0),
(1, 3, 'abc', 1, 0),
(1, 3, 'abc', 1, 0),
(1, 3, 'zxy', 1, 0),
(1, 4, 'bcd', 1, 0),
(2, 1, 'fgh', 1, 0)]
assert self.got(sorted_model) == expected
class TestMP_TreeAlphabet(TestTreeBase):
def test_alphabet(self, mpalphabet_model):
if not os.getenv('TREEBEARD_TEST_ALPHABET', False):
            # run this test only if the environment variable is set
return
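        # (set e.g. TREEBEARD_TEST_ALPHABET=1 in the environment to enable
        # this slow, exploratory test)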
basealpha = numconv.BASE85
got_err = False
last_good = None
for alphabetlen in range(35, len(basealpha) + 1):
alphabet = basealpha[0:alphabetlen]
expected = [alphabet[0] + char for char in alphabet[1:]]
expected.extend([alphabet[1] + char for char in alphabet])
expected.append(alphabet[2] + alphabet[0])
# remove all nodes
mpalphabet_model.objects.all().delete()
# change the model's alphabet
mpalphabet_model.alphabet = alphabet
# insert root nodes
for pos in range(len(alphabet) * 2):
try:
mpalphabet_model.add_root(numval=pos)
                except Exception:
got_err = True
break
if got_err:
break
got = [obj.path
for obj in mpalphabet_model.objects.all()]
if got != expected:
got_err = True
last_good = alphabet
sys.stdout.write(
'\nThe best BASE85 based alphabet for your setup is: %s\n' % (
last_good, )
)
sys.stdout.flush()
class TestHelpers(TestTreeBase):
@classmethod
def setup_class(cls):
for model in models.BASE_MODELS:
model.load_bulk(BASE_DATA)
for node in model.get_root_nodes():
model.load_bulk(BASE_DATA, node)
model.add_root(desc='5')
@classmethod
def teardown_class(cls):
models.empty_models_tables(models.BASE_MODELS)
def test_descendants_group_count_root(self, model):
expected = [(o.desc, o.get_descendant_count())
for o in model.get_root_nodes()]
got = [(o.desc, o.descendants_count)
for o in model.get_descendants_group_count()]
assert got == expected
def test_descendants_group_count_node(self, model):
parent = model.get_root_nodes().get(desc='2')
expected = [(o.desc, o.get_descendant_count())
for o in parent.get_children()]
got = [(o.desc, o.descendants_count)
for o in model.get_descendants_group_count(parent)]
assert got == expected
class TestMP_TreeSortedAutoNow(TestTreeBase):
"""
The sorting mechanism used by treebeard when adding a node can fail if the
ordering is using an "auto_now" field
"""
def test_sorted_by_autonow_workaround(self, mpsortedautonow_model):
# workaround
for i in range(1, 5):
mpsortedautonow_model.add_root(desc='node%d' % (i, ),
created=datetime.datetime.now())
def test_sorted_by_autonow_FAIL(self, mpsortedautonow_model):
"""
        This test asserts that we have a problem.
        TODO: fix this, somehow.
"""
mpsortedautonow_model.add_root(desc='node1')
with pytest.raises(ValueError):
mpsortedautonow_model.add_root(desc='node2')
class TestMP_TreeStepOverflow(TestTreeBase):
def test_add_root(self, mpsmallstep_model):
method = mpsmallstep_model.add_root
for i in range(1, 10):
method()
with pytest.raises(PathOverflow):
method()
def test_add_child(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
method = root.add_child
for i in range(1, 10):
method()
with pytest.raises(PathOverflow):
method()
def test_add_sibling(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
for i in range(1, 10):
root.add_child()
positions = ('first-sibling', 'left', 'right', 'last-sibling')
for pos in positions:
with pytest.raises(PathOverflow):
root.get_last_child().add_sibling(pos)
def test_move(self, mpsmallstep_model):
root = mpsmallstep_model.add_root()
for i in range(1, 10):
root.add_child()
newroot = mpsmallstep_model.add_root()
targets = [(root, ['first-child', 'last-child']),
(root.get_first_child(), ['first-sibling',
'left',
'right',
'last-sibling'])]
for target, positions in targets:
for pos in positions:
with pytest.raises(PathOverflow):
newroot.move(target, pos)
class TestMP_TreeShortPath(TestTreeBase):
"""Test a tree with a very small path field (max_length=4) and a
steplen of 1
"""
def test_short_path(self, mpshortnotsorted_model):
obj = mpshortnotsorted_model.add_root()
obj = obj.add_child().add_child().add_child()
with pytest.raises(PathOverflow):
obj.add_child()
class TestMP_TreeFindProblems(TestTreeBase):
def test_find_problems(self, mpalphabet_model):
mpalphabet_model.alphabet = '01234'
mpalphabet_model(path='01', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='1', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='111', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='abcd', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='qa#$%!', depth=1, numchild=0, numval=0).save()
mpalphabet_model(path='0201', depth=2, numchild=0, numval=0).save()
mpalphabet_model(path='020201', depth=3, numchild=0, numval=0).save()
mpalphabet_model(path='03', depth=1, numchild=2, numval=0).save()
mpalphabet_model(path='0301', depth=2, numchild=0, numval=0).save()
mpalphabet_model(path='030102', depth=3, numchild=10, numval=0).save()
mpalphabet_model(path='04', depth=10, numchild=1, numval=0).save()
mpalphabet_model(path='0401', depth=20, numchild=0, numval=0).save()
def got(ids):
return [o.path for o in
mpalphabet_model.objects.filter(id__in=ids)]
(evil_chars, bad_steplen, orphans, wrong_depth, wrong_numchild) = (
mpalphabet_model.find_problems())
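        # find_problems() buckets, in order: paths using characters outside
        # the alphabet, paths whose length is not a multiple of steplen,
        # nodes whose ancestor paths are missing, nodes whose stored depth
        # disagrees with their path, and nodes whose numchild does not
        # match their actual children.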
assert ['abcd', 'qa#$%!'] == got(evil_chars)
assert ['1', '111'] == got(bad_steplen)
assert ['0201', '020201'] == got(orphans)
assert ['03', '0301', '030102'] == got(wrong_numchild)
assert ['04', '0401'] == got(wrong_depth)
class TestMP_TreeFix(TestTreeBase):
expected_no_holes = {
models.MP_TestNodeShortPath: [
('1', 'b', 1, 2),
('11', 'u', 2, 1),
('111', 'i', 3, 1),
('1111', 'e', 4, 0),
('12', 'o', 2, 0),
('2', 'd', 1, 0),
('3', 'g', 1, 0),
('4', 'a', 1, 4),
('41', 'a', 2, 0),
('42', 'a', 2, 0),
('43', 'u', 2, 1),
('431', 'i', 3, 1),
('4311', 'e', 4, 0),
('44', 'o', 2, 0)],
models.MP_TestSortedNodeShortPath: [
('1', 'a', 1, 4),
('11', 'a', 2, 0),
('12', 'a', 2, 0),
('13', 'o', 2, 0),
('14', 'u', 2, 1),
('141', 'i', 3, 1),
('1411', 'e', 4, 0),
('2', 'b', 1, 2),
('21', 'o', 2, 0),
('22', 'u', 2, 1),
('221', 'i', 3, 1),
('2211', 'e', 4, 0),
('3', 'd', 1, 0),
('4', 'g', 1, 0)]}
expected_with_holes = {
models.MP_TestNodeShortPath: [
('1', 'b', 1, 2),
('13', 'u', 2, 1),
('134', 'i', 3, 1),
('1343', 'e', 4, 0),
('14', 'o', 2, 0),
('2', 'd', 1, 0),
('3', 'g', 1, 0),
('4', 'a', 1, 4),
('41', 'a', 2, 0),
('42', 'a', 2, 0),
('43', 'u', 2, 1),
('434', 'i', 3, 1),
('4343', 'e', 4, 0),
('44', 'o', 2, 0)],
models.MP_TestSortedNodeShortPath: [
('1', 'b', 1, 2),
('13', 'u', 2, 1),
('134', 'i', 3, 1),
('1343', 'e', 4, 0),
('14', 'o', 2, 0),
('2', 'd', 1, 0),
('3', 'g', 1, 0),
('4', 'a', 1, 4),
('41', 'a', 2, 0),
('42', 'a', 2, 0),
('43', 'u', 2, 1),
('434', 'i', 3, 1),
('4343', 'e', 4, 0),
('44', 'o', 2, 0)]}
def got(self, model):
return [(o.path, o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
def add_broken_test_data(self, model):
model(path='4', depth=2, numchild=2, desc='a').save()
model(path='13', depth=1000, numchild=0, desc='u').save()
model(path='14', depth=4, numchild=500, desc='o').save()
model(path='134', depth=321, numchild=543, desc='i').save()
model(path='1343', depth=321, numchild=543, desc='e').save()
model(path='42', depth=1, numchild=1, desc='a').save()
model(path='43', depth=1000, numchild=0, desc='u').save()
model(path='44', depth=4, numchild=500, desc='o').save()
model(path='434', depth=321, numchild=543, desc='i').save()
model(path='4343', depth=321, numchild=543, desc='e').save()
model(path='41', depth=1, numchild=1, desc='a').save()
model(path='3', depth=221, numchild=322, desc='g').save()
model(path='1', depth=10, numchild=3, desc='b').save()
model(path='2', depth=10, numchild=3, desc='d').save()
def test_fix_tree_non_destructive(self, mpshort_model):
self.add_broken_test_data(mpshort_model)
mpshort_model.fix_tree(destructive=False)
got = self.got(mpshort_model)
expected = self.expected_with_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
def test_fix_tree_destructive(self, mpshort_model):
self.add_broken_test_data(mpshort_model)
mpshort_model.fix_tree(destructive=True)
got = self.got(mpshort_model)
expected = self.expected_no_holes[mpshort_model]
assert got == expected
mpshort_model.find_problems()
class TestIssues(TestTreeBase):
# test for http://code.google.com/p/django-treebeard/issues/detail?id=14
def test_many_to_many_django_user_anonymous(self, mpm2muser_model):
# Using AnonymousUser() in the querysets will expose non-treebeard
# related problems in Django 1.0
#
# Postgres:
# ProgrammingError: can't adapt
# SQLite:
# InterfaceError: Error binding parameter 4 - probably unsupported
# type.
# MySQL compared a string to an integer field:
# `treebeard_mp_testissue14_users`.`user_id` = 'AnonymousUser'
#
# Using a None field instead works (will be translated to IS NULL).
#
# anonuserobj = AnonymousUser()
anonuserobj = None
def qs_check(qs, expected):
assert [o.name for o in qs] == expected
def qs_check_first_or_user(expected, root, user):
qs_check(
root.get_children().filter(Q(name="first") | Q(users=user)),
expected)
user = User.objects.create_user('test_user', 'test@example.com',
'testpasswd')
user.save()
root = mpm2muser_model.add_root(name="the root node")
root.add_child(name="first")
second = root.add_child(name="second")
qs_check(root.get_children(), ['first', 'second'])
qs_check(root.get_children().filter(Q(name="first")), ['first'])
qs_check(root.get_children().filter(Q(users=user)), [])
qs_check_first_or_user(['first'], root, user)
qs_check_first_or_user(['first', 'second'], root, anonuserobj)
user = User.objects.get(username="test_user")
second.users.add(user)
qs_check_first_or_user(['first', 'second'], root, user)
qs_check_first_or_user(['first'], root, anonuserobj)
class TestMoveNodeForm(TestNonEmptyTree):
def _get_nodes_list(self, nodes):
return [(pk, '%sNode %d' % (' ' * 4 * (depth - 1), pk))
for pk, depth in nodes]
def _assert_nodes_in_choices(self, form, nodes):
choices = form.fields['_ref_node_id'].choices
assert 0 == choices.pop(0)[0]
assert nodes == [(choice[0], choice[1]) for choice in choices]
def _move_node_helper(self, node, safe_parent_nodes):
form_class = movenodeform_factory(type(node))
form = form_class(instance=node)
assert ['desc', '_position', '_ref_node_id'] == list(
form.base_fields.keys())
got = [choice[0] for choice in form.fields['_position'].choices]
assert ['first-child', 'left', 'right'] == got
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
def _get_node_ids_and_depths(self, nodes):
return [(node.id, node.get_depth()) for node in nodes]
def test_form_root_node(self, model):
nodes = list(model.get_tree())
node = nodes.pop(0)
safe_parent_nodes = self._get_node_ids_and_depths(nodes)
self._move_node_helper(node, safe_parent_nodes)
def test_form_leaf_node(self, model):
nodes = list(model.get_tree())
node = nodes.pop()
safe_parent_nodes = self._get_node_ids_and_depths(nodes)
self._move_node_helper(node, safe_parent_nodes)
def test_form_admin(self, model):
request = None
nodes = list(model.get_tree())
safe_parent_nodes = self._get_node_ids_and_depths(nodes)
for node in model.objects.all():
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
ma = admin_class(model, site)
got = list(ma.get_form(request).base_fields.keys())
desc_pos_refnodeid = ['desc', '_position', '_ref_node_id']
assert desc_pos_refnodeid == got
got = ma.get_fieldsets(request)
expected = [(None, {'fields': desc_pos_refnodeid})]
assert got == expected
got = ma.get_fieldsets(request, node)
assert got == expected
form = ma.get_form(request)()
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
class TestModelAdmin(TestNonEmptyTree):
def test_default_fields(self, model):
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
ma = admin_class(model, site)
assert list(ma.get_form(None).base_fields.keys()) == [
'desc', '_position', '_ref_node_id']
class TestSortedForm(TestTreeSorted):
def test_sorted_form(self, sorted_model):
sorted_model.add_root(val1=3, val2=3, desc='zxy')
sorted_model.add_root(val1=1, val2=4, desc='bcd')
sorted_model.add_root(val1=2, val2=5, desc='zxy')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=4, val2=1, desc='fgh')
sorted_model.add_root(val1=3, val2=3, desc='abc')
sorted_model.add_root(val1=2, val2=2, desc='qwe')
sorted_model.add_root(val1=3, val2=2, desc='vcx')
form_class = movenodeform_factory(sorted_model)
form = form_class()
assert list(form.fields.keys()) == ['val1', 'val2', 'desc',
'_position', '_ref_node_id']
form = form_class(instance=sorted_model.objects.get(desc='bcd'))
assert list(form.fields.keys()) == ['val1', 'val2', 'desc',
'_position', '_ref_node_id']
assert 'id__position' in str(form)
assert 'id__ref_node_id' in str(form)
class TestForm(TestNonEmptyTree):
def test_form(self, model):
form_class = movenodeform_factory(model)
form = form_class()
assert list(form.fields.keys()) == ['desc', '_position',
'_ref_node_id']
form = form_class(instance=model.objects.get(desc='1'))
assert list(form.fields.keys()) == ['desc', '_position',
'_ref_node_id']
assert 'id__position' in str(form)
assert 'id__ref_node_id' in str(form)
def test_get_position_ref_node(self, model):
form_class = movenodeform_factory(model)
instance_parent = model.objects.get(desc='1')
form = form_class(instance=instance_parent)
assert form._get_position_ref_node(instance_parent) == {
'_position': 'first-child',
'_ref_node_id': ''
}
instance_child = model.objects.get(desc='21')
form = form_class(instance=instance_child)
assert form._get_position_ref_node(instance_child) == {
'_position': 'first-child',
'_ref_node_id': model.objects.get(desc='2').pk
}
instance_grandchild = model.objects.get(desc='22')
form = form_class(instance=instance_grandchild)
assert form._get_position_ref_node(instance_grandchild) == {
'_position': 'right',
'_ref_node_id': model.objects.get(desc='21').pk
}
instance_grandchild = model.objects.get(desc='231')
form = form_class(instance=instance_grandchild)
assert form._get_position_ref_node(instance_grandchild) == {
'_position': 'first-child',
'_ref_node_id': model.objects.get(desc='23').pk
}
def test_clean_cleaned_data(self, model):
instance_parent = model.objects.get(desc='1')
_position = 'first-child'
_ref_node_id = ''
form_class = movenodeform_factory(model)
form = form_class(
instance=instance_parent,
data={
'_position': _position,
'_ref_node_id': _ref_node_id,
'desc': instance_parent.desc
}
)
assert form.is_valid()
assert form._clean_cleaned_data() == (_position, _ref_node_id)
def test_save_edit(self, model):
instance_parent = model.objects.get(desc='1')
original_count = len(model.objects.all())
form_class = movenodeform_factory(model)
form = form_class(
instance=instance_parent,
data={
'_position': 'first-child',
'_ref_node_id': model.objects.get(desc='2').pk,
'desc': instance_parent.desc
}
)
assert form.is_valid()
saved_instance = form.save()
assert original_count == model.objects.all().count()
assert saved_instance.get_children_count() == 0
assert saved_instance.get_depth() == 2
assert not saved_instance.is_root()
assert saved_instance.is_leaf()
# Return to original state
form_class = movenodeform_factory(model)
form = form_class(
instance=saved_instance,
data={
'_position': 'first-child',
'_ref_node_id': '',
'desc': saved_instance.desc
}
)
assert form.is_valid()
restored_instance = form.save()
assert original_count == model.objects.all().count()
assert restored_instance.get_children_count() == 0
assert restored_instance.get_depth() == 1
assert restored_instance.is_root()
assert restored_instance.is_leaf()
def test_save_new(self, model):
original_count = model.objects.all().count()
assert original_count == 10
_position = 'first-child'
form_class = movenodeform_factory(model)
form = form_class(
data={'_position': _position, 'desc': 'New Form Test'})
assert form.is_valid()
assert form.save() is not None
assert original_count < model.objects.all().count()
class TestAdminTreeTemplateTags(TestCase):
def test_treebeard_css(self):
template = Template("{% load admin_tree %}{% treebeard_css %}")
context = Context()
rendered = template.render(context)
expected = ('<link rel="stylesheet" type="text/css" '
'href="/treebeard/treebeard-admin.css"/>')
assert expected == rendered
def test_treebeard_js(self):
template = Template("{% load admin_tree %}{% treebeard_js %}")
context = Context()
rendered = template.render(context)
expected = ('<script type="text/javascript" src="jsi18n"></script>'
'<script type="text/javascript" '
'src="/treebeard/treebeard-admin.js"></script>'
'<script>(function($){'
'jQuery = $.noConflict(true);'
'})(django.jQuery);</script>'
'<script type="text/javascript" '
'src="/treebeard/jquery-ui-1.8.5.custom.min.js"></script>')
assert expected == rendered
def test_get_static_url(self):
with self.settings(STATIC_URL=None, MEDIA_URL=None):
assert get_static_url() == '/'
with self.settings(STATIC_URL='/static/', MEDIA_URL=None):
assert get_static_url() == '/static/'
with self.settings(STATIC_URL=None, MEDIA_URL='/media/'):
assert get_static_url() == '/media/'
with self.settings(STATIC_URL='/static/', MEDIA_URL='/media/'):
assert get_static_url() == '/static/'
class TestAdminTree(TestNonEmptyTree):
template = Template('{% load admin_tree %}{% spaceless %}'
'{% result_tree cl request %}{% endspaceless %}')
def test_result_tree(self, model_without_proxy):
"""
        Verifies that the result_tree inclusion tag generates a table with
        the default ModelAdmin settings.
"""
model = model_without_proxy
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
        # We have the same number of drag handlers as objects
drag_handler = '<td class="drag-handler"><span> </span></td>'
assert table_output.count(drag_handler) == model.objects.count()
# All nodes are in the result tree
for object in model.objects.all():
url = cl.url_for_result(object)
node = '<a href="%s">Node %i</a>' % (url, object.pk)
assert node in table_output
# Unfiltered
assert '<input type="hidden" id="has-filters" value="0"/>' in \
table_output
def test_unicode_result_tree(self, model_with_unicode):
"""
        Verifies that the result_tree inclusion tag renders unicode node
        descriptions with the default ModelAdmin settings.
"""
model = model_with_unicode
# Add a unicode description
model.add_root(desc='áéîøü')
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
        # We have the same number of drag handlers as objects
drag_handler = '<td class="drag-handler"><span> </span></td>'
assert table_output.count(drag_handler) == model.objects.count()
# All nodes are in the result tree
for object in model.objects.all():
url = cl.url_for_result(object)
node = '<a href="%s">%s</a>' % (url, object.desc)
assert node in table_output
# Unfiltered
assert '<input type="hidden" id="has-filters" value="0"/>' in \
table_output
def test_result_filtered(self, model_without_proxy):
""" Test template changes with filters or pagination.
"""
model = model_without_proxy
# Filtered GET
request = RequestFactory().get('/admin/tree/?desc=1')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# Filtered
assert '<input type="hidden" id="has-filters" value="1"/>' in \
table_output
        # Non-filtered GET; pagination alone should not count as filtering
request = RequestFactory().get('/admin/tree/?p=1')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# Not Filtered
assert '<input type="hidden" id="has-filters" value="0"/>' in \
table_output
        # Non-filtered GET; the 'all' parameter alone should not count as
        # filtering
request = RequestFactory().get('/admin/tree/?all=1')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
# Not Filtered
assert '<input type="hidden" id="has-filters" value="0"/>' in \
table_output
class TestAdminTreeList(TestNonEmptyTree):
template = Template('{% load admin_tree_list %}{% spaceless %}'
'{% result_tree cl request %}{% endspaceless %}')
def test_result_tree_list(self, model_without_proxy):
"""
        Verifies that the result_tree inclusion tag from admin_tree_list
        generates a nested list with the default ModelAdmin settings.
"""
model = model_without_proxy
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
output_template = '<li><a href="%i/" >Node %i</a>'
for object in model.objects.all():
expected_output = output_template % (object.pk, object.pk)
assert expected_output in table_output
def test_result_tree_list_with_action(self, model_without_proxy):
model = model_without_proxy
request = RequestFactory().get('/admin/tree/')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request,
'action_form': True})
table_output = self.template.render(context)
output_template = ('<input type="checkbox" class="action-select" '
'value="%i" name="_selected_action" />'
'<a href="%i/" >Node %i</a>')
for object in model.objects.all():
expected_output = output_template % (object.pk, object.pk,
object.pk)
assert expected_output in table_output
def test_result_tree_list_with_get(self, model_without_proxy):
model = model_without_proxy
        # Test the 't' GET parameter with the value 'id'
request = RequestFactory().get('/admin/tree/?t=id')
site = AdminSite()
form_class = movenodeform_factory(model)
admin_class = admin_factory(form_class)
m = admin_class(model, site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, model, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.formset = None
context = Context({'cl': cl,
'request': request})
table_output = self.template.render(context)
output_template = "opener.dismissRelatedLookupPopup(window, '%i');"
for object in model.objects.all():
expected_output = output_template % object.pk
assert expected_output in table_output
class TestTreeAdmin(TestNonEmptyTree):
site = AdminSite()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request_factory = RequestFactory()
request = request_factory.get(url)
request.user = user
return request
def _mocked_request(self, data):
request_factory = RequestFactory()
request = request_factory.post('/', data=data)
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
return request
def _get_admin_obj(self, model_class):
form_class = movenodeform_factory(model_class)
admin_class = admin_factory(form_class)
return admin_class(model_class, self.site)
def test_changelist_view(self):
tmp_user = self._create_superuser('changelist_tmp')
request = self._mocked_authenticated_request('/', tmp_user)
admin_obj = self._get_admin_obj(models.AL_TestNode)
admin_obj.changelist_view(request)
assert admin_obj.change_list_template == 'admin/tree_list.html'
admin_obj = self._get_admin_obj(models.MP_TestNode)
admin_obj.changelist_view(request)
assert admin_obj.change_list_template != 'admin/tree_list.html'
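        # (presumably AL trees fall back to the recursive tree_list
        # template because adjacency lists lack the path ordering the
        # default drag-and-drop change list relies on)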
def test_get_node(self, model):
admin_obj = self._get_admin_obj(model)
target = model.objects.get(desc='2')
assert admin_obj.get_node(target.pk) == target
def test_move_node_validate_keyerror(self, model):
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.move_node(request)
assert response.status_code == 400
request = self._mocked_request(data={'node_id': 1})
response = admin_obj.move_node(request)
assert response.status_code == 400
def test_move_node_validate_valueerror(self, model):
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={'node_id': 1,
'sibling_id': 2,
'as_child': 'invalid'})
response = admin_obj.move_node(request)
assert response.status_code == 400
def test_move_validate_missing_nodeorderby(self, model):
node = model.objects.get(desc='231')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(True, node, 'sorted-child',
request, target=node)
assert response.status_code == 400
response = admin_obj.try_to_move_node(True, node, 'sorted-sibling',
request, target=node)
assert response.status_code == 400
def test_move_validate_invalid_pos(self, model):
node = model.objects.get(desc='231')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(True, node, 'invalid_pos',
request, target=node)
assert response.status_code == 400
def test_move_validate_to_descendant(self, model):
node = model.objects.get(desc='2')
target = model.objects.get(desc='231')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={})
response = admin_obj.try_to_move_node(True, node, 'first-sibling',
request, target)
assert response.status_code == 400
def test_move_left(self, model):
node = model.objects.get(desc='231')
target = model.objects.get(desc='2')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={'node_id': node.pk,
'sibling_id': target.pk,
'as_child': 0})
response = admin_obj.move_node(request)
assert response.status_code == 200
expected = [('1', 1, 0),
('231', 1, 0),
('2', 1, 4),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
def test_move_last_child(self, model):
node = model.objects.get(desc='231')
target = model.objects.get(desc='2')
admin_obj = self._get_admin_obj(model)
request = self._mocked_request(data={'node_id': node.pk,
'sibling_id': target.pk,
'as_child': 1})
response = admin_obj.move_node(request)
assert response.status_code == 200
expected = [('1', 1, 0),
('2', 1, 5),
('21', 2, 0),
('22', 2, 0),
('23', 2, 0),
('24', 2, 0),
('231', 2, 0),
('3', 1, 0),
('4', 1, 1),
('41', 2, 0)]
assert self.got(model) == expected
|
dcramer/django-db-log
|
djangodblog/helpers.py
|
from django.conf import settings
from django.template import (Template, Context, TemplateDoesNotExist,
TemplateSyntaxError)
from django.utils.encoding import smart_unicode
from django.utils.hashcompat import md5_constructor
from django.views.debug import ExceptionReporter
class ImprovedExceptionReporter(ExceptionReporter):
def __init__(self, request, exc_type, exc_value, frames):
ExceptionReporter.__init__(self, request, exc_type, exc_value, None)
self.frames = frames
def get_traceback_frames(self):
return self.frames
def get_traceback_html(self):
"Return HTML code for traceback."
if issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
if (settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source') and
isinstance(self.exc_value, TemplateSyntaxError)):
self.get_template_exception_info()
frames = self.get_traceback_frames()
unicode_hint = ''
if issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context({
'exception_type': self.exc_type.__name__,
'exception_value': smart_unicode(self.exc_value, errors='replace'),
'unicode_hint': unicode_hint,
'frames': frames,
'lastframe': frames[-1],
'request': self.request,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
})
return t.render(c)
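# A minimal sketch of how this reporter might be driven (illustrative only;
# the frame extraction shown mirrors Django's debug machinery rather than an
# API defined in this module):
#
#     import sys
#     try:
#         do_something_risky()  # hypothetical failing call
#     except Exception:
#         exc_type, exc_value, tb = sys.exc_info()
#         frames = ExceptionReporter(request, exc_type, exc_value,
#                                    tb).get_traceback_frames()
#         reporter = ImprovedExceptionReporter(request, exc_type, exc_value,
#                                              frames)
#         html = reporter.get_traceback_html()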
def construct_checksum(error):
checksum = md5_constructor(str(error.level))
checksum.update(error.class_name or '')
message = error.traceback or error.message
if isinstance(message, unicode):
message = message.encode('utf-8', 'replace')
checksum.update(message)
return checksum.hexdigest()
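# A minimal sketch of how construct_checksum supports grouping duplicate
# errors (the error record and model below are hypothetical; any object with
# `level`, `class_name`, `traceback`, and `message` attributes works):
#
#     checksum = construct_checksum(error)
#     is_duplicate = ErrorBatch.objects.filter(checksum=checksum).exists()
#
# Records sharing a level, class name, and traceback (or message) hash to the
# same checksum, so repeat occurrences can be counted rather than re-stored.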
TECHNICAL_500_TEMPLATE = """
<div id="summary">
<h1>{{ exception_type }} at {{ request.path_info|escape }}</h1>
<pre class="exception_value">{{ exception_value|escape }}</pre>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|escape }}</pre></td>
</tr>
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|escape }}</strong></p>
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Template error</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
<div id="traceback">
<h2>Traceback <span class="commands"><a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span></h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')">{{ line|escape }}</li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')">{{ frame.context_line|escape }} <span>...</span></li></ol>
{% if frame.post_context %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')">{{ line|escape }}</li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|escape }}</td>
<td class="code"><div>{{ var.1|pprint|escape }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<div id="pastebinTraceback" class="pastebin">
<textarea id="traceback_area" cols="140" rows="25">
Environment:
{% if request.META %}Request Method: {{ request.META.REQUEST_METHOD }}{% endif %}
Request URL: {{ request.build_absolute_uri|escape }}
Python Version: {{ sys_version_info }}
{% if template_does_not_exist %}Template Loader Error: (Unavailable in db-log)
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }} at {{ request.path_info|escape }}
Exception Value: {{ exception_value|escape }}
</textarea>
</div>
</div>
{% if request %}
<div id="requestinfo">
<h2>Request information</h2>
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if request.POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
{% if request.META %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No META data</p>
{% endif %}
</div>
{% endif %}
"""
|
temmeand/scikit-rf
|
qtapps/skrf_qtwidgets/networkPlotWidget.py
|
from collections import OrderedDict
from math import sqrt
import numpy as np
import pyqtgraph as pg
from qtpy import QtWidgets
import skrf
from . import smith_chart, util
class NetworkPlotWidget(QtWidgets.QWidget):
S_VALS = OrderedDict((
("decibels", "db"),
("magnitude", "mag"),
("phase (deg)", "deg"),
("phase unwrapped (deg)", "deg_unwrap"),
("phase (rad)", "rad"),
("phase unwrapped (rad)", "rad_unwrap"),
("real", "re"),
("imaginary", "im"),
("group delay", "group_delay"),
("vswr", "vswr")
))
S_UNITS = list(S_VALS.keys())
def __init__(self, parent=None, **kwargs):
super(NetworkPlotWidget, self).__init__(parent)
self.checkBox_useCorrected = QtWidgets.QCheckBox()
self.checkBox_useCorrected.setText("Plot Corrected")
self.checkBox_useCorrected.setEnabled(False)
self.comboBox_primarySelector = QtWidgets.QComboBox(self)
self.comboBox_primarySelector.addItems(("S", "Z", "Y", "A", "Smith Chart"))
self.comboBox_unitsSelector = QtWidgets.QComboBox(self)
self.comboBox_unitsSelector.addItems(self.S_UNITS)
self.comboBox_traceSelector = QtWidgets.QComboBox(self)
self.set_trace_items()
self.comboBox_traceSelector.setCurrentIndex(0)
self.plot_layout = pg.GraphicsLayoutWidget(self)
self.plot_layout.sceneObj.sigMouseClicked.connect(self.graph_clicked)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.addWidget(self.checkBox_useCorrected)
self.horizontalLayout.addWidget(self.comboBox_primarySelector)
self.horizontalLayout.addWidget(self.comboBox_unitsSelector)
self.horizontalLayout.addWidget(self.comboBox_traceSelector)
self.data_info_label = QtWidgets.QLabel("Click a data point to see info")
self.verticalLayout = QtWidgets.QVBoxLayout(self)
self.verticalLayout.setContentsMargins(3, 3, 3, 3) # normally this will be embedded in another application
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout.addWidget(self.plot_layout)
self.verticalLayout.addWidget(self.data_info_label)
self.checkBox_useCorrected.stateChanged.connect(self.set_use_corrected)
self.comboBox_primarySelector.currentIndexChanged.connect(self.update_plot)
self.comboBox_unitsSelector.currentIndexChanged.connect(self.update_plot)
self.comboBox_traceSelector.currentIndexChanged.connect(self.update_plot)
self.plot = self.plot_layout.addPlot() # type: pg.PlotItem
self._ntwk = None
self._ntwk_corrected = None
self._corrected_data_enabled = True
self._use_corrected = False
self.corrected_data_enabled = kwargs.get('corrected_data_enabled', True)
self.plot.addLegend()
self.plot.showGrid(True, True)
self.plot.setLabel("bottom", "frequency", units="Hz")
self.last_plot = "rectangular"
def get_use_corrected(self):
return self._use_corrected
def set_use_corrected(self, val):
if val in (1, 2):
self._use_corrected = True
else:
self._use_corrected = False
self.update_plot()
use_corrected = property(get_use_corrected, set_use_corrected)
@property
def ntwk(self): return self._ntwk
@ntwk.setter
def ntwk(self, ntwk):
if ntwk is None or isinstance(ntwk, skrf.Network) or type(ntwk) in (list, tuple):
self.set_trace_items(ntwk)
self._ntwk = ntwk
self.update_plot()
else:
raise TypeError("must set to skrf.Network, list of Networks, or None")
@property
def ntwk_corrected(self): return self._ntwk_corrected
@ntwk_corrected.setter
def ntwk_corrected(self, ntwk):
if ntwk is None or isinstance(ntwk, skrf.Network) or type(ntwk) in (list, tuple):
self.set_trace_items(ntwk)
self._ntwk_corrected = ntwk
self.update_plot()
else:
raise TypeError("must set to skrf.Network, list of Networks, or None")
@property
def corrected_data_enabled(self):
return self._corrected_data_enabled
@corrected_data_enabled.setter
def corrected_data_enabled(self, enabled):
if enabled is True:
self._corrected_data_enabled = True
self.checkBox_useCorrected.setEnabled(True)
else:
self._corrected_data_enabled = False
self._use_corrected = False
self.checkBox_useCorrected.setEnabled(False)
def set_networks(self, ntwk, ntwk_corrected=None):
if ntwk is None or isinstance(ntwk, skrf.Network) or type(ntwk) in (list, tuple):
self._ntwk = ntwk
self.set_trace_items(self._ntwk)
if ntwk is None:
self._ntwk_corrected = None
self.set_trace_items(self._ntwk)
return
else:
raise TypeError("must set to skrf.Network, list of Networks, or None")
if ntwk_corrected is None or isinstance(ntwk_corrected, skrf.Network) or type(ntwk_corrected) in (list, tuple):
self._ntwk_corrected = ntwk_corrected
else:
raise TypeError("must set to skrf.Network, list of Networks, or None")
self.update_plot()
def _calc_traces(self):
trace = self.comboBox_traceSelector.currentIndex()
n_ = m_ = 0
if trace > 0:
mn = trace - 1
nports = int(sqrt(self.comboBox_traceSelector.count() - 1))
m_ = mn % nports
n_ = int((mn - mn % nports) / nports)
return m_, n_, trace
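    # Worked example of the index math above (illustrative only): for a
    # 2-port network the trace selector holds ["all", "S11", "S21", "S12",
    # "S22"], so nports = sqrt(5 - 1) = 2. Selecting "S12" gives trace = 3
    # and mn = 2, hence m_ = 2 % 2 = 0 and n_ = (2 - 0) / 2 = 1, which maps
    # back to "S{m_+1}{n_+1}" = "S12".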
def reset_plot(self, smith=False):
self.plot.clear()
if not smith and self.last_plot == "smith":
self.plot.setAspectLocked(False)
self.plot.autoRange()
self.plot.enableAutoRange()
self.plot.setLabel("bottom", "frequency", units="Hz")
if smith and not self.last_plot == "smith":
self.last_plot = "smith"
self.ZGrid = smith_chart.gen_z_grid()
self.s_unity_circle = smith_chart.gen_s_unity_circle()
self.plot_layout.removeItem(self.plot)
self.plot = self.plot_layout.addPlot()
self.plot.setAspectLocked()
self.plot.setXRange(-1, 1)
self.plot.setYRange(-1, 1)
if smith:
self.plot.addItem(self.s_unity_circle)
self.plot.addItem(self.ZGrid)
if not smith:
self.plot.setLabel("left", "")
self.plot.setTitle(None)
legend = self.plot.legend
if legend is not None:
legend.scene().removeItem(legend)
self.plot.legend = None
self.plot.addLegend()
def clear_plot(self):
self._ntwk = None
self._ntwk_corrected = None
self._ntwk_list = None
self.reset_plot()
def set_trace_items(self, ntwk=None):
self.comboBox_traceSelector.blockSignals(True)
current_index = self.comboBox_traceSelector.currentIndex()
nports = 0
if isinstance(ntwk, skrf.Network):
nports = ntwk.nports
elif type(ntwk) in (list, tuple):
for n in ntwk:
if n.nports > nports:
nports = n.nports
self.comboBox_traceSelector.clear()
self.comboBox_traceSelector.addItem("all")
for n in range(nports):
for m in range(nports):
self.comboBox_traceSelector.addItem("S{:d}{:d}".format(m + 1, n + 1))
if current_index <= self.comboBox_traceSelector.count():
self.comboBox_traceSelector.setCurrentIndex(current_index)
else:
self.comboBox_traceSelector.setCurrentIndex(0)
self.comboBox_traceSelector.blockSignals(False)
def graph_clicked(self, ev):
"""
:type ev: pg.GraphicsScene.mouseEvents.MouseClickEvent
:return:
"""
xy = self.plot.vb.mapSceneToView(ev.scenePos())
if not ev.isAccepted():
if "smith" in self.comboBox_primarySelector.currentText().lower():
S11 = xy.x() + 1j * xy.y()
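                # Map the clicked reflection coefficient to normalized
                # impedance: Z = (1 + S11) / (1 - S11).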
Z = (1 + S11) / (1 - S11)
self.data_info_label.setText(
"Sre: {:g}, Sim: {:g} - R: {:g}, X: {:g}".format(xy.x(), xy.y(), Z.real, Z.imag))
else:
self.data_info_label.setText("x: {:g}, y: {:g}".format(xy.x(), xy.y()))
elif isinstance(ev.acceptedItem, pg.PlotCurveItem):
curve = ev.acceptedItem # type: pg.PlotCurveItem
spoint = xy.x() + 1j * xy.y()
sdata = curve.xData + 1j * curve.yData
index = np.argmin(np.abs(sdata - spoint))
frequency = curve.ntwk.frequency.f_scaled[index]
S11 = curve.xData[index] + 1j * curve.yData[index]
Z = (1 + S11) / (1 - S11)
self.data_info_label.setText(
"Freq: {:g} ({:s}), S(re): {:g}, S(im): {:g} - R: {:g}, X: {:g}".format(
frequency, curve.ntwk.frequency.unit, S11.real, S11.imag, Z.real, Z.imag))
def _plot_attr(self, ntwk, attr, colors, trace, n_, m_):
for n in range(ntwk.s.shape[2]):
for m in range(ntwk.s.shape[1]):
if trace > 0:
                    if n != n_ or m != m_:
                        continue
c = next(colors)
label = ntwk.name
param = "S{:d}{:d}".format(m + 1, n + 1)
if ntwk.s.shape[1] > 1:
label += " - " + param
if hasattr(ntwk, attr):
s = getattr(ntwk, attr)
if "db" in attr:
splot = pg.PlotDataItem(pen=pg.mkPen(c), name=label)
if not np.any(s[:, m, n] == -np.inf):
splot.setData(ntwk.f, s[:, m, n])
self.plot.addItem(splot)
else:
self.plot.plot(ntwk.f, s[:, m, n], pen=pg.mkPen(c), name=label)
else:
s = getattr(ntwk, param.lower(), None)
if s is None:
continue
if attr == 's_group_delay':
self.plot.plot(ntwk.f, abs(s.group_delay[:, 0, 0]), pen=pg.mkPen(c), name=label)
else:
attr = self.S_VALS[attr]
self.plot.plot(ntwk.f, getattr(s, attr)[:, 0, 0], pen=pg.mkPen(c), name=label)
def update_plot(self):
if self.corrected_data_enabled:
if self.ntwk_corrected:
self.checkBox_useCorrected.setEnabled(True)
else:
self.checkBox_useCorrected.setEnabled(False)
if "smith" in self.comboBox_primarySelector.currentText().lower():
self.plot_smith()
else:
self.plot_ntwk()
self.last_plot = "rectangular"
def plot_ntwk(self):
if self.use_corrected and self.ntwk_corrected is not None:
ntwk = self.ntwk_corrected
else:
ntwk = self.ntwk
if ntwk is None:
return
elif type(ntwk) in (list, tuple):
self.plot_ntwk_list()
return
self.reset_plot()
self.plot.showGrid(True, True)
self.plot.setLabel("bottom", "frequency", units="Hz")
colors = util.trace_color_cycle(ntwk.s.shape[1] ** 2)
m_, n_, trace = self._calc_traces()
primary = self.comboBox_primarySelector.currentText().lower()
s_units = self.comboBox_unitsSelector.currentText()
attr = primary + "_" + self.S_VALS[s_units]
self._plot_attr(ntwk, attr, colors, trace, n_, m_)
self.plot.setLabel("left", s_units)
self.plot.setTitle(ntwk.name)
def plot_ntwk_list(self):
if self.use_corrected and self.ntwk_corrected is not None:
ntwk_list = self.ntwk_corrected
else:
ntwk_list = self.ntwk
if ntwk_list is None:
return
self.reset_plot()
self.plot.showGrid(True, True)
self.plot.setLabel("bottom", "frequency", units="Hz")
colors = util.trace_color_cycle()
m_, n_, trace = self._calc_traces()
primary = self.comboBox_primarySelector.currentText().lower()
s_units = self.comboBox_unitsSelector.currentText()
attr = primary + "_" + self.S_VALS[s_units]
for ntwk in ntwk_list:
self._plot_attr(ntwk, attr, colors, trace, n_, m_)
self.plot.setLabel("left", s_units)
def _map_smith(self, ntwk, colors, trace, n_, m_):
for n in range(ntwk.s.shape[2]):
for m in range(ntwk.s.shape[1]):
if trace > 0:
                    if n != n_ or m != m_:
                        continue
c = next(colors)
label = ntwk.name
if ntwk.s.shape[1] > 1:
label += " - S{:d}{:d}".format(m + 1, n + 1)
s = ntwk.s[:, m, n]
curve = self.plot.plot(s.real, s.imag, pen=pg.mkPen(c), name=label)
curve.curve.setClickable(True)
curve.curve.ntwk = ntwk
def plot_smith(self):
if self.use_corrected and self.ntwk_corrected is not None:
ntwk = self.ntwk_corrected
else:
ntwk = self.ntwk
if ntwk is None:
self.reset_plot(smith=True)
return
elif type(ntwk) in (list, tuple):
self.plot_smith_list()
return
self.reset_plot(smith=True)
colors = util.trace_color_cycle(ntwk.s.shape[1] ** 2)
m_, n_, trace = self._calc_traces()
self._map_smith(ntwk, colors, trace, n_, m_)
self.plot.setTitle(ntwk.name)
def plot_smith_list(self):
self.reset_plot(smith=True)
ntwk_list = self.ntwk
if ntwk_list is None:
return
colors = util.trace_color_cycle()
m_, n_, trace = self._calc_traces()
for ntwk in ntwk_list:
self._map_smith(ntwk, colors, trace, n_, m_)
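# A minimal usage sketch (illustrative; assumes a Qt application context and
# a touchstone file named "example.s2p", neither of which ships with this
# module):
#
#     import sys
#     from qtpy import QtWidgets
#     import skrf
#     app = QtWidgets.QApplication(sys.argv)
#     widget = NetworkPlotWidget()
#     widget.ntwk = skrf.Network("example.s2p")  # setter triggers update_plot
#     widget.show()
#     sys.exit(app.exec_())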
|
DudLab/nanshe
|
nanshe/__init__.py
|
"""
``nanshe`` package, an image processing toolkit.
===============================================================================
Overview
===============================================================================
The ``nanshe`` package is an image processing package that contains a variety
of different techniques, which are used primarily to assemble the ADINA
algorithm proposed by Diego et al.
( doi:`10.1109/ISBI.2013.6556660`_ ) to extract active neurons from
an image sequence. This algorithm uses online dictionary learning (a form of
matrix factorization) at its heart as implemented by Mairal et al.
( doi:`10.1145/1553374.1553463`_ ) to find a set of atoms (or basis
images) that are representative of an image sequence and can be used to
approximately reconstruct the sequence. However, it is designed in a modular
way so that a different matrix factorization could be swapped in and
appropriately parameterized. Other portions of the algorithm include a
preprocessing phase that has a variety of different techniques that can be
applied optionally. For example, removing registration artifacts from
a line-by-line registration algorithm, background subtraction, and a wavelet
transform to filter objects of a particular size.
===============================================================================
Installation
===============================================================================
-------------------------------------------------------------------------------
Dependencies
-------------------------------------------------------------------------------
Implementation of the algorithm has been done here in pure Python. However, a
few dependencies are required to get started. These include NumPy_, SciPy_,
h5py_, scikit-image_, SPAMS_, VIGRA_, and rank_filter_. The first 4 can be
found in standard distributions like Anaconda_. Installing VIGRA and
rank_filter can be done by using CMake_. SPAMS requires an existing BLAS/LAPACK
implementation. On Mac and Linux, any implementation will do; typically
ATLAS_ is used, but OpenBLAS_ or `Intel MKL`_ (if available) can be used as
well, though this requires modifying the setup.py script. On Windows, the
setup.py links to R_, which should be changed if another BLAS is available.
-------------------------------------------------------------------------------
Building
-------------------------------------------------------------------------------
Python
===============================================================================
As this package is pure Python, building follows the standard method.
Currently, we require setuptools_ for installation; so, make sure it is
installed. Then simply issue the following command to build and install.
.. code-block:: sh
python setup.py install
Alternatively, one can build and then install in two steps if that is
preferable.
.. code-block:: sh
python setup.py build
python setup.py install
Conda
===============================================================================
Current packages can be found on our anaconda_ channel
( https://anaconda.org/nanshe/nanshe ). New ones are released every time a
tagged release that passes the tests is pushed to the ``master`` branch on
GitHub. It is also possible to build conda_ packages for non-release
commits, as we do in our continuous integration strategy.
To do this, the dependencies must be installed or available from an
anaconda channel. Additionally, one must be using conda's ``root``
environment and have conda-build installed. Once this is done, one need
only run the following command with ``setup.py``.
.. code-block:: sh
python setup.py bdist_conda
Assuming this completes successfully, conda will provide the path to the built
package.
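One can then install the freshly built package from the local channel (an
illustrative invocation; the exact package name follows this repository):
.. code-block:: sh
    conda install --use-local nanshe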
-------------------------------------------------------------------------------
Testing
-------------------------------------------------------------------------------
Running the test suite is fairly straightforward. Testing is done using
nose_, so make sure it is installed if you wish to run the tests. Some of
the tests require drmaa_ to be installed and properly configured. If that is
not the
case, those tests will be skipped automatically. To run the test suite, one
must be in the source directory. Then simply run the following command. This
will run all the tests and doctests. Depending on your machine, this will take
a few minutes to complete.
.. code-block:: sh
nosetests
The full test suite includes 3D tests, which are very slow to run and so are
not run by default. As the code has been written to be dimensionally agnostic,
these tests don't cover anything that the 2D tests don't already cover. To run
the 3D tests, simply use ``setup.all.cfg``.
.. code-block:: sh
nosetests -c setup.all.cfg
It is also possible to run the tests as part of the setup.py process, as
shown below. If 3D tests are required for this portion, one
need only replace ``setup.cfg`` with ``setup.all.cfg``.
.. code-block:: sh
python setup.py nosetests
Also, the typical ``test`` subcommand can be used to run ``nosetests``, but no
other arguments are allowed.
.. code-block:: sh
python setup.py test
-------------------------------------------------------------------------------
Documentation
-------------------------------------------------------------------------------
Current documentation can be found on the GitHub page
( http://nanshe-org.github.io/nanshe/ ). A new copy is rebuilt any time a
passing commit is added to the ``master`` branch. Each documentation commit
is added to the ``gh-pages`` branch with a reference to the commit in ``master``
that triggered the build as well as the tag (version) if provided.
It is also possible to build the documentation from source. This project uses
Sphinx_ for generating documentation. Please make sure you have it installed.
In particular, version 1.3 or later is required. Additionally, the
`Cloud Sphinx Theme`_ is required for generating the documentation and is used
in the HTML layout.
The ``rst`` files (outside of ``index.rst``) are not distributed with the
source code. This is because it is trivial to generate them and it is too
easy for the code to become out of sync with the documentation if they are
distributed. However,
building ``rst`` files has been made a dependency of all other documentation
build steps so one does not have to think about this. The preferred method for
building documentation is to use the ``setup.py`` hook as shown below. This
will build the RST files and place them in ``docs/``. It will also build the
HTML files by default and put them in the directory ``build/sphinx/html/``.
Simply open the ``index.html`` file to take a look.
.. code-block:: sh
python setup.py build_sphinx
More build options can be determined by running the help command.
.. code-block:: sh
python setup.py build_sphinx --help
-------------------------------------------------------------------------------
Cleaning
-------------------------------------------------------------------------------
After any build operation, a number of undesirable intermediate files are
created and left behind that one may wish to remove. To do this, one merely
needs to run the clean command.
.. code-block:: sh
python setup.py clean
This has been modified to also remove RST files generated when building
documentation. However, it will leave any final build products like HTML files.
If one wishes to remove everything built (including final build products), the
clean all command will do this.
.. code-block:: sh
python setup.py clean --all
.. _`10.1109/ISBI.2013.6556660`: http://dx.doi.org/10.1109/ISBI.2013.6556660
.. _`10.1145/1553374.1553463`: http://dx.doi.org/10.1145/1553374.1553463
.. _NumPy: http://www.numpy.org/
.. _SciPy: http://www.scipy.org/
.. _h5py: http://www.h5py.org/
.. _scikit-image: http://scikit-image.org/
.. _SPAMS: http://spams-devel.gforge.inria.fr/
.. _VIGRA: http://ukoethe.github.io/vigra/
.. _rank_filter: http://github.com/nanshe-org/rank_filter/
.. _Anaconda: http://store.continuum.io/cshop/anaconda/
.. _CMake: http://www.cmake.org/
.. _ATLAS: http://math-atlas.sourceforge.net/
.. _OpenBLAS: http://www.openblas.net/
.. _`Intel MKL`: http://software.intel.com/en-us/intel-mkl
.. _R: http://www.r-project.org/
.. _setuptools: http://pythonhosted.org/setuptools/
.. _anaconda: https://anaconda.org/
.. _conda: http://conda.pydata.org/
.. _nose: http://nose.readthedocs.org/en/latest/
.. _drmaa: http://github.com/pygridtools/drmaa-python
.. _Sphinx: http://sphinx-doc.org/
.. _`Cloud Sphinx Theme`: https://pythonhosted.org/cloud_sptheme/
"""
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Dec 22, 2014 08:46:12 EST$"
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__all__ = [
"box", "converter", "io", "imp", "learner", "registerer", "syn", "util"
]
from nanshe import box
from nanshe import converter
from nanshe import io
from nanshe import imp
from nanshe import learner
from nanshe import registerer
from nanshe import syn
from nanshe import util
|
catapult-project/catapult
|
telemetry/telemetry/core/memory_cache_http_server_unittest.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
from telemetry.core import util
from telemetry.core import memory_cache_http_server
from telemetry.testing import tab_test_case
class RequestHandler(
memory_cache_http_server.MemoryCacheDynamicHTTPRequestHandler):
def ResponseFromHandler(self, path):
content = "Hello from handler"
return self.MakeResponse(content, "text/html", False)
class MemoryCacheHTTPServerTest(tab_test_case.TabTestCase):
def setUp(self):
super(MemoryCacheHTTPServerTest, self).setUp()
self._test_filename = 'bear.webm'
test_file = os.path.join(util.GetUnittestDataDir(), 'bear.webm')
self._test_file_size = os.stat(test_file).st_size
def testBasicHostingAndRangeRequests(self):
self.Navigate('blank.html')
x = self._tab.EvaluateJavaScript('document.body.innerHTML')
x = x.strip()
# Test basic html hosting.
self.assertEqual(x, 'Hello world')
file_size = self._test_file_size
last_byte = file_size - 1
# Test byte range request: no end byte.
self.CheckContentHeaders('0-', '0-%d' % last_byte, file_size)
# Test byte range request: greater than zero start byte.
self.CheckContentHeaders('100-', '100-%d' % last_byte, file_size - 100)
# Test byte range request: explicit byte range.
self.CheckContentHeaders('2-500', '2-500', '499')
# Test byte range request: no start byte.
self.CheckContentHeaders('-228', '%d-%d' % (file_size - 228, last_byte),
'228')
# Test byte range request: end byte less than start byte.
self.CheckContentHeaders('100-5', '100-%d' % last_byte, file_size - 100)
def CheckContentHeaders(self, content_range_request, content_range_response,
content_length_response):
self._tab.ExecuteJavaScript(
"""
var loaded = false;
var xmlhttp = new XMLHttpRequest();
xmlhttp.onload = function(e) {
loaded = true;
};
// Avoid cached content by appending unique URL param.
xmlhttp.open('GET', {{ url }} + "?t=" + Date.now(), true);
xmlhttp.setRequestHeader('Range', {{ range }});
xmlhttp.send();
""",
url=self.UrlOfUnittestFile(self._test_filename),
range='bytes=%s' % content_range_request)
self._tab.WaitForJavaScriptCondition('loaded', timeout=5)
content_range = self._tab.EvaluateJavaScript(
'xmlhttp.getResponseHeader("Content-Range");')
content_range_response = 'bytes %s/%d' % (content_range_response,
self._test_file_size)
self.assertEqual(content_range, content_range_response)
content_length = self._tab.EvaluateJavaScript(
'xmlhttp.getResponseHeader("Content-Length");')
self.assertEqual(content_length, str(content_length_response))
def testAbsoluteAndRelativePathsYieldSameURL(self):
test_file_rel_path = 'green_rect.html'
test_file_abs_path = os.path.abspath(
os.path.join(util.GetUnittestDataDir(), test_file_rel_path))
# It's necessary to bypass self.UrlOfUnittestFile since that
# concatenates the unittest directory on to the incoming path,
# causing the same code path to be taken in both cases.
self._platform.SetHTTPServerDirectories(util.GetUnittestDataDir())
self.assertEqual(self._platform.http_server.UrlOf(test_file_rel_path),
self._platform.http_server.UrlOf(test_file_abs_path))
def testDynamicHTTPServer(self):
self.Navigate('test.html', handler_class=RequestHandler)
x = self._tab.EvaluateJavaScript('document.body.innerHTML')
self.assertEqual(x, 'Hello from handler')
|
witcxc/scipy
|
scipy/cluster/hierarchy.py
|
"""
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
        D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
    y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
    y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
    y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A 4 by :math:`(n-1)` matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
      cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
      where cluster u was formed from clusters s and t, and v
      is a remaining cluster in the forest. (This is also called WPGMA.)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
    * method='median' assigns :math:`d(s,t)` like the ``centroid``
      method. When two clusters :math:`s` and :math:`t` are combined
      into a new cluster :math:`u`, the average of centroids s and t
      gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
    implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
return Z
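# A minimal usage sketch for `linkage` (illustrative; the observations are
# random, so the resulting hierarchy is arbitrary):
#
#     >>> import numpy as np
#     >>> from scipy.cluster.hierarchy import linkage
#     >>> X = np.random.rand(10, 3)   # 10 observations in 3 dimensions
#     >>> Z = linkage(X, method='ward')
#     >>> Z.shape                     # n - 1 merges, 4 columns each
#     (9, 4)
#
# Each row of Z records the two cluster indices merged, the distance between
# them, and the number of original observations in the new cluster.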
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
    tree : ClusterNode
        The root ClusterNode object of the tree (or the tuple
        ``(r, d)`` described above when ``rd`` is True).
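
    For example, a small sketch with a hand-built linkage matrix over
    three observations:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> root = to_tree(Z)
    >>> root.get_count()
    3
    >>> root.get_left().is_leaf()
    True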
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # The number of original objects is equal to the number of rows
    # plus 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
        if fi >= i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
        if fj >= i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def _convert_to_bool(X):
if X.dtype != np.bool:
X = X.astype(np.bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
    c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
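
    For example, a minimal sketch with a hand-built linkage matrix
    (observations 0 and 1 merge at height 1.0; observation 2 joins
    them at height 2.0):

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> [float(v) for v in cophenet(Z)]
    [1.0, 2.0, 2.0]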
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
    # Since the C code does not support striding using strides,
    # the dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
        .. math:: \\frac{\\mathtt{Z[i,2]} - \\mathtt{R[i,0]}}{\\mathtt{R[i,1]}}
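
    For example, a minimal sketch showing only the shape of the result
    for a hand-built two-row linkage matrix:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> inconsistent(Z).shape
    (2, 4)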
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
    # Since the C code does not support striding using strides,
    # the dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
    * a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
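
    For example, a minimal sketch with a hand-written MATLAB-style
    matrix (1-based indices, no count column):

    >>> import numpy as np
    >>> Zm = np.array([[1., 2., 1.],
    ...                [4., 3., 2.]])
    >>> Zs = from_mlab_linkage(Zm)
    >>> int(Zs[1, 0]), int(Zs[1, 3])
    (3, 3)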
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
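
    For example, a minimal sketch with a hand-built linkage whose merge
    heights are non-decreasing:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> is_monotonic(Z)
    True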
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # We expect each merge distance to be no smaller than its predecessor.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
    It must be an :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
try:
if type(R) != np.ndarray:
if name:
raise TypeError(('Variable \'%s\' passed as inconsistency '
'matrix is not a numpy array.') % name)
else:
raise TypeError('Variable passed as inconsistency matrix '
'is not a numpy array.')
if R.dtype != np.double:
if name:
raise TypeError(('Inconsistency matrix \'%s\' must contain '
'doubles (double).') % name)
else:
raise TypeError('Inconsistency matrix must contain doubles '
'(double).')
if len(R.shape) != 2:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have '
'shape=2 (i.e. be two-dimensional).') % name)
else:
raise ValueError('Inconsistency matrix must have shape=2 '
'(i.e. be two-dimensional).')
if R.shape[1] != 4:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have 4 '
'columns.') % name)
else:
raise ValueError('Inconsistency matrix must have 4 columns.')
if R.shape[0] < 1:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have at '
'least one row.') % name)
else:
raise ValueError('Inconsistency matrix must have at least '
'one row.')
if (R[:, 0] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height means.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height means.')
if (R[:, 1] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height standard '
'deviations.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height standard deviations.')
if (R[:, 2] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link counts.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link counts.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True if the linkage matrix is valid.
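
    For example, a minimal sketch with a hand-built linkage matrix and
    an invalid variant of it:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> is_valid_linkage(Z)
    True
    >>> is_valid_linkage(Z[:, :3])  # the count column is missing
    False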
"""
Z = np.asarray(Z, order='c')
valid = True
try:
if type(Z) != np.ndarray:
if name:
raise TypeError(('\'%s\' passed as a linkage is not a valid '
'array.') % name)
else:
raise TypeError('Variable is not a valid array.')
if Z.dtype != np.double:
if name:
raise TypeError('Linkage matrix \'%s\' must contain doubles.'
% name)
else:
raise TypeError('Linkage matrix must contain doubles.')
if len(Z.shape) != 2:
if name:
raise ValueError(('Linkage matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Linkage matrix must have shape=2 '
'(i.e. be two-dimensional).')
if Z.shape[1] != 4:
if name:
raise ValueError('Linkage matrix \'%s\' must have 4 columns.'
% name)
else:
raise ValueError('Linkage matrix must have 4 columns.')
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'indices.') % name)
else:
raise ValueError('Linkage contains negative indices.')
if (Z[:, 2] < 0).any():
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'distances.') % name)
else:
raise ValueError('Linkage contains negative distances.')
if (Z[:, 3] < 0).any():
if name:
raise ValueError('Linkage \'%s\' contains negative counts.'
% name)
else:
raise ValueError('Linkage contains negative counts.')
if _check_hierarchy_uses_cluster_before_formed(Z):
if name:
raise ValueError(('Linkage \'%s\' uses non-singleton cluster '
                                  'before it is formed.') % name)
else:
raise ValueError("Linkage uses non-singleton cluster before "
"it's formed.")
if _check_hierarchy_uses_cluster_more_than_once(Z):
if name:
raise ValueError(('Linkage \'%s\' uses the same cluster more '
'than once.') % name)
else:
raise ValueError('Linkage uses the same cluster more than '
'once.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
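
    For example, a minimal sketch with a hand-built two-row linkage
    matrix, which encodes three original observations:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> num_obs_linkage(Z)
    3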
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
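
    For example, a minimal sketch pairing a hand-built linkage with a
    condensed distance matrix over the same three observations:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> Y = np.array([1., 2., 3.])
    >>> correspond(Z, Y)
    True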
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
            with index i when ``monocrit[i] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
                fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
                fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistics upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
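
    For example, a minimal sketch cutting a hand-built linkage at a
    distance threshold that separates observation 2 from 0 and 1:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> T = fcluster(Z, t=1.5, criterion='distance')
    >>> int(T[0]) == int(T[1]), int(T[0]) == int(T[2])
    (True, False)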
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
    # Since the C code does not support striding using strides,
    # the dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
        # Since the C code does not support striding using strides,
        # the dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
    Clusters the original observations in the n-by-m data
    matrix X (n observations in m dimensions), using the given
    distance metric to calculate pairwise distances between
    observations, performs hierarchical clustering using the given
    linkage method, and forms flat clusters using the inconsistency
    method with `t` as the cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
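
    For example, a minimal sketch with three one-dimensional
    observations, two of which sit close together:

    >>> import numpy as np
    >>> X = np.array([[0.], [0.3], [4.]])
    >>> T = fclusterdata(X, t=1.0, criterion='distance')
    >>> int(T[0]) == int(T[1]), int(T[0]) == int(T[2])
    (True, False)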
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
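
    For example, a minimal sketch with a hand-built linkage matrix;
    the result matches a pre-order traversal of the tree's leaves:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> [int(i) for i in leaves_list(Z)]
    [2, 0, 1]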
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
        raise ImportError("You must install the matplotlib library to "
                          "plot the dendrogram. Use no_plot=True to "
                          "calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
    # Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            else:
                leaf_rot = float(_get_tick_rotation(len(ivl)))
                for lbl in lbls:
                    lbl.set_rotation(leaf_rot)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
            else:
                leaf_fs = float(_get_tick_text_size(len(ivl)))
                for lbl in lbls:
                    lbl.set_size(leaf_fs)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            else:
                leaf_rot = float(_get_tick_rotation(p))
                for lbl in lbls:
                    lbl.set_rotation(leaf_rot)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
            else:
                leaf_fs = float(_get_tick_text_size(p))
                for lbl in lbls:
                    lbl.set_size(leaf_fs)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
            if leaf_rotation:
                for lbl in lbls:
                    lbl.set_rotation(leaf_rotation)
            if leaf_font_size:
                for lbl in lbls:
                    lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there is a grouping of links above the color threshold,
# it should go last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
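
    For example, a minimal sketch that installs a shorter cycle and then
    restores this module's default palette:

    >>> set_link_color_palette(['c', 'm', 'y', 'k'])
    >>> set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])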
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None, above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last ``p`` non-singleton formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
        ``'top'``
          Plots the root at the top, with descendent links going
          downwards. (default).
        ``'bottom'``
          Plots the root at the bottom, with descendent links going
          upwards.
        ``'left'``
          Plots the root at the left, with descendent links going right.
        ``'right'``
          Plots the root at the right, with descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
    count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in
        which n's two descendent links are plotted is determined by
        this parameter, which can be any of the following values:
        ``False``
          Nothing is done.
        ``'ascending'`` or ``True``
          The child with the minimum number of original objects in its
          cluster is plotted first.
        ``'descending'``
          The child with the maximum number of original objects in its
          cluster is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in
        which n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observation are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size based on the number of nodes in the
dendrogram.
    leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called with
        the cluster index :math:`k < 2n-1` of each leaf. The function
        is expected to return a string with the label for the
        leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do:
        >>> # First define the leaf label function.
        >>> def llf(id):
        ...     if id < n:
        ...         return str(id)
        ...     else:
        ...         return '[%d %d %1.2f]' % (id, count, R[n-id,3])
        >>>
>>> # The text for the leaf nodes is going to be big so force
>>> # a rotation of 90 degrees.
>>> dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
If given, `link_color_function` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example:
>>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
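
    For example, a minimal sketch that computes the plot data for a
    hand-built linkage matrix without rendering anything:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> R = dendrogram(Z, no_plot=True)
    >>> sorted(R.keys())
    ['color_list', 'dcoord', 'icoord', 'ivl', 'leaves']
    >>> R['ivl']
    ['2', '0', '1']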
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
    Calculates the endpoints of the links as well as the labels for
    the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-singleton
        # cluster, its label is either the empty string or the number of
        # original observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
    if aa >= n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
    if ab >= n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
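
    For example, a minimal sketch: the first pair of assignments uses
    different labels for the same partition, the second does not:

    >>> is_isomorphic([1, 1, 2], [2, 2, 1])
    True
    >>> is_isomorphic([1, 1, 2], [1, 2, 2])
    False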
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
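
    For example, a minimal sketch with a hand-built linkage matrix whose
    merge heights are 1.0 and 2.0:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> [float(d) for d in maxdists(Z)]
    [1.0, 2.0]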
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
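
    For example, a minimal sketch, assuming this module's ``fcluster``
    to produce the flat assignment vector; one leader is found per flat
    cluster:

    >>> import numpy as np
    >>> Z = np.array([[0., 1., 1., 2.],
    ...               [2., 3., 2., 3.]])
    >>> T = fcluster(Z, t=1.5, criterion='distance')
    >>> L, M = leaders(Z, T)
    >>> len(L) == len(np.unique(T))
    True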
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)