repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
CPqD/RouteFlow | pox/pox/lib/packet/eapol.py | 27 | 3312 | # Copyright 2011 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
#
# EAPOL Header Format (see IEEE 802.1X-2004):
#
# Octet 0: Protocol version (1 or 2).
# Octet 1: Packet type:
# 0 = EAP packet
# 1 = EAPOL-Start
# 2 = EAPOL-Logoff
# 3 = EAPOL-Key
# 4 = EAPOL-Encapsulated-ASF-Alert
# Octets 2-3: Length of packet body field (0 if packet body is absent)
# Octets 4-end: Packet body (present only for packet types 0, 3, 4)
#
#======================================================================
import struct
from packet_utils import *
from packet_base import packet_base
from eap import *
class eapol(packet_base):
  """EAP over LAN (EAPOL) packet, per IEEE 802.1X."""

  MIN_LEN = 4  # fixed header: version (1B), type (1B), body length (2B)

  # Protocol versions (octet 0 of the header).
  V1_PROTO = 1
  V2_PROTO = 2

  # Packet types (octet 1 of the header).
  EAP_TYPE = 0
  EAPOL_START_TYPE = 1
  EAPOL_LOGOFF_TYPE = 2
  EAPOL_KEY_TYPE = 3
  EAPOL_ENCAPSULATED_ASF_ALERT = 4

  type_names = {
      EAP_TYPE: "EAP",
      EAPOL_START_TYPE: "EAPOL-Start",
      EAPOL_LOGOFF_TYPE: "EAPOL-Logoff",
      EAPOL_KEY_TYPE: "EAPOL-Key",
      EAPOL_ENCAPSULATED_ASF_ALERT: "EAPOL-Encapsulated-ASF-Alert",
  }

  @staticmethod
  def type_name(type):
    """Return the symbolic name for a packet type code, or "type<N>"."""
    return eapol.type_names.get(type, "type%d" % type)

  def __init__(self, raw=None, prev=None, **kw):
    packet_base.__init__(self)

    self.prev = prev

    # Defaults for a freshly constructed (unparsed) packet.
    self.version = self.V1_PROTO
    self.type = self.EAP_TYPE
    self.bodylen = 0

    if raw is not None:
      self.parse(raw)

    self._init(kw)

  def __str__(self):
    return '[EAPOL v%d %s]' % (self.version, self.type_name(self.type))

  def parse(self, raw):
    """Parse the 4-byte EAPOL header and dispatch to a payload parser."""
    assert isinstance(raw, bytes)
    self.raw = raw
    dlen = len(raw)
    if dlen < self.MIN_LEN:
      self.msg('(eapol parse) warning EAPOL packet data too short to parse header: data len %u' % (dlen,))
      return

    (self.version, self.type, self.bodylen) \
        = struct.unpack('!BBH', raw[:self.MIN_LEN])
    self.parsed = True

    if self.type == self.EAP_TYPE:
      # Only EAP payloads are parsed further.
      self.next = eap(raw=raw[self.MIN_LEN:], prev=self)
    elif self.type in (self.EAPOL_START_TYPE, self.EAPOL_LOGOFF_TYPE):
      # These types carry no payload.
      pass
    else:
      self.msg('warning unsupported EAPOL type: %s' % (self.type_name(self.type),))

  def hdr(self, payload):
    """Pack the 4-byte EAPOL header (payload is appended by the caller)."""
    return struct.pack('!BBH', self.version, self.type, self.bodylen)
| apache-2.0 |
dblia/nosql-ganeti | lib/network.py | 3 | 7510 | #
#
# Copyright (C) 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""IP address pool management functions.
"""
import ipaddr
from bitarray import bitarray
from ganeti import errors
def _ComputeIpv4NumHosts(network_size):
"""Derives the number of hosts in an IPv4 network from the size.
"""
return 2 ** (32 - network_size)
# Smallest supported IPv4 network (largest prefix length): a /30.
IPV4_NETWORK_MIN_SIZE = 30
# FIXME: This limit is for performance reasons. Remove when refactoring
# for performance tuning was successful.
IPV4_NETWORK_MAX_SIZE = 16
# Host counts corresponding to the prefix-length bounds above.
IPV4_NETWORK_MIN_NUM_HOSTS = _ComputeIpv4NumHosts(IPV4_NETWORK_MIN_SIZE)
IPV4_NETWORK_MAX_NUM_HOSTS = _ComputeIpv4NumHosts(IPV4_NETWORK_MAX_SIZE)
class AddressPool(object):
  """Address pool class, wrapping an C{objects.Network} object.

  This class provides methods to manipulate address pools, backed by
  L{objects.Network} objects.

  """
  # One-bit patterns used with bitarray.search() to find free/reserved slots.
  FREE = bitarray("0")
  RESERVED = bitarray("1")

  def __init__(self, network):
    """Initialize a new IPv4 address pool from an L{objects.Network} object.

    @type network: L{objects.Network}
    @param network: the network object from which the pool will be generated

    """
    self.network = None
    self.gateway = None
    self.network6 = None
    self.gateway6 = None

    self.net = network

    self.network = ipaddr.IPNetwork(self.net.network)
    # Refuse networks outside the supported size window; see the
    # IPV4_NETWORK_{MIN,MAX}_SIZE constants above for the rationale.
    if self.network.numhosts > IPV4_NETWORK_MAX_NUM_HOSTS:
      raise errors.AddressPoolError("A big network with %s host(s) is currently"
                                    " not supported. please specify at most a"
                                    " /%s network" %
                                    (str(self.network.numhosts),
                                     IPV4_NETWORK_MAX_SIZE))

    if self.network.numhosts < IPV4_NETWORK_MIN_NUM_HOSTS:
      raise errors.AddressPoolError("A network with only %s host(s) is too"
                                    " small, please specify at least a /%s"
                                    " network" %
                                    (str(self.network.numhosts),
                                     IPV4_NETWORK_MIN_SIZE))

    if self.net.gateway:
      self.gateway = ipaddr.IPAddress(self.net.gateway)

    # IPv6 data is optional and carried alongside the IPv4 pool.
    if self.net.network6:
      self.network6 = ipaddr.IPv6Network(self.net.network6)
    if self.net.gateway6:
      self.gateway6 = ipaddr.IPv6Address(self.net.gateway6)

    # The reservation maps are stored on the network object as "01" strings;
    # rebuild the bitarrays from them, or start with all-free maps.
    if self.net.reservations:
      self.reservations = bitarray(self.net.reservations)
    else:
      self.reservations = bitarray(self.network.numhosts)
      # pylint: disable=E1103
      self.reservations.setall(False)

    if self.net.ext_reservations:
      self.ext_reservations = bitarray(self.net.ext_reservations)
    else:
      self.ext_reservations = bitarray(self.network.numhosts)
      # pylint: disable=E1103
      self.ext_reservations.setall(False)

    # One bit per address in the network, for both maps.
    assert len(self.reservations) == self.network.numhosts
    assert len(self.ext_reservations) == self.network.numhosts

  def Contains(self, address):
    """Check whether the given address belongs to this pool's network."""
    if address is None:
      return False
    addr = ipaddr.IPAddress(address)

    return addr in self.network

  def _GetAddrIndex(self, address):
    """Return the bit index of an address within the reservation maps.

    @raise errors.AddressPoolError: the address is outside the network

    """
    addr = ipaddr.IPAddress(address)

    if not addr in self.network:
      raise errors.AddressPoolError("%s does not contain %s" %
                                    (self.network, addr))

    # Offset of the address from the network (base) address.
    return int(addr) - int(self.network.network)

  def Update(self):
    """Write address pools back to the network object.

    """
    # pylint: disable=E1103
    self.net.ext_reservations = self.ext_reservations.to01()
    self.net.reservations = self.reservations.to01()

  def _Mark(self, address, value=True, external=False):
    """Set (or clear) one address' bit in the chosen reservation map."""
    idx = self._GetAddrIndex(address)
    if external:
      self.ext_reservations[idx] = value
    else:
      self.reservations[idx] = value

    # Persist the change back to the network object immediately.
    self.Update()

  def _GetSize(self):
    # Total number of addresses, derived from the prefix length.
    return 2 ** (32 - self.network.prefixlen)

  @property
  def all_reservations(self):
    """Return a combined map of internal and external reservations.

    """
    return (self.reservations | self.ext_reservations)

  def Validate(self):
    """Run consistency assertions over the pool's internal state."""
    assert len(self.reservations) == self._GetSize()
    assert len(self.ext_reservations) == self._GetSize()
    # No address may be both internally and externally reserved.
    all_res = self.reservations & self.ext_reservations
    assert not all_res.any()

    if self.gateway is not None:
      assert self.gateway in self.network

    if self.network6 and self.gateway6:
      assert self.gateway6 in self.network6

    return True

  def IsFull(self):
    """Check whether the network is full.

    """
    return self.all_reservations.all()

  def GetReservedCount(self):
    """Get the count of reserved addresses.

    """
    return self.all_reservations.count(True)

  def GetFreeCount(self):
    """Get the count of unused addresses.

    """
    return self.all_reservations.count(False)

  def GetMap(self):
    """Return a textual representation of the network's occupation status.

    Reserved addresses are shown as "X", free ones as ".".

    """
    return self.all_reservations.to01().replace("1", "X").replace("0", ".")

  def IsReserved(self, address):
    """Checks if the given IP is reserved.

    """
    idx = self._GetAddrIndex(address)
    return self.all_reservations[idx]

  def Reserve(self, address, external=False):
    """Mark an address as used.

    @raise errors.AddressPoolError: the address is already reserved

    """
    if self.IsReserved(address):
      raise errors.AddressPoolError("%s is already reserved" % address)
    self._Mark(address, external=external)

  def Release(self, address, external=False):
    """Release a given address reservation.

    """
    self._Mark(address, value=False, external=external)

  def GetFreeAddress(self):
    """Returns the first available address.

    The returned address is reserved (internally) as a side effect.

    """
    if self.IsFull():
      raise errors.AddressPoolError("%s is full" % self.network)

    idx = self.all_reservations.index(False)
    address = str(self.network[idx])
    self.Reserve(address)
    return address

  def GenerateFree(self):
    """Returns the first free address of the network.

    Unlike L{GetFreeAddress}, this does not reserve the address.

    @raise errors.AddressPoolError: Pool is full

    """
    # search() with a limit of 1 returns at most one index of a free slot.
    idx = self.all_reservations.search(self.FREE, 1)
    if idx:
      return str(self.network[idx[0]])
    else:
      raise errors.AddressPoolError("%s is full" % self.network)

  def GetExternalReservations(self):
    """Returns a list of all externally reserved addresses.

    """
    # pylint: disable=E1103
    idxs = self.ext_reservations.search(self.RESERVED)
    return [str(self.network[idx]) for idx in idxs]

  @classmethod
  def InitializeNetwork(cls, net):
    """Initialize an L{objects.Network} object.

    Reserve the network, broadcast and gateway IP addresses.

    """
    obj = cls(net)
    obj.Update()
    # network[0] is the network address, network[-1] the broadcast address.
    for ip in [obj.network[0], obj.network[-1]]:
      obj.Reserve(ip, external=True)
    if obj.net.gateway is not None:
      obj.Reserve(obj.net.gateway, external=True)
    obj.Validate()
    return obj
| gpl-2.0 |
joshloyal/scikit-learn | examples/calibration/plot_compare_calibration.py | 82 | 5012 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)

# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.

import numpy as np

# Seed the global RNG before generating the dataset so the example is
# reproducible.
np.random.seed(0)

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve

# Synthetic binary classification problem with redundant features.
X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=2)

train_samples = 100  # Samples used for training the models

X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]

# The classifiers to compare, paired with their plot labels.
classifiers = [
    (LogisticRegression(), 'Logistic'),
    (GaussianNB(), 'Naive Bayes'),
    (LinearSVC(C=1.0), 'Support Vector Classification'),
    (RandomForestClassifier(n_estimators=100), 'Random Forest'),
]

###############################################################################
# Plot calibration plots

plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))

# Reference diagonal: a perfectly calibrated classifier lies on it.
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")

for classifier, label in classifiers:
    classifier.fit(X_train, y_train)
    if hasattr(classifier, "predict_proba"):
        scores = classifier.predict_proba(X_test)[:, 1]
    else:  # use decision function
        raw_scores = classifier.decision_function(X_test)
        # Min-max scale the decision values into [0, 1].
        scores = \
            (raw_scores - raw_scores.min()) / (raw_scores.max() - raw_scores.min())
    fraction_of_positives, mean_predicted_value = \
        calibration_curve(y_test, scores, n_bins=10)

    ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
             label="%s" % (label, ))

    ax2.hist(scores, range=(0, 1), bins=10, label=label,
             histtype="step", lw=2)

ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots  (reliability curve)')

ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)

plt.tight_layout()
plt.show()
| bsd-3-clause |
microelly2/Animation | Scaler.py | 1 | 2545 | import math,os
import FreeCAD, FreeCADGui, Animation, PySide
from Animation import say,sayErr,sayexc,sayd
from EditWidget import EditWidget
__vers__= '0.1'
__dir__ = os.path.dirname(__file__)
def createScaler(name='My_Scaler'):
    """Create and return a Scaler animation feature in the active document.

    The feature carries animation timing (start/end/duration), per-axis
    scaling parameters, and a link ('obj2') to the object whose Scale
    property gets animated.
    """
    obj = FreeCAD.ActiveDocument.addObject("App::DocumentObjectGroupPython", name)

    # Animation timing (frame numbers); 'end' is derived in execute().
    obj.addProperty("App::PropertyInteger", "start", "Base", "start").start = 0
    obj.addProperty("App::PropertyInteger", "end", "Base", "")
    obj.addProperty("App::PropertyInteger", "duration", "Base", "")

    # Per-axis parameters: Scale exponent plus start (Va) and end (Ve)
    # values.  Only the z exponent defaults to 1.
    for axis, exponent_default in (("x", 0), ("y", 0), ("z", 1)):
        obj.addProperty("App::PropertyFloat", axis + "Scale", "Scale",
                        "Rotationsachse Zentrum relativ")
        setattr(obj, axis + "Scale", exponent_default)
        obj.addProperty("App::PropertyFloat", axis + "Va", "Scale",
                        "Rotationsachse Zentrum relativ")
        setattr(obj, axis + "Va", 1)
        obj.addProperty("App::PropertyFloat", axis + "Ve", "Scale",
                        "Rotationsachse Zentrum relativ")
        setattr(obj, axis + "Ve", 2)

    # Link to the object whose Scale will be animated.
    obj.addProperty("App::PropertyLink", "obj2", "Object", "rotating object ")

    _Scaler(obj)
    _ViewProviderScaler(obj.ViewObject)
    return obj
class _Scaler(Animation._Actor):
    # Animation actor that interpolates the Scale of a linked object over
    # the [start, end] frame interval.

    def __init__(self, obj):
        obj.Proxy = self
        self.Type = "_Scaler"
        # NOTE(review): despite the name, self.obj2 holds the feature object
        # itself; the scaled target is self.obj2.obj2 (the 'obj2' link
        # property set up in createScaler).
        self.obj2 = obj

    def execute(self, obj):
        """Recompute hook: keep 'end' derived from 'start' + 'duration'."""
        sayd("execute _Scaler")
        if hasattr(obj, 'obj2'):
            #say(obj.obj2)
            pass
        # 'end' is read-only in the editor because it is derived.
        obj.setEditorMode("end", 1)  #ro
        obj.end = obj.start + obj.duration

    def step(self, now):
        """Apply the scale for animation time 'now' to the linked object."""
        FreeCAD.yy = self
        if now <= self.obj2.start or now > self.obj2.end:
            # Outside the animation interval: leave the object untouched.
            pass
        else:
            # Fraction of the animation completed, in (0, 1] here since
            # now > start in this branch.
            relativ = 1.00 / (self.obj2.end - self.obj2.start) * (now - self.obj2.start)
            # sc is currently unused.
            sc = self.obj2.obj2.Scale
            # NOTE(review): the base ratio uses the x-axis Ve/Va for all three
            # axes -- confirm this is intended rather than per-axis ratios.
            relativbase = self.obj2.xVe / self.obj2.xVa * relativ
            if relativ == 0:
                # NOTE(review): appears unreachable given the guard above
                # (now > start implies relativ > 0) -- confirm.
                nwx = self.obj2.xVa
                nwy = self.obj2.yVa
                nwz = self.obj2.zVa
            else:
                # Exponential interpolation: base**Scale scaled by the end value.
                nwx = relativbase ** self.obj2.xScale * self.obj2.xVe
                nwy = relativbase ** self.obj2.yScale * self.obj2.yVe
                nwz = relativbase ** self.obj2.zScale * self.obj2.zVe
            newScale = (nwx, nwy, nwz)
            self.obj2.obj2.Scale = newScale
            FreeCAD.ActiveDocument.recompute()
            FreeCADGui.Selection.clearSelection()
class _ViewProviderScaler(Animation._ViewProviderActor):
    # View provider supplying the scaler feature's tree/toolbar icon.

    def getIcon(self):
        """Return the filesystem path of this feature's icon."""
        return __dir__ + '/icons/scaler.png'
| gpl-2.0 |
pmclanahan/bedrock | bedrock/tabzilla/middleware.py | 29 | 1054 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf import settings
from bedrock.base.middleware import LocaleURLMiddleware
class TabzillaLocaleURLMiddleware(LocaleURLMiddleware):
    """Locale middleware that points tabzilla.js redirects at the CDN."""

    def process_request(self, request):
        response = super(TabzillaLocaleURLMiddleware, self).process_request(request)
        if response is None:
            # no locale redirect happening
            return response

        cdn_enabled = not settings.TEMPLATE_DEBUG and settings.CDN_BASE_URL
        redirect_target = response.get('location', '')
        if cdn_enabled and 'tabzilla.js' in redirect_target:
            # CDN URL should be protocol relative, but that won't work
            # in a Location header.
            scheme = 'https:' if request.is_secure() else 'http:'
            response['location'] = scheme + settings.CDN_BASE_URL + redirect_target
        return response
| mpl-2.0 |
retr0h/ansible-modules-core | cloud/ec2.py | 6 | 42404 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2
short_description: create, terminate, start or stop an instance in ec2, return instanceid
description:
- Creates or terminates ec2 instances. When created optionally waits for it to be 'running'. This module has a dependency on python-boto >= 2.5
version_added: "0.9"
options:
key_name:
description:
- key pair to use on the instance
required: false
default: null
aliases: ['keypair']
id:
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
required: false
default: null
aliases: []
group:
description:
- security group (or list of groups) to use with the instance
required: false
default: null
aliases: [ 'groups' ]
group_id:
version_added: "1.1"
description:
- security group id (or list of ids) to use with the instance
required: false
default: null
aliases: []
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
zone:
version_added: "1.2"
description:
- AWS availability zone in which to launch the instance
required: false
default: null
aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type:
description:
- instance type to use for the instance
required: true
default: null
aliases: []
spot_price:
version_added: "1.5"
description:
- Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started.
required: false
default: null
aliases: []
image:
description:
- I(emi) (or I(ami)) to use for the instance
required: true
default: null
aliases: []
kernel:
description:
- kernel I(eki) to use for the instance
required: false
default: null
aliases: []
ramdisk:
description:
- ramdisk I(eri) to use for the instance
required: false
default: null
aliases: []
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
spot_wait_timeout:
version_added: "1.5"
description:
- how long to wait for the spot instance request to be fulfilled
default: 600
aliases: []
count:
description:
- number of instances to launch
required: False
default: 1
aliases: []
monitoring:
version_added: "1.1"
description:
- enable detailed monitoring (CloudWatch) for instance
required: false
default: null
aliases: []
user_data:
version_added: "0.9"
description:
- opaque blob of data which is made available to the ec2 instance
required: false
default: null
aliases: []
instance_tags:
version_added: "1.0"
description:
- a hash/dictionary of tags to add to the new instance; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
aliases: []
placement_group:
version_added: "1.3"
description:
- placement group for the instance when using EC2 Clustered Compute
required: false
default: null
aliases: []
vpc_subnet_id:
version_added: "1.1"
description:
- the subnet ID in which to launch the instance (VPC)
required: false
default: null
aliases: []
assign_public_ip:
version_added: "1.5"
description:
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
required: false
default: null
aliases: []
private_ip:
version_added: "1.2"
description:
- the private ip address to assign the instance (from the vpc subnet)
required: false
default: null
aliases: []
instance_profile_name:
version_added: "1.3"
description:
- Name of the IAM instance profile to use. Boto library must be 2.5.0+
required: false
default: null
aliases: []
instance_ids:
version_added: "1.3"
description:
- "list of instance ids, currently used for states: absent, running, stopped"
required: false
default: null
aliases: []
source_dest_check:
version_added: "1.6"
description:
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
required: false
default: true
state:
version_added: "1.3"
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
choices: ['present', 'absent', 'running', 'stopped']
volumes:
version_added: "1.5"
description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
required: false
default: null
aliases: []
ebs_optimized:
version_added: "1.6"
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
required: false
default: false
exact_count:
version_added: "1.5"
description:
- An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value.
required: false
default: null
aliases: []
count_tag:
version_added: "1.5"
description:
- Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver".
required: false
default: null
aliases: []
author: Seth Vidal, Tim Gerla, Lester Wade
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example
- local_action:
module: ec2
key_name: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
count: 3
# Advanced example with tagging and CloudWatch
- local_action:
module: ec2
key_name: mykey
group: databases
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination
local_action:
module: ec2
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
device_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
monitoring: yes
# Multiple groups example
local_action:
module: ec2
key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
# Multiple instances with additional volume from snapshot
local_action:
module: ec2
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_size: 10
monitoring: yes
# VPC example
- local_action:
module: ec2
key_name: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Spot instance example
- local_action:
module: ec2
spot_price: 0.24
spot_wait_timeout: 600
keypair: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Launch instances, runs some tasks
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
gather_facts: False
vars:
key_name: my_keypair
instance_type: m1.small
security_group: my_securitygroup
image: my_ami_id
region: us-east-1
tasks:
- name: Launch instance
local_action: ec2 key_name={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }}
register: ec2
- name: Add new instance to host group
local_action: add_host hostname={{ item.public_ip }} groupname=launched
with_items: ec2.instances
- name: Wait for SSH to come up
local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started
with_items: ec2.instances
- name: Configure instance(s)
hosts: launched
sudo: True
gather_facts: True
roles:
- my_awesome_role
- my_awesome_test
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: ec2
state: 'absent'
instance_ids: '{{ ec2.instance_ids }}'
# Start a few existing instances, run some tasks
# and stop the instances
- name: Start sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Start the sandbox instances
local_action:
module: ec2
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: running
wait: True
role:
- do_neat_stuff
- do_more_neat_stuff
- name: Stop sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Stop the sanbox instances
local_action:
module: ec2
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
#
# Enforce that 5 instances with a tag "foo" are running
#
- local_action:
module: ec2
key_name: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
instance_tags:
foo: bar
exact_count: 5
count_tag: foo
#
# Enforce that 5 running instances named "database" with a "dbtype" of "postgres"
#
- local_action:
module: ec2
key_name: mykey
instance_type: c1.medium
image: emi-40603AD1
wait: yes
group: webserver
instance_tags:
Name: database
dbtype: postgres
exact_count: 5
count_tag:
Name: database
dbtype: postgres
#
# count_tag complex argument examples
#
# instances with tag foo
count_tag:
foo:
# instances with tag foo=bar
count_tag:
foo: bar
# instances with tags foo=bar & baz
count_tag:
foo: bar
baz:
# instances with tags foo & bar & baz=bang
count_tag:
- foo
- bar
- baz: bang
'''
import sys
import time
from ast import literal_eval
try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.exception import EC2ResponseError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None):
    """Return (reservations, instances) for running instances matching count_tag.

    module: the AnsibleModule (passed through to get_reservations)
    ec2: authenticated ec2 connection object
    count_tag: tag criteria (string, list or dict) to match against
    zone: optional availability zone to restrict the search to
    """
    # get reservations for instances that match tag(s) and are running
    reservations = get_reservations(module, ec2, tags=count_tag,
                                    state="running", zone=zone)

    instances = []
    for reservation in reservations:
        # Reservations without an 'instances' attribute contribute nothing.
        instances.extend(getattr(reservation, 'instances', []))

    return reservations, instances
def _set_none_to_blank(dictionary):
result = dictionary
for k in result.iterkeys():
if type(result[k]) == dict:
result[k] = _set_non_to_blank(result[k])
elif not result[k]:
result[k] = ""
return result
def get_reservations(module, ec2, tags=None, state=None, zone=None):
    """Query EC2 for reservations, filtered by tags, instance state and zone.

    tags may be a bare string (require the tag key to exist), a list of
    strings/dicts, or a dict of tag name/value pairs.
    """
    # TODO: filters do not work with tags that have underscores
    filters = dict()

    if tags is not None:
        # A string may actually hold a serialized list/dict; try to decode it.
        if type(tags) is str:
            try:
                tags = literal_eval(tags)
            except:
                pass

        # still a string: we only care that a tag of that name exists
        if type(tags) is str:
            filters["tag-key"] = tags

        # list: append each item to filters
        if type(tags) is list:
            for item in tags:
                if type(item) is dict:
                    item = _set_none_to_blank(item)
                    for tag_name, tag_value in item.iteritems():
                        filters["tag:" + tag_name] = tag_value
                else:
                    filters["tag-key"] = item

        # dict: add each key and value to the filter
        if type(tags) is dict:
            tags = _set_none_to_blank(tags)
            for tag_name, tag_value in tags.iteritems():
                filters["tag:" + tag_name] = tag_value

    if state:
        # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
        filters['instance-state-name'] = state

    if zone:
        filters['availability-zone'] = zone

    return ec2.get_all_instances(filters=filters)
def get_instance_info(inst):
    """
    Retrieves instance information from an instance
    ID and returns it as a dictionary
    """
    info = {
        'id': inst.id,
        'ami_launch_index': inst.ami_launch_index,
        'private_ip': inst.private_ip_address,
        'private_dns_name': inst.private_dns_name,
        'public_ip': inst.ip_address,
        'dns_name': inst.dns_name,
        'public_dns_name': inst.public_dns_name,
        'state_code': inst.state_code,
        'architecture': inst.architecture,
        'image_id': inst.image_id,
        'key_name': inst.key_name,
        'placement': inst.placement,
        # The region is the placement zone minus its trailing AZ letter.
        'region': inst.placement[:-1],
        'kernel': inst.kernel,
        'ramdisk': inst.ramdisk,
        'launch_time': inst.launch_time,
        'instance_type': inst.instance_type,
        'root_device_type': inst.root_device_type,
        'root_device_name': inst.root_device_name,
        'state': inst.state,
        'hypervisor': inst.hypervisor,
        # These attributes are absent on older boto versions.
        'virtualization_type': getattr(inst, 'virtualization_type', None),
        'ebs_optimized': getattr(inst, 'ebs_optimized', False),
    }
    return info
def boto_supports_associate_public_ip_address(ec2):
    """
    Check if Boto library has associate_public_ip_address in the
    NetworkInterfaceSpecification class. Added in Boto 2.13.0

    ec2: authenticated ec2 connection object

    Returns:
        True if Boto library accepts associate_public_ip_address argument, else false
    """
    try:
        # Probe the attribute on a freshly constructed specification object;
        # boto versions older than 2.13.0 raise AttributeError here.
        spec = boto.ec2.networkinterface.NetworkInterfaceSpecification()
        spec.associate_public_ip_address
        return True
    except AttributeError:
        return False
def boto_supports_profile_name_arg(ec2):
    """
    Check if Boto library has instance_profile_name argument.
    instance_profile_name has been added in Boto 2.5.0

    ec2: authenticated ec2 connection object

    Returns:
        True if Boto library accept instance_profile_name argument, else false
    """
    # Inspect the argument names of run_instances directly rather than
    # binding the method to a temporary local first.
    return 'instance_profile_name' in \
        getattr(ec2, 'run_instances').func_code.co_varnames
def create_block_device(module, ec2, volume):
    """Translate one entry of the 'volumes' parameter into a boto
    BlockDeviceType, validating it first (fail_json aborts the module on
    invalid combinations)."""
    # Not aware of a way to determine this programatically
    # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
    MAX_IOPS_TO_SIZE_RATIO = 30
    has_snapshot = 'snapshot' in volume
    has_ephemeral = 'ephemeral' in volume
    # A brand-new EBS volume (neither snapshot- nor ephemeral-backed) must
    # carry an explicit size.
    if not has_snapshot and not has_ephemeral and 'volume_size' not in volume:
        module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume')
    if has_snapshot:
        # io1 volumes require an explicit provisioned-IOPS figure.
        if volume.get('device_type') == 'io1' and 'iops' not in volume:
            module.fail_json(msg = 'io1 volumes must have an iops value set')
        if 'iops' in volume:
            # Size defaults to the snapshot's size when not given explicitly.
            snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
            size = volume.get('volume_size', snapshot.volume_size)
            if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
                module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
    if has_ephemeral and has_snapshot:
        module.fail_json(msg = 'Cannot set both ephemeral and snapshot')
    return BlockDeviceType(snapshot_id=volume.get('snapshot'),
                           ephemeral_name=volume.get('ephemeral'),
                           size=volume.get('volume_size'),
                           volume_type=volume.get('device_type'),
                           delete_on_termination=volume.get('delete_on_termination', False),
                           iops=volume.get('iops'))
def boto_supports_param_in_spot_request(ec2, param):
    """
    Check if Boto library has a <param> in its request_spot_instances() method.
    For example, the placement_group parameter wasn't added until 2.3.0.

    ec2: authenticated ec2 connection object

    Returns:
        True if boto library has the named param as an argument on the
        request_spot_instances method, else False
    """
    # Look the parameter name up in the method's compiled argument list.
    spot_method = getattr(ec2, 'request_spot_instances')
    varnames = spot_method.func_code.co_varnames
    return param in varnames
def enforce_count(module, ec2):
    """
    Converge the number of running instances carrying 'count_tag' toward
    exactly 'exact_count', creating or terminating instances as needed.

    Returns a tuple of (all_instances, instance_dict_array,
    changed_instance_ids, changed) where all_instances is every matching
    instance as a dict, and instance_dict_array describes only the
    instances created or terminated by this call.
    """
    exact_count = module.params.get('exact_count')
    count_tag = module.params.get('count_tag')
    zone = module.params.get('zone')

    # Defined earlier in this module (outside this view): returns the
    # reservations and instance objects currently matching count_tag.
    reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone)

    changed = None
    # NOTE(review): checkmode is hard-coded False here, so Ansible check
    # mode is never honoured by this code path — confirm upstream intent.
    checkmode = False
    instance_dict_array = []
    # Stays None when the count already matches and nothing is changed.
    changed_instance_ids = None

    if len(instances) == exact_count:
        changed = False
    elif len(instances) < exact_count:
        # Too few: launch the difference.
        changed = True
        to_create = exact_count - len(instances)
        if not checkmode:
            (instance_dict_array, changed_instance_ids, changed) \
                = create_instances(module, ec2, override_count=to_create)

            for inst in instance_dict_array:
                instances.append(inst)
    elif len(instances) > exact_count:
        # Too many: terminate the lowest-sorted instance ids first.
        changed = True
        to_remove = len(instances) - exact_count
        if not checkmode:
            all_instance_ids = sorted([ x.id for x in instances ])
            remove_ids = all_instance_ids[0:to_remove]

            instances = [ x for x in instances if x.id not in remove_ids]

            (changed, instance_dict_array, changed_instance_ids) \
                = terminate_instances(module, ec2, remove_ids)
            # Mark the returned records as terminated for the module output.
            terminated_list = []
            for inst in instance_dict_array:
                inst['state'] = "terminated"
                terminated_list.append(inst)
            instance_dict_array = terminated_list

    # ensure all instances are dictionaries
    # ('instances' may mix boto instance objects and dicts appended above).
    all_instances = []
    for inst in instances:
        if type(inst) is not dict:
            inst = get_instance_info(inst)
        all_instances.append(inst)

    return (all_instances, instance_dict_array, changed_instance_ids, changed)
def create_instances(module, ec2, override_count=None):
    """
    Creates new instances

    module : AnsibleModule object
    ec2: authenticated ec2 connection object

    Returns:
        A list of dictionaries with instance information
        about the instances that were launched
    """
    # Pull every relevant module parameter into a local up front.
    key_name = module.params.get('key_name')
    id = module.params.get('id')
    group_name = module.params.get('group')
    group_id = module.params.get('group_id')
    zone = module.params.get('zone')
    instance_type = module.params.get('instance_type')
    spot_price = module.params.get('spot_price')
    image = module.params.get('image')
    if override_count:
        # enforce_count() passes the number still needed via override_count.
        count = override_count
    else:
        count = module.params.get('count')
    monitoring = module.params.get('monitoring')
    kernel = module.params.get('kernel')
    ramdisk = module.params.get('ramdisk')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
    placement_group = module.params.get('placement_group')
    user_data = module.params.get('user_data')
    instance_tags = module.params.get('instance_tags')
    vpc_subnet_id = module.params.get('vpc_subnet_id')
    assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
    private_ip = module.params.get('private_ip')
    instance_profile_name = module.params.get('instance_profile_name')
    volumes = module.params.get('volumes')
    ebs_optimized = module.params.get('ebs_optimized')
    exact_count = module.params.get('exact_count')
    count_tag = module.params.get('count_tag')
    source_dest_check = module.boolean(module.params.get('source_dest_check'))

    # group_id and group_name are exclusive of each other
    if group_id and group_name:
        module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
        # NOTE(review): fail_json already exits the module, so this
        # sys.exit(1) is unreachable.
        sys.exit(1)

    try:
        # Here we try to lookup the group id from the security group name - if group is set.
        if group_name:
            grp_details = ec2.get_all_security_groups()
            if type(group_name) == list:
                group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
            elif type(group_name) == str:
                # Substring match on the group's string representation.
                for grp in grp_details:
                    if str(group_name) in str(grp):
                        group_id = [str(grp.id)]
                group_name = [group_name]
        # Now we try to lookup the group id testing if group exists.
        elif group_id:
            #wrap the group_id in a list if it's not one already
            if type(group_id) == str:
                group_id = [group_id]
            grp_details = ec2.get_all_security_groups(group_ids=group_id)
            grp_item = grp_details[0]
            group_name = [grp_item.name]
    except boto.exception.NoAuthHandlerFound, e:
        module.fail_json(msg = str(e))

    # Lookup any instances that much our run id.
    running_instances = []
    count_remaining = int(count)

    if id != None:
        # The 'id' parameter doubles as an idempotency client token: count
        # instances already running under it toward the requested count.
        filter_dict = {'client-token':id, 'instance-state-name' : 'running'}
        previous_reservations = ec2.get_all_instances(None, filter_dict)
        for res in previous_reservations:
            for prev_instance in res.instances:
                running_instances.append(prev_instance)
        count_remaining = count_remaining - len(running_instances)

    # Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.
    if count_remaining == 0:
        changed = False
    else:
        changed = True
        try:
            # Parameters common to on-demand and spot launches.
            params = {'image_id': image,
                      'key_name': key_name,
                      'monitoring_enabled': monitoring,
                      'placement': zone,
                      'instance_type': instance_type,
                      'kernel_id': kernel,
                      'ramdisk_id': ramdisk,
                      'user_data': user_data}

            if ebs_optimized:
                params['ebs_optimized'] = ebs_optimized

            if boto_supports_profile_name_arg(ec2):
                params['instance_profile_name'] = instance_profile_name
            else:
                if instance_profile_name is not None:
                    module.fail_json(
                        msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")

            if assign_public_ip:
                # A public IP on launch requires a VPC subnet and a boto new
                # enough to build the network-interface specification.
                if not boto_supports_associate_public_ip_address(ec2):
                    module.fail_json(
                        msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
                elif not vpc_subnet_id:
                    module.fail_json(
                        msg="assign_public_ip only available with vpc_subnet_id")
                else:
                    if private_ip:
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            subnet_id=vpc_subnet_id,
                            private_ip_address=private_ip,
                            groups=group_id,
                            associate_public_ip_address=assign_public_ip)
                    else:
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            subnet_id=vpc_subnet_id,
                            groups=group_id,
                            associate_public_ip_address=assign_public_ip)
                    interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
                    params['network_interfaces'] = interfaces
            else:
                params['subnet_id'] = vpc_subnet_id
                # In a VPC, security groups are passed by id; in EC2-Classic,
                # by name.
                if vpc_subnet_id:
                    params['security_group_ids'] = group_id
                else:
                    params['security_groups'] = group_name

            if volumes:
                bdm = BlockDeviceMapping()
                for volume in volumes:
                    if 'device_name' not in volume:
                        module.fail_json(msg = 'Device name must be set for volume')
                    # Minimum volume size is 1GB. We'll use volume size explicitly set to 0
                    # to be a signal not to create this volume
                    if 'volume_size' not in volume or int(volume['volume_size']) > 0:
                        bdm[volume['device_name']] = create_block_device(module, ec2, volume)

                params['block_device_map'] = bdm

            # check to see if we're using spot pricing first before starting instances
            if not spot_price:
                # When using a network-interface spec the private IP travels
                # inside it, so it must not also be a top-level parameter.
                if assign_public_ip and private_ip:
                    params.update(dict(
                        min_count = count_remaining,
                        max_count = count_remaining,
                        client_token = id,
                        placement_group = placement_group,
                    ))
                else:
                    params.update(dict(
                        min_count = count_remaining,
                        max_count = count_remaining,
                        client_token = id,
                        placement_group = placement_group,
                        private_ip_address = private_ip,
                    ))

                res = ec2.run_instances(**params)
                instids = [ i.id for i in res.instances ]
                # Spin until the API can see the new instance ids.
                while True:
                    try:
                        ec2.get_all_instances(instids)
                        break
                    except boto.exception.EC2ResponseError as e:
                        if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
                            # there's a race between start and get an instance
                            continue
                        else:
                            module.fail_json(msg = str(e))
            else:
                if private_ip:
                    module.fail_json(
                        msg='private_ip only available with on-demand (non-spot) instances')
                if boto_supports_param_in_spot_request(ec2, placement_group):
                    params['placement_group'] = placement_group
                elif placement_group :
                    module.fail_json(
                        msg="placement_group parameter requires Boto version 2.3.0 or higher.")

                params.update(dict(
                    count = count_remaining,
                ))
                res = ec2.request_spot_instances(spot_price, **params)

                # Now we have to do the intermediate waiting
                if wait:
                    # Map spot-request id -> fulfilled instance id as the
                    # requests become active.
                    spot_req_inst_ids = dict()
                    spot_wait_timeout = time.time() + spot_wait_timeout
                    while spot_wait_timeout > time.time():
                        reqs = ec2.get_all_spot_instance_requests()
                        for sirb in res:
                            if sirb.id in spot_req_inst_ids:
                                continue
                            for sir in reqs:
                                if sir.id == sirb.id and sir.instance_id is not None:
                                    spot_req_inst_ids[sirb.id] = sir.instance_id
                        if len(spot_req_inst_ids) < count:
                            time.sleep(5)
                        else:
                            break
                    if spot_wait_timeout <= time.time():
                        module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
                    # NOTE(review): if wait is False, instids is never set on
                    # the spot path and the code below raises NameError —
                    # confirm against upstream fixes.
                    instids = spot_req_inst_ids.values()
        except boto.exception.BotoServerError, e:
            module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))

        # The instances returned through run_instances can be in
        # terminated state due to idempotency.
        terminated_instances = [ str(instance.id) for instance in res.instances
                                 if instance.state == 'terminated' ]
        if terminated_instances:
            module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
                                   "were created previously but have since been terminated - " +
                                   "use a (possibly different) 'instanceid' parameter")

        # wait here until the instances are up
        num_running = 0
        wait_timeout = time.time() + wait_timeout
        while wait_timeout > time.time() and num_running < len(instids):
            try:
                res_list = ec2.get_all_instances(instids)
            except boto.exception.BotoServerError, e:
                if e.error_code == 'InvalidInstanceID.NotFound':
                    time.sleep(1)
                    continue
                else:
                    raise
            num_running = 0
            # NOTE(review): 'res' is reused as the loop variable here, so it
            # no longer refers to the launch reservation after this loop.
            for res in res_list:
                num_running += len([ i for i in res.instances if i.state=='running' ])
            if len(res_list) <= 0:
                # got a bad response of some sort, possibly due to
                # stale/cached data. Wait a second and then try again
                time.sleep(1)
                continue
            if wait and num_running < len(instids):
                time.sleep(5)
            else:
                # If wait is False we only poll once.
                break

        if wait and wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())

        #We do this after the loop ends so that we end up with one list
        for res in res_list:
            running_instances.extend(res.instances)

        # Enabled by default by Amazon
        # NOTE(review): at this point 'res' is the last reservation from the
        # loop above, not the original launch result — confirm this covers
        # every launched instance.
        if not source_dest_check:
            for inst in res.instances:
                inst.modify_attribute('sourceDestCheck', False)

        # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
        if instance_tags:
            try:
                ec2.create_tags(instids, instance_tags)
            except boto.exception.EC2ResponseError, e:
                module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message))

    # Build the module's return payload from every running instance,
    # including pre-existing ones matched via the client token.
    instance_dict_array = []
    created_instance_ids = []
    for inst in running_instances:
        d = get_instance_info(inst)
        created_instance_ids.append(inst.id)
        instance_dict_array.append(d)

    return (instance_dict_array, created_instance_ids, changed)
def terminate_instances(module, ec2, instance_ids):
    """
    Terminates a list of instances

    module: Ansible module object
    ec2: authenticated ec2 connection object
    termination_list: a list of instances to terminate in the form of
      [ {id: <inst-id>}, ..]

    Returns a dictionary of instance information
    about the instances terminated.

    If the instance to be terminated is running
    "changed" will be set to False.
    """
    # Whether to wait for termination to complete before returning
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        module.fail_json(msg='instance_ids should be a list of instances, aborting')

    terminated_instance_ids = []
    for res in ec2.get_all_instances(instance_ids):
        for inst in res.instances:
            # Only act on instances that can actually be terminated;
            # already-terminated or pending instances are skipped.
            if inst.state == 'running' or inst.state == 'stopped':
                terminated_instance_ids.append(inst.id)
                instance_dict_array.append(get_instance_info(inst))
                try:
                    # Terminate one at a time so a failure names the instance.
                    ec2.terminate_instances([inst.id])
                except EC2ResponseError, e:
                    module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
                changed = True

    # wait here until the instances are 'terminated'
    if wait:
        num_terminated = 0
        wait_timeout = time.time() + wait_timeout
        while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
            # Filter to the terminated state so the count below reflects only
            # instances that have finished terminating.
            response = ec2.get_all_instances( \
                instance_ids=terminated_instance_ids, \
                filters={'instance-state-name':'terminated'})
            try:
                num_terminated = len(response.pop().instances)
            except Exception, e:
                # got a bad response of some sort, possibly due to
                # stale/cached data. Wait a second and then try again
                time.sleep(1)
                continue

            if num_terminated < len(terminated_instance_ids):
                time.sleep(5)

        # waiting took too long
        if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
            module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())

    return (changed, instance_dict_array, terminated_instance_ids)
def startstop_instances(module, ec2, instance_ids, state):
    """
    Starts or stops a list of existing instances

    module: Ansible module object
    ec2: authenticated ec2 connection object
    instance_ids: The list of instances to start in the form of
      [ {id: <inst-id>}, ..]
    state: Intended state ("running" or "stopped")

    Returns a dictionary of instance information
    about the instances started/stopped.

    If the instance was not able to change state,
    "changed" will be set to False.
    """
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        module.fail_json(msg='instance_ids should be a list of instances, aborting')

    # Check that our instances are not in the state we want to take them to
    # and change them to our desired state
    running_instances_array = []
    for res in ec2.get_all_instances(instance_ids):
        for inst in res.instances:
            if inst.state != state:
                instance_dict_array.append(get_instance_info(inst))
                try:
                    if state == 'running':
                        inst.start()
                    else:
                        inst.stop()
                except EC2ResponseError, e:
                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
                changed = True

    ## Wait for all the instances to finish starting or stopping
    # Each pass rebuilds instance_dict_array from the instances that have
    # reached the target state, so the final payload reflects that state.
    wait_timeout = time.time() + wait_timeout
    while wait and wait_timeout > time.time():
        instance_dict_array = []
        matched_instances = []
        for res in ec2.get_all_instances(instance_ids):
            for i in res.instances:
                if i.state == state:
                    instance_dict_array.append(get_instance_info(i))
                    matched_instances.append(i)
        if len(matched_instances) < len(instance_ids):
            time.sleep(5)
        else:
            break

    if wait and wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())

    return (changed, instance_dict_array, instance_ids)
def main():
    """Module entry point: parse parameters, connect to EC2 and dispatch on
    the requested 'state'."""
    # Start from the shared EC2 argument spec (region, credentials, ...)
    # provided by ansible.module_utils.ec2 and add this module's options.
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            key_name = dict(aliases = ['keypair']),
            id = dict(),
            group = dict(type='list'),
            group_id = dict(type='list'),
            zone = dict(aliases=['aws_zone', 'ec2_zone']),
            instance_type = dict(aliases=['type']),
            spot_price = dict(),
            image = dict(),
            kernel = dict(),
            # NOTE(review): default is the string '1' for an int-typed
            # option; Ansible coerces it, but an int literal would be cleaner.
            count = dict(type='int', default='1'),
            monitoring = dict(type='bool', default=False),
            ramdisk = dict(),
            wait = dict(type='bool', default=False),
            wait_timeout = dict(default=300),
            spot_wait_timeout = dict(default=600),
            placement_group = dict(),
            user_data = dict(),
            instance_tags = dict(type='dict'),
            vpc_subnet_id = dict(),
            assign_public_ip = dict(type='bool', default=False),
            private_ip = dict(),
            instance_profile_name = dict(),
            instance_ids = dict(type='list'),
            source_dest_check = dict(type='bool', default=True),
            state = dict(default='present'),
            exact_count = dict(type='int', default=None),
            count_tag = dict(),
            volumes = dict(type='list'),
            ebs_optimized = dict(type='bool', default=False),
        )
    )

    # exact_count drives its own create/terminate logic, so it cannot be
    # combined with count, state or an explicit instance list.
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [
            ['exact_count', 'count'],
            ['exact_count', 'state'],
            ['exact_count', 'instance_ids']
        ],
    )

    ec2 = ec2_connect(module)

    tagged_instances = []

    state = module.params.get('state')

    if state == 'absent':
        instance_ids = module.params.get('instance_ids')
        if not isinstance(instance_ids, list):
            module.fail_json(msg='termination_list needs to be a list of instances to terminate')

        (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)

    elif state in ('running', 'stopped'):
        instance_ids = module.params.get('instance_ids')
        if not isinstance(instance_ids, list):
            module.fail_json(msg='running list needs to be a list of instances to run: %s' % instance_ids)

        (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state)

    elif state == 'present':
        # Changed is always set to true when provisioning new instances
        if not module.params.get('image'):
            module.fail_json(msg='image parameter is required for new instance')

        if module.params.get('exact_count') is None:
            (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2)
        else:
            (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2)

    module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
# import module snippets
# (Ansible replaces these at module-build time with the shared helper code.)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Entry point: Ansible executes the module file directly, so run immediately.
main()
| gpl-3.0 |
EducationforKids/e4k | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/generator/eclipse.py | 1825 | 17014 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
# Flipped to True by CalculateGeneratorInputInfo when the
# 'adjust_static_libraries' generator flag is set.
generator_wants_static_library_dependencies_adjusted = False

# Default variable values gyp expects every generator to provide.
generator_default_variables = {
}

for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!), so we convert them to variables
  generator_default_variables[dirname] = '$' + dirname

# These variables are irrelevant for this generator; map them to empty
# strings so gyp expansion still succeeds.
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''

# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
  '$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
  """Seed default_variables from generator flags and the detected OS flavor;
  on Windows, also pull in the shared MSVS emulation variables."""
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Eclipse generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): these two getattr results are assigned to locals that
    # are never read — they look like they were meant to update module-level
    # variables; confirm against the other gyp generators.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  if flags.get('adjust_static_libraries', False):
    # Record the request at module scope where gyp's input stage reads it.
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
                             shared_intermediate_dirs, config_name, params,
                             compiler_path):
  """Calculate the set of include directories to be used.

  Returns:
    A list including all the include_dir's specified for every target followed
    by any include directories that were added as cflag compiler options.
  """

  gyp_includes_set = set()
  compiler_includes_list = []

  # Find compiler's default include dirs.
  if compiler_path:
    # Run the compiler in verbose preprocess mode on empty stdin and parse
    # its search list from stderr.
    command = shlex.split(compiler_path)
    command.extend(['-E', '-xc++', '-v', '-'])
    proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.communicate()[1]
    # Extract the list of include dirs from the output, which has this format:
    #   ...
    #   #include "..." search starts here:
    #   #include <...> search starts here:
    #    /usr/include/c++/4.6
    #    /usr/local/include
    #   End of search list.
    #   ...
    in_include_list = False
    for line in output.splitlines():
      if line.startswith('#include'):
        in_include_list = True
        continue
      if line.startswith('End of search list.'):
        break
      if in_include_list:
        include_dir = line.strip()
        if include_dir not in compiler_includes_list:
          compiler_includes_list.append(include_dir)

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]
    if config_name in target['configurations']:
      config = target['configurations'][config_name]

      # Look for any include dirs that were explicitly added via cflags. This
      # may be done in gyp files to force certain includes to come at the end.
      # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
      # remove this.
      if flavor == 'win':
        msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
        cflags = msvs_settings.GetCflags(config_name)
      else:
        cflags = config['cflags']
      for cflag in cflags:
        if cflag.startswith('-I'):
          include_dir = cflag[2:]
          if include_dir not in compiler_includes_list:
            compiler_includes_list.append(include_dir)

      # Find standard gyp include dirs.
      if config.has_key('include_dirs'):
        include_dirs = config['include_dirs']
        for shared_intermediate_dir in shared_intermediate_dirs:
          for include_dir in include_dirs:
            # Expand the generated-headers placeholder for each candidate
            # intermediate dir (ninja and make use different layouts).
            include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
                                              shared_intermediate_dir)
            if not os.path.isabs(include_dir):
              # Relative include dirs are relative to the .gyp file's dir.
              base_dir = os.path.dirname(target_name)

              include_dir = base_dir + '/' + include_dir
              include_dir = os.path.abspath(include_dir)

            gyp_includes_set.add(include_dir)

  # Generate a list that has all the include dirs.
  # Gyp-declared dirs come first (sorted), then compiler defaults / cflag
  # additions in discovery order.
  all_includes_list = list(gyp_includes_set)
  all_includes_list.sort()
  for compiler_include in compiler_includes_list:
    if not compiler_include in gyp_includes_set:
      all_includes_list.append(compiler_include)

  # All done.
  return all_includes_list
def GetCompilerPath(target_list, data, options):
  """Determine a command that can be used to invoke the compiler.

  Returns:
    If this is a gyp project that has explicit make settings, try to determine
    the compiler from that. Otherwise, see if a compiler was specified via the
    CC_target environment variable.
  """
  # First, see if the compiler is configured in make's settings.
  # make_global_settings, when present, is a sequence of (key, value) pairs.
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_settings = data[build_file].get('make_global_settings', {})
  for name, value in make_settings:
    if name in ('CC', 'CXX'):
      return os.path.join(options.toplevel_dir, value)

  # Fall back to the conventional environment variables.
  for env_name in ('CC_target', 'CC', 'CXX'):
    candidate = os.environ.get(env_name)
    if candidate:
      return candidate

  return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
                  compiler_path):
  """Calculate the defines for a project.

  Returns:
    A dict that includes explict defines declared in gyp files along with all of
    the default defines that the compiler uses.
  """

  # Get defines declared in the gyp files.
  all_defines = {}
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]

    if flavor == 'win':
      # On Windows the MSVS emulation layer computes the effective defines.
      msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
      extra_defines = msvs_settings.GetComputedDefines(config_name)
    else:
      extra_defines = []
    if config_name in target['configurations']:
      config = target['configurations'][config_name]
      target_defines = config['defines']
    else:
      target_defines = []
    for define in target_defines + extra_defines:
      # 'NAME=VALUE' or bare 'NAME' (which defaults to '1').
      split_define = define.split('=', 1)
      if len(split_define) == 1:
        split_define.append('1')
      if split_define[0].strip() in all_defines:
        # Already defined
        # First definition wins across targets.
        continue
      all_defines[split_define[0].strip()] = split_define[1].strip()

  # Get default compiler defines (if possible).
  if flavor == 'win':
    return all_defines # Default defines already processed in the loop above.
  if compiler_path:
    # Ask the preprocessor to dump its builtin macros ('#define KEY VALUE'
    # lines) for empty input and parse them.
    command = shlex.split(compiler_path)
    command.extend(['-E', '-dM', '-'])
    cpp_proc = subprocess.Popen(args=command, cwd='.',
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    cpp_output = cpp_proc.communicate()[0]
    cpp_lines = cpp_output.split('\n')
    for cpp_line in cpp_lines:
      if not cpp_line.strip():
        continue
      cpp_line_parts = cpp_line.split(' ', 2)
      key = cpp_line_parts[1]
      if len(cpp_line_parts) >= 3:
        val = cpp_line_parts[2]
      else:
        val = '1'
      all_defines[key] = val

  return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
  """Write the includes section of a CDT settings export file."""
  write = out.write
  write('  <section name="org.eclipse.cdt.internal.ui.wizards.'
        'settingswizards.IncludePaths">\n')
  write('    <language name="holder for library settings"></language>\n')
  # Every language gets the same full list of include directories.
  for language in eclipse_langs:
    write('    <language name="%s">\n' % language)
    for directory in include_dirs:
      write('      <includepath workspace_path="false">%s</includepath>\n'
            % directory)
    write('    </language>\n')
  write('  </section>\n')
def WriteMacros(out, eclipse_langs, defines):
  """Write the macros section of a CDT settings export file.

  out: writable file-like object receiving the XML text.
  eclipse_langs: language names; each gets its own <language> element.
  defines: dict mapping macro name -> value, emitted sorted by name so the
      generated file is deterministic.  Names and values are XML-escaped.
  """
  out.write('  <section name="org.eclipse.cdt.internal.ui.wizards.' \
            'settingswizards.Macros">\n')
  out.write('    <language name="holder for library settings"></language>\n')
  for lang in eclipse_langs:
    out.write('    <language name="%s">\n' % lang)
    # sorted(defines) iterates keys in both Python 2 and 3; the previous
    # dict.iterkeys() call is Python-2-only and adds nothing here.
    for key in sorted(defines):
      out.write('      <macro><name>%s</name><value>%s</value></macro>\n' %
                (escape(key), escape(defines[key])))
    out.write('    </language>\n')
  out.write('  </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Emit the CDT settings and Java classpath files for one configuration."""
  options = params['options']
  flags = params.get('generator_flags', {})

  # Relative path from source root to our output files, e.g. "out/Debug",
  # anchored at the project's top-level directory.
  build_dir = os.path.join(flags.get('output_dir', 'out'), config_name)
  toplevel_build = os.path.join(options.toplevel_dir, build_dir)

  # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
  # SHARED_INTERMEDIATE_DIR. Include both possible locations.
  shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
                              os.path.join(toplevel_build, 'gen')]

  GenerateCdtSettingsFile(target_list,
                          target_dicts,
                          data,
                          params,
                          config_name,
                          os.path.join(toplevel_build,
                                       'eclipse-cdt-settings.xml'),
                          options,
                          shared_intermediate_dirs)
  GenerateClasspathFile(target_list,
                        target_dicts,
                        options.toplevel_dir,
                        toplevel_build,
                        os.path.join(toplevel_build,
                                     'eclipse-classpath.xml'))
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
                            config_name, out_name, options,
                            shared_intermediate_dirs):
  """Write an Eclipse CDT settings-export XML file to out_name."""
  gyp.common.EnsureDirExists(out_name)
  with open(out_name, 'w') as settings_file:
    settings_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    settings_file.write('<cdtprojectproperties>\n')

    # The CDT import wizard wants each setting repeated per language.
    langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
             'GNU C++', 'GNU C', 'Assembly']
    compiler = GetCompilerPath(target_list, data, options)

    includes = GetAllIncludeDirectories(target_list, target_dicts,
                                        shared_intermediate_dirs,
                                        config_name, params, compiler)
    WriteIncludePaths(settings_file, langs, includes)

    macros = GetAllDefines(target_list, target_dicts, data, config_name,
                           params, compiler)
    WriteMacros(settings_file, langs, macros)

    settings_file.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
                          toplevel_build, out_name):
  '''Generates a classpath file suitable for symbol navigation and code
  completion of Java code (such as in Android projects) by finding all
  .java and .jar files used as action inputs.'''
  gyp.common.EnsureDirExists(out_name)
  root = ET.Element('classpath')

  def _add_entries(kind, paths):
    # Normalize everything to be relative to the toplevel dir, then emit
    # the de-duplicated entries in sorted order.
    rel_paths = set(
        os.path.relpath(p, toplevel_dir) if os.path.isabs(p) else p
        for p in paths)
    for p in sorted(rel_paths):
      entry = ET.SubElement(root, 'classpathentry')
      entry.set('kind', kind)
      entry.set('path', p)

  _add_entries('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
  _add_entries('src', GetJavaSourceDirs(target_list, target_dicts,
                                        toplevel_dir))
  # Include the standard JRE container and a dummy out folder
  _add_entries('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
  # Include a dummy out folder so that Eclipse doesn't use the default /bin
  # folder in the root of the project.
  _add_entries('output', [os.path.join(toplevel_build, '.eclipse-java-build')])

  ET.ElementTree(root).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all .jars used as inputs.

  Relative jar paths are resolved against the directory of the target that
  references them; absolute paths and generated ($-prefixed) inputs are
  yielded unchanged / skipped respectively.
  '''
  for target_name in target_list:
    target_dir = os.path.dirname(target_name)
    for action in target_dicts[target_name].get('actions', []):
      for candidate in action['inputs']:
        # Only real (non-generated) .jar inputs are interesting.
        if os.path.splitext(candidate)[1] != '.jar' or candidate.startswith('$'):
          continue
        if os.path.isabs(candidate):
          yield candidate
        else:
          yield os.path.join(target_dir, candidate)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all likely java package root directories.

  For each .java action input, walks up from the file's directory looking for
  a parent named 'src' or 'java' (canonical package roots in Chromium); if
  none is found before the toplevel dir, the file's own directory is yielded.
  '''
  for target_name in target_list:
    target_dir = os.path.dirname(target_name)
    for action in target_dicts[target_name].get('actions', []):
      for candidate in action['inputs']:
        if (os.path.splitext(candidate)[1] != '.java' or
            candidate.startswith('$')):
          continue
        source_dir = os.path.dirname(os.path.join(target_dir, candidate))
        # Walk upwards looking for a canonical package root. This will break
        # if 'src' or 'java' exists inside the package structure; inspecting
        # the java file's package declaration would be more robust.
        probe = source_dir
        package_root = None
        while os.path.basename(probe) not in ['src', 'java']:
          probe = os.path.split(probe)[0]
          if not probe or probe == toplevel_dir:
            # Didn't find a known root, fall back to the original path.
            break
        else:
          package_root = probe
        yield source_dir if package_root is None else package_root
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate an XML settings file that can be imported into a CDT project."""

  if params['options'].generator_output:
    raise NotImplementedError("--generator_output not implemented for eclipse")

  # If the user pinned a single configuration via generator_flags, generate
  # only that one; otherwise generate output for every configuration of the
  # first target (all targets are assumed to share the same config names).
  user_config = params.get('generator_flags', {}).get('config', None)
  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    config_names = target_dicts[target_list[0]]['configurations'].keys()
    for config_name in config_names:
      GenerateOutputForConfig(target_list, target_dicts, data, params,
                              config_name)
| mit |
psychopy/psychopy | psychopy/tests/test_app/test_builder/test_BuilderFrame.py | 1 | 5399 | from __future__ import print_function
from builtins import object
from os import path
import shutil
import py_compile
from tempfile import mkdtemp
import codecs
import pytest
import locale
import time
import psychopy.experiment
from psychopy import prefs
from psychopy.app.builder.dialogs import DlgComponentProperties
from psychopy.app.builder.validators import CodeSnippetValidator
from psychopy.experiment import Param
# Jeremy Gray March 2011
# caveats when comparing files:
# - dicts have no defined order, can load and save differently: use a
# known-diff file to suppress boring errors. This situation was
# addressed in 7e2c72a for stimOut by sorting the keys
# - namespace.makeValid() can change var names from the orig demos,
# but should not do so from a load-save-load because only the first
# load should change things
allComponents = psychopy.experiment.getComponents(fetchIcons=False)
import wx
class Test_BuilderFrame(object):
    """This test fetches all standard components and checks that, with default
    settings, they can be added to a Routine and result in a script that compiles
    """

    def setup(self):
        # Fresh temp dir for each test; removed again in teardown().
        self.here = path.abspath(path.dirname(__file__))
        self.tmp_dir = mkdtemp(prefix='psychopy-tests-app')

    def teardown(self):
        shutil.rmtree(self.tmp_dir, ignore_errors=True)

    @pytest.mark.usefixtures("get_app")
    def test_BuilderFrame(self, get_app):
        """Tests of the Builder frame. We can call dialog boxes using
        a timeout (will simulate OK being pressed)
        """
        builderView = get_app.newBuilderFrame()  # self._app comes from requires_app
        expfile = path.join(prefs.paths['tests'],
                            'data', 'test001EntryImporting.psyexp')
        builderView.fileOpen(filename=expfile)
        builderView.setExperimentSettings(timeout=500)
        builderView.isModified = False
        builderView.runFile()
        builderView.closeFrame()

    def _getCleanExp(self, app):
        """Return a new Experiment containing one empty 'testRoutine' in its flow."""
        builder = app.newBuilderFrame()
        exp = builder.exp
        exp.addRoutine('testRoutine')
        testRoutine = exp.routines['testRoutine']
        exp.flow.addRoutine(testRoutine, 0)
        return exp

    def _checkCompileWith(self, thisComp, app):
        """Adds the component to the current Routine and makes sure it still
        compiles
        """
        filename = thisComp.params['name'].val+'.py'
        filepath = path.join(self.tmp_dir, filename)

        exp = self._getCleanExp(app)
        testRoutine = exp.routines['testRoutine']
        testRoutine.addComponent(thisComp)
        #make sure the mouse code compiles

        # generate a script, similar to 'lastrun.py':
        buff = exp.writeScript()  # is a StringIO object
        script = buff.getvalue()
        assert len(script) > 1500  # default empty script is ~2200 chars

        # save the script:
        f = codecs.open(filepath, 'w', 'utf-8')
        f.write(script)
        f.close()

        # compile the temp file to .pyc, catching error msgs (including no file at all):
        py_compile.compile(filepath, doraise=True)
        return filepath + 'c'

    def test_MessageDialog(self):
        """Test the message dialog
        """
        from psychopy.app.dialogs import MessageDialog
        # timeout=500 auto-dismisses the dialog as if OK had been pressed
        dlg = MessageDialog(message="Just a test", timeout=500)
        ok = dlg.ShowModal()
        assert ok == wx.ID_OK

    @pytest.mark.usefixtures("get_app")
    def test_ComponentDialogs(self, get_app):
        """Test the message dialog
        """
        builderView = get_app.newBuilderFrame()  # self._app comes from requires_app
        componsPanel = builderView.componentButtons
        for compBtn in list(componsPanel.compButtons):
            # simulate clicking the button for each component
            assert compBtn.onClick(timeout=500)
        builderView.isModified = False
        builderView.closeFrame()
        del builderView, componsPanel

    @pytest.mark.usefixtures("get_app")
    def test_param_validator(self, get_app):
        """Test the code validator for component parameters"""
        builderView = get_app.newBuilderFrame()
        # Define 'tykes' - combinations of values likely to cause an error if certain features aren't working
        tykes = [
            {'fieldName': "brokenCode", 'param': Param(val="for + :", valType="code"), 'msg': "Python syntax error in field `{fieldName}`: {param.val}"},  # Make sure it's picking up clearly broken code
            {'fieldName': "correctAns", 'param': Param(val="'space'", valType="code"), 'msg': ""},  # Single-element lists should not cause warning
        ]
        for tyke in tykes:
            # For each tyke, create a dummy environment
            parent = DlgComponentProperties(
                frame=builderView, title='Param Testing',
                params={tyke['fieldName']: tyke['param']}, order=[],
                testing=True)
            # Set validator and validate
            parent.SetValidator(CodeSnippetValidator(tyke['fieldName']))
            parent.Validate()
            # Does the message delivered by the validator match what is expected?
            warnings = [w for w in list(parent.warningsDict.values()) if w] or ['']
            msg = warnings[0]
            assert msg == tyke['msg'].format(**tyke)
            # Cleanup
            parent.Destroy()
jrior001/evitaul-3.4.100-HTC | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Per-run tracking state. The original initialized process_names twice;
# the redundant first assignment has been removed.
thread_thislock = {}   # tid -> futex address the thread is currently blocked on
thread_blocktime = {}  # tid -> timestamp (ns) when the thread blocked
lock_waits = {}  # long-lived stats on (tid,lock) blockage elapsed time
process_names = {}  # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    # Record the moment a thread blocks on FUTEX_WAIT.
    # FUTEX_CMD_MASK / FUTEX_WAIT come from perf's Util module (star import).
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return # we don't care about originators of WAKE events

    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    # On return from the futex syscall, accumulate how long the thread was
    # blocked. Only threads seen in sys_enter_futex are tracked.
    # dict.has_key() is deprecated (and removed in Python 3); use `in`.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    # Parenthesized print is valid as both a Py2 print statement (single
    # parenthesized expression) and a Py3 function call.
    print("Press control+C to stop and show the summary")
def trace_end():
    # Print per-(thread, lock) contention statistics collected in lock_waits.
    # Locals renamed from min/max to avoid shadowing the builtins; print is
    # parenthesized so the statement is valid in both Python 2 and 3.
    for (tid, lock) in lock_waits:
        min_ns, max_ns, avg, count = lock_waits[tid, lock]
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, count, avg))
| gpl-2.0 |
mvillalba/python-ant | src/ant/core/tests/message_tests.py | 5 | 13714 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011, Martín Raúl Villalba
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
##############################################################################
import unittest
from ant.core.message import *
class MessageTest(unittest.TestCase):
def setUp(self):
self.message = Message()
def test_get_setPayload(self):
self.assertRaises(MessageError, self.message.setPayload,
'\xFF' * 15)
self.message.setPayload('\x11' * 5)
self.assertEquals(self.message.getPayload(), '\x11' * 5)
def test_get_setType(self):
self.assertRaises(MessageError, self.message.setType, -1)
self.assertRaises(MessageError, self.message.setType, 300)
self.message.setType(0x23)
self.assertEquals(self.message.getType(), 0x23)
def test_getChecksum(self):
self.message = Message(type_=MESSAGE_SYSTEM_RESET, payload='\x00')
self.assertEquals(self.message.getChecksum(), 0xEF)
self.message = Message(type_=MESSAGE_CHANNEL_ASSIGN,
payload='\x00' * 3)
self.assertEquals(self.message.getChecksum(), 0xE5)
def test_getSize(self):
self.message.setPayload('\x11' * 7)
self.assertEquals(self.message.getSize(), 11)
def test_encode(self):
self.message = Message(type_=MESSAGE_CHANNEL_ASSIGN,
payload='\x00' * 3)
self.assertEqual(self.message.encode(),
'\xA4\x03\x42\x00\x00\x00\xE5')
def test_decode(self):
self.assertRaises(MessageError, self.message.decode,
'\xA5\x03\x42\x00\x00\x00\xE5')
self.assertRaises(MessageError, self.message.decode,
'\xA4\x14\x42' + ('\x00' * 20) + '\xE5')
self.assertRaises(MessageError, self.message.decode,
'\xA4\x03\x42\x01\x02\xF3\xE5')
self.assertEqual(self.message.decode('\xA4\x03\x42\x00\x00\x00\xE5'),
7)
self.assertEqual(self.message.getType(), MESSAGE_CHANNEL_ASSIGN)
self.assertEqual(self.message.getPayload(), '\x00' * 3)
self.assertEqual(self.message.getChecksum(), 0xE5)
def test_getHandler(self):
handler = self.message.getHandler('\xA4\x03\x42\x00\x00\x00\xE5')
self.assertTrue(isinstance(handler, ChannelAssignMessage))
self.assertRaises(MessageError, self.message.getHandler,
'\xA4\x03\xFF\x00\x00\x00\xE5')
self.assertRaises(MessageError, self.message.getHandler,
'\xA4\x03\x42')
self.assertRaises(MessageError, self.message.getHandler,
'\xA4\x05\x42\x00\x00\x00\x00')
class ChannelMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelMessage(type_=MESSAGE_SYSTEM_RESET)
def test_get_setChannelNumber(self):
self.assertEquals(self.message.getChannelNumber(), 0)
self.message.setChannelNumber(3)
self.assertEquals(self.message.getChannelNumber(), 3)
class ChannelUnassignMessageTest(unittest.TestCase):
    """Placeholder: ChannelUnassignMessage defines no methods that need testing yet."""
class ChannelAssignMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelAssignMessage()
def test_get_setChannelType(self):
self.message.setChannelType(0x10)
self.assertEquals(self.message.getChannelType(), 0x10)
def test_get_setNetworkNumber(self):
self.message.setNetworkNumber(0x11)
self.assertEquals(self.message.getNetworkNumber(), 0x11)
def test_payload(self):
self.message.setChannelNumber(0x01)
self.message.setChannelType(0x02)
self.message.setNetworkNumber(0x03)
self.assertEquals(self.message.getPayload(), '\x01\x02\x03')
class ChannelIDMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelIDMessage()
def test_get_setDeviceNumber(self):
self.message.setDeviceNumber(0x10FA)
self.assertEquals(self.message.getDeviceNumber(), 0x10FA)
def test_get_setDeviceType(self):
self.message.setDeviceType(0x10)
self.assertEquals(self.message.getDeviceType(), 0x10)
def test_get_setTransmissionType(self):
self.message.setTransmissionType(0x11)
self.assertEquals(self.message.getTransmissionType(), 0x11)
def test_payload(self):
self.message.setChannelNumber(0x01)
self.message.setDeviceNumber(0x0302)
self.message.setDeviceType(0x04)
self.message.setTransmissionType(0x05)
self.assertEquals(self.message.getPayload(), '\x01\x02\x03\x04\x05')
class ChannelPeriodMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelPeriodMessage()
def test_get_setChannelPeriod(self):
self.message.setChannelPeriod(0x10FA)
self.assertEquals(self.message.getChannelPeriod(), 0x10FA)
def test_payload(self):
self.message.setChannelNumber(0x01)
self.message.setChannelPeriod(0x0302)
self.assertEquals(self.message.getPayload(), '\x01\x02\x03')
class ChannelSearchTimeoutMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelSearchTimeoutMessage()
def test_get_setTimeout(self):
self.message.setTimeout(0x10)
self.assertEquals(self.message.getTimeout(), 0x10)
def test_payload(self):
self.message.setChannelNumber(0x01)
self.message.setTimeout(0x02)
self.assertEquals(self.message.getPayload(), '\x01\x02')
class ChannelFrequencyMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelFrequencyMessage()
def test_get_setFrequency(self):
self.message.setFrequency(22)
self.assertEquals(self.message.getFrequency(), 22)
def test_payload(self):
self.message.setChannelNumber(0x01)
self.message.setFrequency(0x02)
self.assertEquals(self.message.getPayload(), '\x01\x02')
class ChannelTXPowerMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelTXPowerMessage()
def test_get_setPower(self):
self.message.setPower(0xFA)
self.assertEquals(self.message.getPower(), 0xFA)
def test_payload(self):
self.message.setChannelNumber(0x01)
self.message.setPower(0x02)
self.assertEquals(self.message.getPayload(), '\x01\x02')
class NetworkKeyMessageTest(unittest.TestCase):
def setUp(self):
self.message = NetworkKeyMessage()
def test_get_setNumber(self):
self.message.setNumber(0xFA)
self.assertEquals(self.message.getNumber(), 0xFA)
def test_get_setKey(self):
self.message.setKey('\xFD' * 8)
self.assertEquals(self.message.getKey(), '\xFD' * 8)
def test_payload(self):
self.message.setNumber(0x01)
self.message.setKey('\x02\x03\x04\x05\x06\x07\x08\x09')
self.assertEquals(self.message.getPayload(),
'\x01\x02\x03\x04\x05\x06\x07\x08\x09')
class TXPowerMessageTest(unittest.TestCase):
def setUp(self):
self.message = TXPowerMessage()
def test_get_setPower(self):
self.message.setPower(0xFA)
self.assertEquals(self.message.getPower(), 0xFA)
def test_payload(self):
self.message.setPower(0x01)
self.assertEquals(self.message.getPayload(), '\x00\x01')
class SystemResetMessageTest(unittest.TestCase):
    """Placeholder: SystemResetMessage defines no methods that need testing yet."""
class ChannelOpenMessageTest(unittest.TestCase):
    """Placeholder: ChannelOpenMessage defines no methods that need testing yet."""
class ChannelCloseMessageTest(unittest.TestCase):
    """Placeholder: ChannelCloseMessage defines no methods that need testing yet."""
class ChannelRequestMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelRequestMessage()
def test_get_setMessageID(self):
self.message.setMessageID(0xFA)
self.assertEquals(self.message.getMessageID(), 0xFA)
self.assertRaises(MessageError, self.message.setMessageID, 0xFFFF)
def test_payload(self):
self.message.setChannelNumber(0x01)
self.message.setMessageID(0x02)
self.assertEquals(self.message.getPayload(), '\x01\x02')
class ChannelBroadcastDataMessageTest(unittest.TestCase):
    """Placeholder: ChannelBroadcastDataMessage defines no methods that need testing yet."""
class ChannelAcknowledgedDataMessageTest(unittest.TestCase):
    """Placeholder: ChannelAcknowledgedDataMessage defines no methods that need testing yet."""
class ChannelBurstDataMessageTest(unittest.TestCase):
    """Placeholder: ChannelBurstDataMessage defines no methods that need testing yet."""
class ChannelEventMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelEventMessage()
def test_get_setMessageID(self):
self.message.setMessageID(0xFA)
self.assertEquals(self.message.getMessageID(), 0xFA)
self.assertRaises(MessageError, self.message.setMessageID, 0xFFFF)
def test_get_setMessageCode(self):
self.message.setMessageCode(0xFA)
self.assertEquals(self.message.getMessageCode(), 0xFA)
self.assertRaises(MessageError, self.message.setMessageCode, 0xFFFF)
def test_payload(self):
self.message.setChannelNumber(0x01)
self.message.setMessageID(0x02)
self.message.setMessageCode(0x03)
self.assertEquals(self.message.getPayload(), '\x01\x02\x03')
class ChannelStatusMessageTest(unittest.TestCase):
def setUp(self):
self.message = ChannelStatusMessage()
def test_get_setStatus(self):
self.message.setStatus(0xFA)
self.assertEquals(self.message.getStatus(), 0xFA)
self.assertRaises(MessageError, self.message.setStatus, 0xFFFF)
def test_payload(self):
self.message.setChannelNumber(0x01)
self.message.setStatus(0x02)
self.assertEquals(self.message.getPayload(), '\x01\x02')
class VersionMessageTest(unittest.TestCase):
def setUp(self):
self.message = VersionMessage()
def test_get_setVersion(self):
self.message.setVersion('\xAB' * 9)
self.assertEquals(self.message.getVersion(), '\xAB' * 9)
self.assertRaises(MessageError, self.message.setVersion, '1234')
def test_payload(self):
self.message.setVersion('\x01' * 9)
self.assertEquals(self.message.getPayload(), '\x01' * 9)
class CapabilitiesMessageTest(unittest.TestCase):
def setUp(self):
self.message = CapabilitiesMessage()
def test_get_setMaxChannels(self):
self.message.setMaxChannels(0xFA)
self.assertEquals(self.message.getMaxChannels(), 0xFA)
self.assertRaises(MessageError, self.message.setMaxChannels, 0xFFFF)
def test_get_setMaxNetworks(self):
self.message.setMaxNetworks(0xFA)
self.assertEquals(self.message.getMaxNetworks(), 0xFA)
self.assertRaises(MessageError, self.message.setMaxNetworks, 0xFFFF)
def test_get_setStdOptions(self):
self.message.setStdOptions(0xFA)
self.assertEquals(self.message.getStdOptions(), 0xFA)
self.assertRaises(MessageError, self.message.setStdOptions, 0xFFFF)
def test_get_setAdvOptions(self):
self.message.setAdvOptions(0xFA)
self.assertEquals(self.message.getAdvOptions(), 0xFA)
self.assertRaises(MessageError, self.message.setAdvOptions, 0xFFFF)
def test_get_setAdvOptions2(self):
self.message.setAdvOptions2(0xFA)
self.assertEquals(self.message.getAdvOptions2(), 0xFA)
self.assertRaises(MessageError, self.message.setAdvOptions2, 0xFFFF)
self.message = CapabilitiesMessage(adv_opts2=None)
self.assertEquals(len(self.message.payload), 4)
def test_payload(self):
self.message.setMaxChannels(0x01)
self.message.setMaxNetworks(0x02)
self.message.setStdOptions(0x03)
self.message.setAdvOptions(0x04)
self.message.setAdvOptions2(0x05)
self.assertEquals(self.message.getPayload(), '\x01\x02\x03\x04\x05')
class SerialNumberMessageTest(unittest.TestCase):
def setUp(self):
self.message = SerialNumberMessage()
def test_get_setSerialNumber(self):
self.message.setSerialNumber('\xFA\xFB\xFC\xFD')
self.assertEquals(self.message.getSerialNumber(), '\xFA\xFB\xFC\xFD')
self.assertRaises(MessageError, self.message.setSerialNumber,
'\xFF' * 8)
def test_payload(self):
self.message.setSerialNumber('\x01\x02\x03\x04')
self.assertEquals(self.message.getPayload(), '\x01\x02\x03\x04')
| mit |
davehunt/selenium | py/selenium/webdriver/firefox/firefox_binary.py | 43 | 8752 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import platform
from subprocess import Popen, STDOUT
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
import time
class FirefoxBinary(object):
    """Locates, launches and terminates a Firefox process for a given profile."""

    # Preload library used on Linux to stop Firefox from stealing focus.
    NO_FOCUS_LIBRARY_NAME = "x_ignore_nofocus.so"

    def __init__(self, firefox_path=None, log_file=None):
        """
        Creates a new instance of Firefox binary.

        :Args:
         - firefox_path - Path to the Firefox executable. By default, it will be detected from the standard locations.
         - log_file - A file object to redirect the firefox process output to. It can be sys.stdout.
                      Please note that with parallel run the output won't be synchronous.
                      By default, it will be redirected to /dev/null.
        """
        self._start_cmd = firefox_path
        # We used to default to subprocess.PIPE instead of /dev/null, but after
        # a while the pipe would fill up and Firefox would freeze.
        self._log_file = log_file or open(os.devnull, "wb")
        self.command_line = None
        if self._start_cmd is None:
            self._start_cmd = self._get_firefox_start_cmd()
        if not self._start_cmd.strip():
            raise WebDriverException(
                "Failed to find firefox binary. You can set it by specifying "
                "the path to 'firefox_binary':\n\nfrom "
                "selenium.webdriver.firefox.firefox_binary import "
                "FirefoxBinary\n\nbinary = "
                "FirefoxBinary('/path/to/binary')\ndriver = "
                "webdriver.Firefox(firefox_binary=binary)")
        # Rather than modifying the environment of the calling Python process
        # copy it and modify as needed.
        self._firefox_env = os.environ.copy()
        self._firefox_env["MOZ_CRASHREPORTER_DISABLE"] = "1"
        self._firefox_env["MOZ_NO_REMOTE"] = "1"
        self._firefox_env["NO_EM_RESTART"] = "1"

    def add_command_line_options(self, *args):
        # Extra CLI arguments appended to the firefox command at launch time.
        self.command_line = args

    def launch_browser(self, profile, timeout=30):
        """Launches the browser for the given profile name.
        It is assumed the profile already exists.
        """
        self.profile = profile
        self._start_from_profile_path(self.profile.path)
        self._wait_until_connectable(timeout=timeout)

    def kill(self):
        """Kill the browser.

        This is useful when the browser is stuck.
        """
        if self.process:
            self.process.kill()
            self.process.wait()

    def _start_from_profile_path(self, path):
        # XRE_PROFILE_PATH tells Firefox which profile directory to use.
        self._firefox_env["XRE_PROFILE_PATH"] = path

        if platform.system().lower() == 'linux':
            self._modify_link_library_path()
        command = [self._start_cmd, "-foreground"]
        if self.command_line is not None:
            for cli in self.command_line:
                command.append(cli)
        self.process = Popen(
            command, stdout=self._log_file, stderr=STDOUT,
            env=self._firefox_env)

    def _wait_until_connectable(self, timeout=30):
        """Blocks until the extension is connectable in the firefox."""
        count = 0
        while not utils.is_connectable(self.profile.port):
            if self.process.poll() is not None:
                # Browser has exited
                raise WebDriverException(
                    "The browser appears to have exited "
                    "before we could connect. If you specified a log_file in "
                    "the FirefoxBinary constructor, check it for details.")
            if count >= timeout:
                self.kill()
                raise WebDriverException(
                    "Can't load the profile. Possible firefox version mismatch. "
                    "You must use GeckoDriver instead for Firefox 48+. Profile "
                    "Dir: %s If you specified a log_file in the "
                    "FirefoxBinary constructor, check it for details."
                    % (self.profile.path))
            count += 1
            time.sleep(1)
        return True

    def _find_exe_in_registry(self):
        """Look up the firefox.exe launch command in the Windows registry.

        Returns "" when no usable registry entry is found.
        """
        try:
            from _winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER
        except ImportError:
            from winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER
        import shlex
        keys = (r"SOFTWARE\Classes\FirefoxHTML\shell\open\command",
                r"SOFTWARE\Classes\Applications\firefox.exe\shell\open\command")
        command = ""
        for path in keys:
            try:
                key = OpenKey(HKEY_LOCAL_MACHINE, path)
                command = QueryValue(key, "")
                break
            except OSError:
                try:
                    key = OpenKey(HKEY_CURRENT_USER, path)
                    command = QueryValue(key, "")
                    break
                except OSError:
                    pass
        else:
            return ""

        if not command:
            return ""

        # The registry value is a full command line; the executable is token 0.
        return shlex.split(command)[0]

    def _get_firefox_start_cmd(self):
        """Return the command to start firefox."""
        start_cmd = ""
        if platform.system() == "Darwin":
            start_cmd = "/Applications/Firefox.app/Contents/MacOS/firefox-bin"
            # fallback to homebrew installation for mac users
            if not os.path.exists(start_cmd):
                start_cmd = os.path.expanduser("~") + start_cmd
        elif platform.system() == "Windows":
            start_cmd = (self._find_exe_in_registry() or self._default_windows_location())
        elif platform.system() == 'Java' and os._name == 'nt':
            # Jython on Windows.
            start_cmd = self._default_windows_location()
        else:
            for ffname in ["firefox", "iceweasel"]:
                start_cmd = self.which(ffname)
                if start_cmd is not None:
                    break
            else:
                # couldn't find firefox on the system path
                raise RuntimeError(
                    "Could not find firefox in your system PATH." +
                    " Please specify the firefox binary location or install firefox")
        return start_cmd

    def _default_windows_location(self):
        program_files = [os.getenv("PROGRAMFILES", r"C:\Program Files"),
                         os.getenv("PROGRAMFILES(X86)", r"C:\Program Files (x86)")]
        for path in program_files:
            binary_path = os.path.join(path, r"Mozilla Firefox\firefox.exe")
            if os.access(binary_path, os.X_OK):
                return binary_path
        return ""

    def _modify_link_library_path(self):
        # Prepend the no-focus library directories to LD_LIBRARY_PATH and
        # preload the library so Firefox cannot grab input focus on Linux.
        existing_ld_lib_path = os.environ.get('LD_LIBRARY_PATH', '')
        new_ld_lib_path = self._extract_and_check(
            self.profile, self.NO_FOCUS_LIBRARY_NAME, "x86", "amd64")
        new_ld_lib_path += existing_ld_lib_path
        self._firefox_env["LD_LIBRARY_PATH"] = new_ld_lib_path
        self._firefox_env['LD_PRELOAD'] = self.NO_FOCUS_LIBRARY_NAME

    def _extract_and_check(self, profile, no_focus_so_name, x86, amd64):
        # Copy both 32- and 64-bit no-focus libraries into the profile dir and
        # build a colon-separated LD_LIBRARY_PATH prefix from their locations.
        paths = [x86, amd64]
        built_path = ""
        for path in paths:
            library_path = os.path.join(profile.path, path)
            if not os.path.exists(library_path):
                os.makedirs(library_path)
            import shutil
            shutil.copy(os.path.join(
                os.path.dirname(__file__),
                path,
                self.NO_FOCUS_LIBRARY_NAME),
                library_path)
            built_path += library_path + ":"
        return built_path

    def which(self, fname):
        """Returns the fully qualified path by searching Path of the given
        name"""
        for pe in os.environ['PATH'].split(os.pathsep):
            checkname = os.path.join(pe, fname)
            if os.access(checkname, os.X_OK) and not os.path.isdir(checkname):
                return checkname
        return None
| apache-2.0 |
vholer/ZenPacks.CERIT_SC.LinuxMonitorAdvanced | ZenPacks/CERIT_SC/LinuxMonitorAdvanced/lib/parse.py | 1 | 4454 | import re
__all__ = ['parse_mdstat']

# Unit suffix -> byte multiplier, used when parsing the mdstat chunk size.
MULTIPLIER = {
    'KB' : 1024,
    'K' : 1024,
    'MB' : 1024 * 1024,
    'M' : 1024 * 1024,
    'B' : 1
}
def parse_mdstat(results):
    """Parse the contents of /proc/mdstat into a list of per-array dicts.

    Each returned dict contains the regex groups (id, md_status, type,
    devicesStr), plus: 'devices' (per-disk dicts), 'counts' (active/failed/
    spare/required/online), 'blocks' (array size in bytes), 'stripesize'
    (chunk size in bytes, 0 if absent), optional 'task'/'progress', and a
    numeric 'status' code.

    Python 3 compatibility fixes applied (behavior under Python 2 is
    unchanged): parenthesized print, int() instead of long(), dict.update()
    instead of concatenating .items() lists, raw-string regexes.
    """
    linePattern = re.compile(r"\n\s*\n")
    mds = []
    # strip first and last line:
    # > Personalities :
    # > unused devices: <none>
    for section in linePattern.split('\n'.join(results.splitlines()[1:-1])):
        # md1 : active raid1 sde1[6](F) sdg1[1] sdb1[4] sdd1[3] sdc1[2]
        # md0 : active raid1 vdg[2](S) vdf[1] vde[0]
        # md0 : active raid1 vdg[2] vdf[1](F) vde[0]
        match = re.match(r"md(?P<id>\d+) : (?P<md_status>\w+( \(.*\))?) (?P<type>[\w\d]+) (?P<devicesStr>[^\n]+)", section)
        if not match:
            print("No match!")
            continue
        md = match.groupdict()

        # read RAID devices
        counts = {'active': 0, 'failed': 0, 'spare': 0, 'required': 0, 'online': 0}
        devices = []
        for dev in match.group('devicesStr').split():
            matchDev = re.match(r"(?P<device>\w+)\[(?P<id>\d+)\](\((?P<status>\w)\))?", dev)
            devices.append(matchDev.groupdict())
            if matchDev.group('status') == 'F':
                counts['failed'] += 1
            elif matchDev.group('status') == 'S':
                counts['spare'] += 1
            else:
                counts['active'] += 1
        md['devices'] = devices

        # get total/online disks:
        # 63967104 blocks super 1.2 [2/2] [UU]
        #                            ^^^
        matchReq = re.search(r" \[(?P<required>\d+)/(?P<online>\d+)\] \[", section)
        if matchReq:
            counts['online'] = int(matchReq.group('online'))
            counts['required'] = int(matchReq.group('required'))

        # get RAID size (blocks are 1 KiB):
        # 15991680 blocks super 1.2 [2/2] [UU]
        matchBlcks = re.search(r"(?P<blocks>\d+) blocks", section)
        if matchBlcks:
            md['blocks'] = int(matchBlcks.group('blocks')) * 1024

        # get chunk size:
        # bitmap: 1/1 pages [4KB], 65536KB chunk
        # 996119552 blocks super 1.2 512k chunks
        matchChunk = re.search(r"\s(?P<size>\d+)\s*(?P<unit>\w+) chunk", section)
        if matchChunk:
            multi = MULTIPLIER.get(matchChunk.group('unit').upper(), 1)
            md['stripesize'] = int(matchChunk.group('size')) * multi
        else:
            md['stripesize'] = 0

        # search for current task/progress:
        # [==============>......]  recovery = 72.0% (...) finish=3.4min ...
        # resync=DELAYED / resync=PENDING
        matchTask = re.search(r"\s(?P<task>recovery|resync|check)\s*=\s*(?P<progress>[^ ]+)", section)
        if matchTask:
            md.update(matchTask.groupdict())

        md['counts'] = counts

        # determine RAID status
        status = 1
        if re.match('active', md['md_status'], re.I):
            if counts['required'] > 0:
                if counts['required'] != counts['online']:
                    status = 5  # degraded
                elif counts['failed'] > 0:
                    status = 4  # online but some failed disks
                else:
                    status = 2  # online
            elif counts['failed'] > 0:
                status = 4  # degraded (probably)
            else:
                status = 2  # online

            # task status: (pending, delayed, unknown, currently)
            taskMap = {
                'resync': (7, 8, 9, 10),
                'check': (11, 12, 13, 14),
                'recovery': (15, 16, 17, 18)
            }
            if md.get('task'):
                mapIdx = 2
                if 'PENDING' in md['progress']:
                    mapIdx = 0
                elif 'DELAYED' in md['progress']:
                    mapIdx = 1
                elif re.match(r'\d+(\.\d+)?%', md['progress']):
                    mapIdx = 3
                status = taskMap.get(md['task'], (19, 19, 19, 19))[mapIdx]
        elif re.match('inactive', md['md_status'], re.I):
            status = 3
        else:
            status = 6  # TODO??? failed??
        md['status'] = status
        mds.append(md)
    return mds
| mit |
noisy/steemprojects.com | package/admin.py | 1 | 2796 | from django.contrib import admin
from reversion.admin import VersionAdmin
from package.models import (
Category,
Commit,
PackageExample,
Project,
ProjectImage,
TeamMembership,
Version,
)
class PackageExampleInline(admin.TabularInline):
    # Inline editor for PackageExample rows on the parent admin change form.
    model = PackageExample
class PackageAdmin(VersionAdmin):
    """Admin for Project with reversion support and per-project image choices."""

    def render_change_form(self, request, context, *args, **kwargs):
        """Limit the 'main_img' choices to images belonging to the edited project."""
        # BUG FIX: on the add form there is no object yet (obj=None); the
        # original crashed on kwargs['obj'].id. Filtering on project_id=None
        # yields an empty queryset instead.
        obj = kwargs.get('obj')
        project_id = obj.id if obj is not None else None
        context['adminform'].form.fields['main_img'].queryset = \
            ProjectImage.objects.filter(project_id=project_id)
        # BUG FIX: the original passed `args, kwargs` positionally, sending the
        # tuple/dict objects themselves as the super method's add/change args.
        return super(PackageAdmin, self).render_change_form(
            request, context, *args, **kwargs)

    save_on_top = True
    search_fields = ("name",)
    list_filter = ("category", "is_published", "is_awaiting_approval")
    list_display = ("name", "created", "status", "slug", "is_published", "is_awaiting_approval")
    readonly_fields = ("publication_time",)
    date_hierarchy = "created"
    inlines = [
        PackageExampleInline,
    ]
    fieldsets = (
        (None, {
            "fields": (
                "name",
                "url",
                "description",
                "announcement_post",
                "main_img",
                "status",
                "slug",
                "category",
                "pypi_url",
                "repo_url",
                "contributors",
                "usage",
                "draft_added_by",
                "approvers",
                "last_modified_by",
                "is_published",
                "publication_time",
                "is_awaiting_approval",
                "approval_request_datetime",
            )
        }),
        ("Pulled data", {
            "classes": ("collapse",),
            "fields": ("repo_description", "repo_watchers", "repo_forks", "commit_list", "pypi_downloads", "participants")
        }),
    )
class CommitAdmin(admin.ModelAdmin):
    # Commits are only browsable filtered by their parent package.
    list_filter = ("package",)
class VersionLocalAdmin(admin.ModelAdmin):
    # Allow searching versions by the owning package's name.
    search_fields = ("package__name",)
class PackageExampleAdmin(admin.ModelAdmin):
    # Standalone admin for examples (they are also editable inline on Project).
    list_display = ("title", )
    search_fields = ("title",)
class TeamMembershipAdmin(admin.ModelAdmin):
    # Shows who belongs to which project, their role, and who confirmed it.
    list_display = ("account", "project", "role", "role_confirmed_by_account")
class ProjectImageAdmin(admin.ModelAdmin):
    # image_tag_thumb / image_tag are rendered previews, hence read-only.
    fields = ('project', 'img', 'image_tag_thumb', 'image_tag',)
    readonly_fields = ('image_tag_thumb', 'image_tag',)
    list_display = ("project", "img", "image_tag_thumb")
# Wire the models to the admin site. Category only needs django-reversion's
# stock VersionAdmin; Project gets the customised PackageAdmin above.
admin.site.register(Category, VersionAdmin)
admin.site.register(Project, PackageAdmin)
admin.site.register(Commit, CommitAdmin)
admin.site.register(Version, VersionLocalAdmin)
admin.site.register(PackageExample, PackageExampleAdmin)
admin.site.register(TeamMembership, TeamMembershipAdmin)
admin.site.register(ProjectImage, ProjectImageAdmin)
| mit |
bcroq/kansha | kansha/board/view.py | 2 | 35048 | # -*- coding:utf-8 -*-
# --
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
from nagare.i18n import _, _N, _L
from nagare import ajax, component, presentation, security, var
from kansha import notifications
from kansha.toolbox import overlay, remote
from kansha.board.boardconfig import WeightsSequenceEditor
from .boardsmanager import BoardsManager
from .comp import (Board, BoardDescription, BoardMember,
Icon)
from .comp import (BOARD_PRIVATE, BOARD_PUBLIC, BOARD_SHARED,
COMMENTS_OFF, COMMENTS_PUBLIC, COMMENTS_MEMBERS,
VOTES_OFF, VOTES_PUBLIC, VOTES_MEMBERS,
WEIGHTING_FREE, WEIGHTING_LIST, WEIGHTING_OFF)
from .boardconfig import BoardBackground, BoardConfig, BoardLabels, BoardProfile, BoardWeights
VISIBILITY_ICONS = {
BOARD_PRIVATE: 'icon-lock',
BOARD_PUBLIC: 'icon-unlocked',
BOARD_SHARED: 'icon-earth'
}
BACKGROUND_POSITIONS = [
('fill', _L(u"fill")),
('fit', _L(u"fit")),
('stretch', _L(u"stretch")),
('tile', _L(u"tile")),
('center', _L(u"center"))
]
@presentation.render_for(Board, model="menu")
def render_Board_menu(self, h, comp, *args):
    """Render the board's drop-down action menu in the top navigation.

    Entries depend on the current user's permissions: editors get list and
    description actions, managers additionally get template/archive actions,
    everyone else can only leave the board.
    """
    with h.div(class_='nav-menu', onclick='YAHOO.kansha.app.toggleMainMenu(this)'):
        with h.ul(class_='actions large'):
            h << h.li(h.a(self.icons['preferences']).action(self.show_preferences))
            if security.has_permissions('edit', self):
                h << h.li(h.a(self.icons['add_list']).action(self.add_list))
                h << h.li(h.a(self.icons['edit_desc']).action(self.edit_description))
                if security.has_permissions('manage', self):
                    h << h.li(h.a(self.icons['save_template']).action(self.save_template, comp))
                # export needs a full page round-trip, hence the SyncRenderer.
                h << h.li(h.SyncRenderer().a(self.icons['export']).action(self.export))
                h << h.li(h.a(self.icons['history']).action(self.show_actionlog))
            if security.has_permissions('manage', self):
                h << h.li(h.SyncRenderer().a(
                    self.icons['archive'],
                    onclick=(
                        'return confirm(%s)' %
                        ajax.py2js(
                            _("This board will be archived. Are you sure?")
                        ).decode('UTF-8')
                    )
                ).action(self.archive, comp))
            else:
                h << h.li(h.SyncRenderer().a(
                    self.icons['leave'],
                    onclick=(
                        "return confirm(%s)" %
                        ajax.py2js(
                            _("You won't be able to access this board anymore. Are you sure you want to leave it anyway?")
                        ).decode('UTF-8')
                    )
                ).action(self.leave, comp))
        h << h.span(_(u'Board'), class_="menu-title", id='board-nav-menu')
    h << self.modal
    return h.root
@presentation.render_for(Board)
def render_Board(self, h, comp, *args):
    """Main board renderer: header (title + view switch) and body.

    The body delegates to the ``self.model`` sub-view ('columns' or
    'calendar'). Raises if the user lacks 'view' permission on the board.
    """
    security.check_permissions('view', self)
    self.refresh_on_version_mismatch()
    self.card_filter.reload_search()
    h.head.css_url('css/themes/board.css?v=2c')
    h.head.css_url('css/themes/%s/board.css?v=2c' % self.theme)
    title = '%s - %s' % (self.get_title(), self.app_title)
    h.head << h.head.title(title)
    # The action menu is only useful to users who can actually edit.
    if security.has_permissions('edit', self):
        h << comp.render(h.AsyncRenderer(), "menu")
    with h.div(class_='board'):
        if self.background_image_url:
            # Override class/style to paint the configured background image.
            h << {'class': 'board ' + self.background_image_position,
                  'style': 'background-image:url(%s)' % self.background_image_url}
        with h.div(class_='header'):
            with h.div(class_='board-title', style='color: %s' % self.title_color):
                h << self.title.render(h.AsyncRenderer(), 0 if security.has_permissions('edit', self) else 'readonly')
            h << comp.render(h, 'switch')
        with h.div(class_='bbody'):
            h << comp.render(h.AsyncRenderer(), self.model)
    return h.root
@presentation.render_for(Board, 'switch')
def render_Board_switch(self, h, comp, *args):
    """Render the board/calendar toggle (plus the card search in board mode).

    Renamed from ``render_Board_item``: the original module-level name
    collided with the 'item' renderer defined below, which silently rebound
    it at import time. Registration via the decorator is unaffected.
    """
    with h.div(id='switch_zone'):
        if self.model == 'columns':
            # card search is only meaningful in column (board) mode
            h << self.search_input
            # active view's button is rendered disabled+selected
            h << h.SyncRenderer().a(
                h.i(class_='icon-calendar'),
                title=_('Calendar mode'),
                class_='btn icon-btn ',
                style='color: %s' % self.title_color).action(self.switch_view)
            h << h.SyncRenderer().a(
                h.i(class_='icon-list'),
                title=_('Board mode'),
                class_='btn icon-btn disabled selected',
                style='color: %s' % self.title_color)
        else:
            h << h.SyncRenderer().a(
                h.i(class_='icon-calendar'),
                title=_('Calendar mode'),
                class_='btn icon-btn disabled selected',
                style='color: %s' % self.title_color)
            h << h.SyncRenderer().a(
                h.i(class_='icon-list'),
                title=_('Board mode'),
                class_='btn icon-btn',
                style='color: %s' % self.title_color).action(self.switch_view)
    return h.root
@presentation.render_for(Board, 'item')
def render_Board_item(self, h, comp, *args):
    """Render the board as a home-page list item with its action icons."""
    url = self.data.url
    with h.li:
        h << h.SyncRenderer().a(
            h.i(' ', class_=VISIBILITY_ICONS[self.data.visibility]),
            self.data.title,
            href=url, class_="boardItemLabel", title=self.data.description)
        with h.div(class_='actions'):
            h << self.comp_members.render(h, 'members')
            if security.has_permissions('manage', self):
                h << h.a(
                    h.i(class_='ico-btn icon-box-add'),
                    class_='archive',
                    title=_(u'Archive "%s"') % self.data.title
                ).action(self.archive, comp)
            elif security.has_permissions('leave', self):
                # Fix: quote the translated message through py2js (like the
                # other confirm() prompts in this file) so a quote or
                # backslash in a translation cannot break the inline JS.
                onclick = 'return confirm(%s)' % ajax.py2js(
                    _("You won't be able to access this board anymore. Are you sure you want to leave it anyway?")
                ).decode('UTF-8')
                h << h.SyncRenderer().a(
                    h.i(class_='ico-btn icon-exit'),
                    class_='leave',
                    title=_(u'Leave "%s"') % self.data.title,
                    onclick=onclick
                ).action(self.leave, comp)
            else:
                # place holder for alignment and for future feature 'request membership'
                h << h.a(h.i(class_='ico-btn icon-user-check'), style='visibility:hidden')
    return h.root
@presentation.render_for(Board, 'redirect')
def render_Board_redirect(self, h, comp, model):
    """Emit a client-side redirect to the board's canonical URL."""
    # NOTE(review): self.data.url is interpolated into inline JS unescaped;
    # URLs appear to be server-generated slugs here, but confirm they can
    # never contain a double quote.
    h << h.script('window.location.href="%s"' % self.data.url)
    return h.root
@presentation.render_for(Board, model="archived_item")
def render_Board_archived_item(self, h, comp, *args):
    """Render an archived board row; managers can delete or restore it."""
    with h.li(class_='archived-item'):
        h << h.span(
            h.i(' ', class_=VISIBILITY_ICONS[self.data.visibility]),
            self.data.title,
            href='#', class_="boardItemLabel")
        if security.has_permissions('manage', self):
            with h.div(class_='actions'):
                # Fix: quote through py2js (consistent with the rest of this
                # file) so quotes in a translated message cannot break the JS.
                onclick = 'return confirm(%s)' % ajax.py2js(
                    _("This board will be destroyed. Are you sure?")
                ).decode('UTF-8')
                h << h.SyncRenderer().a(
                    h.i(class_='ico-btn icon-bin'),
                    class_='delete',
                    title=_(u'Delete "%s"') % self.data.title,
                    onclick=onclick
                ).action(self.delete_clicked, comp)
                h << h.a(
                    h.i(class_='ico-btn icon-box-remove'),
                    class_='restore',
                    title=_(u'Restore "%s"') % self.data.title
                ).action(self.restore, comp)
    return h.root
@presentation.render_for(Board, model='members')
def render_Board_members(self, h, comp, *args):
    """Member section view for card
    First "add user" icons,
    Then icon "more user" if necessary
    And at the end member icons
    """
    with h.div(class_='members'):
        if security.has_permissions('Add Users', self):
            h << h.div(self.overlay_add_members, class_='add')
        # When the member list overflows, show a "see all" affordance
        # (one widget for the wide layout, one for the compact layout).
        if len(self.all_members) > self.MAX_SHOWN_MEMBERS:
            h << h.div(self.see_all_members, class_='more wide')
            h << h.div(self.see_all_members_compact, class_='more compact')
        with h.span(class_='wide'):
            # Only the first MAX_SHOWN_MEMBERS avatars are rendered inline.
            for m in self.all_members[:self.MAX_SHOWN_MEMBERS]:
                h << m.on_answer(self.handle_event, comp).render(h, 'overlay')
    return h.root
@presentation.render_for(Board, "members_list_overlay")
def render_Board_members_list_overlay(self, h, comp, *args):
    """Overlay to list all members"""
    h << h.h2(_('All members'))
    with h.form:
        with h.div(class_="members"):
            # Member events (e.g. role changes) bubble up through handle_event.
            h << [m.on_answer(self.handle_event, comp).render(h) for m in self.all_members]
    return h.root
def invite_members(board, application_url, emails):
    """Send board invitations to *emails* and return the JS snippet that
    makes the client refresh its board lists."""
    board.invite_members(emails, application_url)
    javascript_callback = 'reload_boards();'
    return javascript_callback
@presentation.render_for(Board, "add_member_overlay")
def render_Board_add_member_overlay(self, h, comp, *args):
    """Overlay to add member"""
    h << h.h2(_('Invite members'))
    application_url = h.request.application_url
    with h.div(class_="members search"):
        # The search box's answer (a list of emails) is fed to the
        # module-level invite_members() helper defined above.
        h << self.new_member.on_answer(invite_members, self, application_url)
    return h.root
@presentation.render_for(Icon)
def render_Icon(self, h, comp, *args):
    """Render the icon element, followed by its caption when one is set."""
    h << h.i(class_=self.icon, title=self.title)
    if self.title is not None:
        h << self.title
    return h.root
@presentation.render_for(Board, 'calendar')
def render_Board_calendar(self, h, comp, *args):
    """Render the board in calendar mode (fullcalendar widget).

    Renamed from ``render_Board_columns``: the original name collided with
    the 'columns' renderer below and was silently rebound at import time.
    Registration via the decorator is unaffected.
    """
    h.head.css_url('js/fullcalendar-2.2.6/fullcalendar.min.css')
    h.head.javascript_url('js/moment.js')
    h.head.javascript_url('js/fullcalendar-2.2.6/fullcalendar.min.js')
    # fullcalendar ships English strings; load a locale file for anything else.
    lang = security.get_user().get_locale().language
    if lang != 'en':
        h.head.javascript_url('js/fullcalendar-2.2.6/lang/%s.js' % lang)
    with h.div(id='viewport-wrapper'):
        with h.div(class_='clearfix', id='viewport'):
            h << h.div(id='calendar')
            h << h.script("""YAHOO.kansha.app.create_board_calendar($('#calendar'), %s)""" % ajax.py2js(True, h))
            # Each non-archive column contributes its cards as calendar events.
            for column in self.columns:
                if not column().is_archive:
                    h << column.render(h, 'calendar')
    return h.root
@presentation.render_for(Board, 'columns')
def render_Board_columns(self, h, comp, *args):
    """Render viewport containing the columns"""
    # Re-render the whole column view only when the server-side version
    # counter says another session changed the board meanwhile.
    update_if_version_mismatch = lambda renderer: comp.render(renderer, 'columns') if self.increase_version() else ''
    with h.div(id='viewport-wrapper'):
        with h.div(class_='clearfix', id='viewport'):
            # On cards drag and drop
            action = ajax.Update(action=self.update_card_position, render=update_if_version_mismatch)
            action = '%s;_a;%s=' % (h.add_sessionid_in_url(sep=';'), action._generate_replace(1, h))
            h.head.javascript(h.generate_id(), '''function _send_card_position(data) {
    nagare_getAndEval(%s + YAHOO.lang.JSON.stringify(data));
}''' % ajax.py2js(action))
            # On columns drag and drop
            action = ajax.Update(action=self.update_column_position, render=update_if_version_mismatch)
            action = '%s;_a;%s=' % (h.add_sessionid_in_url(sep=';'), action._generate_replace(1, h))
            h.head.javascript(h.generate_id(), '''function _send_column_position(data) {
    nagare_getAndEval(%s + YAHOO.lang.JSON.stringify(data));
}''' % ajax.py2js(action))
            # Create the reload_columns function used when we need to reload
            # the columns from javascript
            reload_board = h.a.action(ajax.Update()).get('onclick').replace('return', "")
            h.head.javascript(h.generate_id(), """function reload_columns(){%s}""" % reload_board)
            increase_version = h.a.action(ajax.Update(render=update_if_version_mismatch))
            h.head.javascript(h.generate_id(), """function increase_version() {%s}""" % increase_version.get('onclick'))
            # Render columns
            # Choose a CSS layout class from the number of visible columns
            # (the archive column is hidden unless show_archive is set).
            visible_cols = len(self.columns) - int(not self.show_archive)
            layout = ''
            if 2 < visible_cols < 6:
                layout = 'list-span-{}'.format(visible_cols)
            elif visible_cols < 3:
                layout = 'list-span-3'
            with h.div(id='lists'):
                if layout:
                    h << {'class': layout}
                h << h.div(' ', id='dnd-frame')
                for column in self.columns:
                    if column().is_archive and not self.show_archive:
                        continue
                    # Read-only users get model 0; editors get the column's
                    # own model or the drag-and-drop variant.
                    model = 0 if not security.has_permissions('edit', self) else column.model or 'dnd'
                    h << column.on_answer(self.handle_event, comp).render(h, model)
    return h.root
@presentation.render_for(BoardDescription)
def render_BoardDescription(self, h, comp, *args):
    """Render description component in edit mode"""
    h << h.h2(_(u'Edit board description'))
    with h.form(class_='description-form'):
        txt_id, btn_id = h.generate_id(), h.generate_id()
        h << h.label(_(u'Description'), for_=txt_id)
        h << h.textarea(self.description(), id_=txt_id, autofocus=True).action(self.description)
        with h.div(class_='buttons'):
            h << h.button(_('Save'), class_='btn btn-primary', id=btn_id).action(self.commit, comp)
            h << ' '
            h << h.a(_('Cancel'), class_='btn').action(self.cancel, comp)
    # Ctrl+Enter inside the textarea triggers the Save button.
    h.head.javascript(
        h.generate_id(),
        'YAHOO.kansha.app.addCtrlEnterHandler(%s, %s)' % (
            ajax.py2js(txt_id), ajax.py2js(btn_id)
        )
    )
    return h.root
@presentation.render_for(BoardConfig)
def render_BoardConfig(self, h, comp, *args):
    # The config panel is interactive, so delegate to the async 'edit' view.
    return comp.render(h.AsyncRenderer(), 'edit')
@presentation.render_for(BoardConfig, model='edit')
def render_BoardConfig_edit(self, h, comp, *args):
    """Render the board configuration panel"""
    h << h.h2(_(u'Board configuration'))
    with h.div(class_='row board-configuration'):
        with h.div(class_='menu'):
            with h.div:
                with h.ul:
                    for id_, item in self.menu.iteritems():
                        with h.li:
                            # id_=id_ binds the loop variable now, avoiding
                            # the late-binding-closure pitfall.
                            with h.a.action(lambda id_=id_: self.select(id_)):
                                if id_ == self.selected:
                                    h << {'class': 'active'}
                                h << h.i(class_='icon ' + item.icon)
                                h << h.span(item.label)
        with h.div:
            # Content of the currently selected menu entry.
            h << self.content
    h << h.script('YAHOO.kansha.app.hideOverlay();')
    return h.root
@presentation.render_for(BoardLabels, model='menu')
def render_BoardLabels_menu(self, h, comp, *args):
    """Render the link leading to the label configuration"""
    h << h.a(_('Labels')).action(comp.answer)
    return h.root
@presentation.render_for(BoardLabels)
@presentation.render_for(BoardLabels, model='edit')
def render_BoardLabels_edit(self, h, comp, *args):
    """Render the labels configuration panel"""
    with h.div(class_='panel-section'):
        h << h.div(_(u'Card labels'), class_='panel-section-title')
        with h.ul(class_='board-labels clearfix'):
            # Each label is a (title component, color component) pair.
            for title, label in self.labels:
                with h.li:
                    with h.div(class_='label-title'):
                        h << title.render(h.AsyncRenderer())
                    with h.div(class_='label-color'):
                        h << label
    return h.root
@presentation.render_for(BoardWeights, model='menu')
def render_boardweights_menu(self, h, comp, *args):
    """Render the link leading to the weights configuration"""
    h << h.a(_('Weights')).action(comp.answer)
    return h.root
@presentation.render_for(BoardWeights)
@presentation.render_for(BoardWeights, model='edit')
def render_boardweights_edit(self, h, comp, *args):
    """Render the weights configuration panel.

    Three mutually exclusive modes: off, free integers, or a fixed integer
    sequence. Switching away from a mode that already has weights set asks
    for confirmation, because those weights are reset.
    """
    with h.div(class_='panel-section'):
        h << h.div(_(u'Weighting cards'), class_='panel-section-title')
        h << h.p(_(u'Activate cards weights'))
        with h.form:
            with h.div(class_='btn-group'):
                action = h.a.action(self.activate_weighting, WEIGHTING_OFF).get('onclick')
                if self.board.total_weight() > 0:
                    # Wrap the action in a confirm() since weights get reset.
                    action = (
                        "if (confirm(%(message)s)){%(action)s;}return false" %
                        {
                            'action': action,
                            'message': ajax.py2js(
                                _(u'All affected weights will be reset. Are you sure?')
                            ).decode('UTF-8')
                        }
                    )
                h << h.a(
                    _('Disabled'),
                    class_='btn %s' % (
                        'active btn-primary'
                        if self.board.weighting_cards == WEIGHTING_OFF
                        else ''
                    ),
                    onclick=action
                )
                h << h.button(
                    _('Free integer'),
                    class_='btn %s' % (
                        'active btn-primary'
                        if self.board.weighting_cards == WEIGHTING_FREE
                        else ''
                    ),
                    onclick=h.a.action(
                        self.activate_weighting, WEIGHTING_FREE
                    ).get('onclick'),
                    title=_('Card weights can be any integer')
                )
                action = h.a.action(self.activate_weighting, WEIGHTING_LIST).get('onclick')
                if self.board.total_weight() > 0:
                    # Same confirm-wrapping as the "Disabled" case above.
                    action = (
                        "if (confirm(%(message)s)){%(action)s;}return false" %
                        {
                            'action': action,
                            'message': ajax.py2js(
                                _(u'All affected weights will be reset. Are you sure?')
                            ).decode('UTF-8')
                        }
                    )
                h << h.button(
                    _('Integer sequence'),
                    class_='btn %s' % (
                        'active btn-primary'
                        if self.board.weighting_cards == WEIGHTING_LIST
                        else ''
                    ),
                    onclick=action,
                    title=_('Choosen within a sequence of integers')
                )
        # The sequence editor only appears in "integer sequence" mode.
        if self.board.weighting_cards == WEIGHTING_LIST:
            h << h.p(_('Enter a sequence of integers'))
            h << self._weights_editor
    return h.root
@presentation.render_for(WeightsSequenceEditor)
def render_weightssequenceeditor_edit(self, h, comp, model):
    """Edit form for the allowed card-weight sequence.

    Shows the validation error when the entered sequence is rejected,
    otherwise the success feedback message after a save.
    """
    with h.form(class_='weights-form'):
        h << h.input(value=self.weights(), type='text').action(self.weights)
        h << h.button(_('Save'), class_='btn btn-primary').action(self.commit)
        if self.weights.error:
            h << h.div(self.weights.error, class_='nagare-error-message')
        elif self.feedback:
            with h.div(class_='success'):
                h << h.i(class_='icon-checkmark')
                h << self.feedback
    return h.root
@presentation.render_for(BoardProfile)
def render_BoardProfile(self, h, comp, *args):
    """Render the board profile form.

    Visibility, comments, votes and archive settings are board-wide and
    manager-only; the notifications setting is per-user.
    (NOTE(review): nesting reconstructed from mangled indentation — confirm
    the notifications section is indeed outside the 'manage' guard.)
    """
    if security.has_permissions('manage', self.board):
        with h.div(class_='panel-section'):
            h << h.div(_(u'Content Visibility'), class_='panel-section-title')
            h << h.p(_(u'Choose whether the board is private, public (anyone with the URL can view it ) or shared (public + visible on homepages).'))
            with h.form:
                with h.div(class_='btn-group'):
                    active = 'active btn-primary' if self.board.visibility == BOARD_PRIVATE else ''
                    h << h.button(_('Private'), class_='btn %s' % active).action(
                        lambda: self.board.set_visibility(BOARD_PRIVATE))
                    active = 'active btn-primary' if self.board.visibility == BOARD_PUBLIC else ''
                    h << h.button(_('Public'), class_='btn %s' % active).action(
                        lambda: self.board.set_visibility(BOARD_PUBLIC))
                    active = 'active btn-primary' if self.board.visibility == BOARD_SHARED else ''
                    h << h.button(_('Shared'), class_='btn %s' % active).action(
                        lambda: self.board.set_visibility(BOARD_SHARED))
        with h.div(class_='panel-section'):
            h << h.div(_(u'Comments'), class_='panel-section-title')
            h << h.p(_('Commenting allows members to make short messages on cards. You can enable or disable this feature.'))
            with h.form:
                with h.div(class_='btn-group'):
                    active = 'active btn-primary' if self.board.comments_allowed == COMMENTS_OFF else ''
                    h << h.button(_('Disabled'), class_='btn %s' % active).action(
                        lambda: self.allow_comments(COMMENTS_OFF))
                    active = 'active btn-primary' if self.board.comments_allowed == COMMENTS_MEMBERS else ''
                    h << h.button(_('Members'), class_='btn %s' % active).action(
                        lambda: self.allow_comments(COMMENTS_MEMBERS))
                    # Public comments only make sense on an open board.
                    kw = {} if self.board.is_open else {
                        "disabled": "disabled"}
                    active = 'active btn-primary' if self.board.comments_allowed == COMMENTS_PUBLIC else ''
                    h << h.button(_('Public'), class_='btn %s' % active, **kw).action(
                        lambda: self.allow_comments(COMMENTS_PUBLIC))
        with h.div(class_='panel-section'):
            h << h.div(_(u'Votes'), class_='panel-section-title')
            h << h.p(_(u'Allow votes'))
            with h.form:
                with h.div(class_='btn-group'):
                    active = 'active btn-primary' if self.board.votes_allowed == VOTES_OFF else ''
                    h << h.button(_('Disabled'), class_='btn %s' %
                                  active).action(lambda: self.allow_votes(VOTES_OFF))
                    active = 'active btn-primary' if self.board.votes_allowed == VOTES_MEMBERS else ''
                    h << h.button(_('Members'), class_='btn %s' % active).action(
                        lambda: self.allow_votes(VOTES_MEMBERS))
                    kw = {} if self.board.is_open else {
                        "disabled": "disabled"}
                    active = 'active btn-primary' if self.board.votes_allowed == VOTES_PUBLIC else ''
                    h << h.button(_('Public'), class_='btn %s' % active,
                                  **kw).action(lambda: self.allow_votes(VOTES_PUBLIC))
        with h.div(class_='panel-section'):
            h << h.div(_(u'Archive'), class_='panel-section-title')
            h << h.p(_(u'View archive column'))
            with h.form:
                with h.div(class_='btn-group'):
                    active = 'active btn-primary' if self.board.show_archive else ''
                    h << h.button(_('Show'), class_='btn %s' % active).action(lambda: self.set_archive(1))
                    active = 'active btn-primary' if not self.board.show_archive else ''
                    h << h.button(_('Hide'), class_='btn %s' % active).action(lambda: self.set_archive(0))
    with h.div(class_='panel-section'):
        h << h.div(_(u'Notifications'), class_='panel-section-title')
        h << h.p(_(u'You will be notified by email of changes made in this board to cards'))
        with h.form:
            with h.div(class_='btn-group'):
                active = 'active btn-primary' if self.notifications_allowed == notifications.NOTIFY_OFF else ''
                h << h.button(_('None'), class_='btn %s' % active).action(self.allow_notifications, notifications.NOTIFY_OFF)
                active = 'active btn-primary' if self.notifications_allowed == notifications.NOTIFY_MINE else ''
                h << h.button(_('Affected to me'), class_='btn %s' % active).action(self.allow_notifications, notifications.NOTIFY_MINE)
                active = 'active btn-primary' if self.notifications_allowed == notifications.NOTIFY_ALL else ''
                h << h.button(_('All'), class_='btn %s' % active).action(self.allow_notifications, notifications.NOTIFY_ALL)
    return h.root
@presentation.render_for(BoardMember)
def render_BoardMember(self, h, comp, *args):
    """Render one board member; managers get the role-specific interactive
    view (whose answers are dispatched as membership actions)."""
    application_url = h.request.application_url
    if security.has_permissions('manage', self.board):
        return self.user.on_answer(
            lambda action: self.dispatch(action, application_url)
        ).render(h, model='%s' % self.role)
    else:
        # Non-managers just see the plain member avatar.
        return h.div(self.user.render(h), class_='member')
@presentation.render_for(BoardMember, model="overlay")
def render_BoardMember_overlay(self, h, comp, *args):
    """Compact member rendering used inside the members strip/overlay."""
    application_url = h.request.application_url
    if security.has_permissions('manage', self.board):
        return self.user.on_answer(
            lambda action: self.dispatch(action, application_url)
        ).render(h, model='overlay-%s' % self.role)
    else:
        # Read-only avatar; 'unselectable' prevents drag selection artifacts.
        member = self.user.render(h, "avatar")
        member.attrib.update({'class': 'avatar unselectable'})
        return member
@presentation.render_for(BoardBackground, model='menu')
def render_board_background_menu(self, h, comp, *args):
    """Render the link leading to the background configuration."""
    return h.a(_('Background image')).action(comp.answer)
@presentation.render_for(BoardBackground)
@presentation.render_for(BoardBackground, model='edit')
def render_board_background_edit(self, h, comp, *args):
    """Render the background configuration panel"""
    with h.div(class_='panel-section'):
        h << h.div(_(u'Background image'), class_='panel-section-title')
        with h.div:
            with h.div:
                v_file = var.Var()
                submit_id = h.generate_id("attach_submit")
                input_id = h.generate_id("attach_input")
                # Styled label acts as the visible button for the hidden input.
                h << h.label((h.i(class_='icon-file-picture'), u' ',
                              _("Choose an image")), class_='btn', for_=input_id)
                with h.form(class_='hidden'):
                    # Auto-submit as soon as a (size-checked) file is picked.
                    h << h.script(
                        u'''
                        function valueChanged(e) {
                            if (YAHOO.kansha.app.checkFileSize(this, %(max_size)s)) {
                                YAHOO.util.Dom.get(%(submit_id)s).click();
                            } else {
                                alert(%(error)s);
                            }
                        }
                        YAHOO.util.Event.onDOMReady(function() {
                            YAHOO.util.Event.on(%(input_id)s, 'change', valueChanged);
                        });''' % {
                            'max_size': ajax.py2js(self.board.background_max_size, h),
                            'input_id': ajax.py2js(input_id, h),
                            'submit_id': ajax.py2js(submit_id, h),
                            'error': ajax.py2js(
                                _(u'Max file size exceeded'), h
                            ).decode('UTF-8')
                        }
                    )
                    h << h.input(id=input_id, class_='hidden', type="file", name="file",
                                 multiple="multiple", maxlength="100",).action(v_file)
                    h << h.input(id=submit_id, class_='hidden', type="submit").action(
                        lambda: self.set_background(v_file()))
                h << ' ' << _('or') << ' '
                h << h.a(_(u'Reset background')).action(self.reset_background)
            with h.p(class_='text-center'):
                h << component.Component(self.board, model='background_image')
            with h.div:
                input_id = h.generate_id()
                submit_id = h.generate_id("image_position_submit")
                # Changing the select immediately submits the position form.
                h << h.script(u'''YAHOO.util.Event.onDOMReady(function() {
                    YAHOO.util.Event.on(%(input_id)s, 'change', function() { YAHOO.util.Dom.get(%(submit_id)s).click(); });
                });''' % {'input_id': ajax.py2js(input_id, h),
                          'submit_id': ajax.py2js(submit_id, h)})
                with h.form:
                    h << h.label(_(u'Image position'), for_=input_id)
                    with h.select(id_=input_id).action(self.background_position):
                        for value, name in BACKGROUND_POSITIONS:
                            h << h.option(_(name), value=value).selected(self.background_position())
                    h << h.input(class_='hidden', id_=submit_id, type="submit").action(self.set_background_position)
    with h.div(class_='panel-section'):
        h << h.div(_(u'Board title color'), class_='panel-section-title')
        with h.div:
            with h.div:
                h << comp.render(h, model='title-color-edit')
                h << ' ' << _('or') << ' '
                h << h.a(_('Reset to default color')).action(self.reset_color)
    return h.root
@presentation.render_for(Board, model='background_image')
def render_board_background_image(self, h, comp, *args):
    """Show the board's current background image, or a placeholder text."""
    fileid = self.data.background_image
    try:
        metadata = self.assets_manager.get_metadata(fileid)
        src = self.assets_manager.get_image_url(fileid, 'medium')
        # Cache-buster so the browser refetches after the image changes.
        src += '?r=' + h.generate_id()
        return h.img(title=metadata['filename'], alt=metadata['filename'], src=src)
    except Exception:
        # Deliberate best effort: any asset-manager failure (or no image
        # set) degrades to a plain message instead of breaking the panel.
        return _(u'No background selected')
@presentation.render_for(BoardBackground, model='title-color-edit')
def render_board_background_title_color_edit(self, h, comp, *args):
    """Edit the label color"""
    # If label changed reload columns
    # Overlay: the swatch ('title-color') opens the color picker overlay.
    h << component.Component(overlay.Overlay(lambda r: comp.render(r, model='title-color'),
                                             lambda r: comp.render(r,
                                                                   model='title-color-overlay'),
                                             dynamic=False,
                                             title=_('Change color')))
    return h.root
@presentation.render_for(BoardBackground, model='title-color')
def render_board_background_title_color(self, h, comp, *args):
    """Render the clickable swatch showing the current title color."""
    style = 'background-color:%s' % (self.board.title_color or u'')
    h << h.span(class_='board-title-color', style=style)
    return h.root
@presentation.render_for(BoardBackground, model='title-color-overlay')
def render_board_background_title_color_overlay(self, h, comp, *args):
    """Color chooser contained in the overlay body"""
    v = var.Var(self.board.title_color)
    i = h.generate_id()
    h << h.div(id=i, class_='label-color-picker clearfix')
    with h.form:
        # The JS color picker writes the chosen hex value into this field.
        h << h.input(type='hidden', value=v(), id='%s-hex-value' % i).action(v)
        h << h.button(_('Save'), class_='btn btn-primary').action(
            ajax.Update(action=lambda v=v: self.set_color(v())))
        h << ' '
        h << h.button(_('Cancel'), class_='btn').action(lambda: None)
    h << h.script("YAHOO.kansha.app.addColorPicker(%s)" % ajax.py2js(i))
    return h.root
##### BoardsManager
@presentation.render_for(BoardsManager)
def render_userboards(self, h, comp, *args):
    """Home page: board-creation form plus the user's board lists
    (last modified, mine, guest, shared, archived)."""
    template = var.Var(u'')
    h.head << h.head.title(self.app_title)
    h.head.css_url('css/themes/home.css?v=2c')
    h.head.css_url('css/themes/%s/home.css?v=2c' % self.theme)
    # Built-in template names arrive untranslated from the DB; map them to
    # their translated labels.
    default_template_i18n = {
        'Empty board': _(u'Empty board'),
        'Basic Kanban': _(u'Basic Kanban')
    }
    with h.div(class_='new-board'):
        with h.form:
            h << h.SyncRenderer().button(_(u'Create'), type='submit', class_='btn btn-primary').action(lambda: self.create_board(template(), comp))
            h << (u' ', _(u'a new'), u' ')
            if len(self.templates) > 1:
                with h.select.action(template):
                    with h.optgroup(label=_(u'Shared templates')):
                        h << [h.option(default_template_i18n.get(tpl, tpl), value=id_) for
                              id_, tpl in self.templates['public']]
                    if self.templates['private']:
                        with h.optgroup(label=_(u'My templates')):
                            h << [h.option(_(tpl), value=id_) for
                                  id_, tpl in self.templates['private']]
            else:
                # Single choice: preselect it and show its name as plain text.
                # NOTE(review): .items()[0] is Python 2 only and appears to
                # yield a (key, value) pair of the templates dict — confirm
                # the intended shape of self.templates in this branch.
                id_, tpl = self.templates.items()[0]
                template(id_)
                h << tpl
    if self.last_modified_boards:
        h << h.h1(_(u'Last modified boards'))
        with h.ul(class_='board-labels'):
            h << [b.on_answer(self.handle_event).render(h, 'item') for b in self.last_modified_boards]
    h << h.h1(_(u'My boards'))
    if self.my_boards:
        with h.ul(class_='board-labels'):
            h << [b.on_answer(self.handle_event).render(h, 'item') for b in self.my_boards]
    else:
        h << h.p(_(u'Create a board by choosing a template in the menu above, then click on the "Create" button.'))
    if self.guest_boards:
        h << h.h1(_(u'Guest boards'))
        with h.ul(class_='board-labels'):
            h << [b.on_answer(self.handle_event).render(h, 'item') for b in self.guest_boards]
    if self.shared_boards:
        h << h.h1(_(u'Shared boards'))
        with h.ul(class_='board-labels'):
            h << [b.on_answer(self.handle_event).render(h, 'item') for b in self.shared_boards]
    if len(self.archived_boards):
        h << h.h1(_('Archived boards'))
        with h.ul(class_='board-labels'):
            h << [b.on_answer(self.handle_event).render(h, 'archived_item')
                  for b in self.archived_boards]
        with h.form:
            h << h.SyncRenderer().button(
                _('Delete the archived board') if len(self.archived_boards) == 1 else _('Delete the archived boards'),
                class_='delete',
                onclick='return confirm(%s)' % ajax.py2js(
                    _('Deleted boards cannot be restored. Are you sure?')
                ).decode('UTF-8'),
                type='submit'
            ).action(self.purge_archived_boards)
    # reload_boards() is called from JS after invitations are sent.
    h << h.script('YAHOO.kansha.app.hideOverlay();'
                  'function reload_boards() { %s; }' % h.AsyncRenderer().a.action(ajax.Update(action=self.load_user_boards, render=0)).get('onclick'))
    return h.root
| bsd-3-clause |
hparik11/Deep-Learning-Nanodegree-Foundation-Repository | reinforcement/gym/gym/utils/atomic_write.py | 9 | 1951 | # Based on http://stackoverflow.com/questions/2333872/atomic-writing-to-file-with-python
import os
from contextlib import contextmanager
# We would ideally atomically replace any existing file with the new
# version. However, on Windows there's no Python-only solution prior
# to Python 3.3. (This library includes a C extension to do so:
# https://pypi.python.org/pypi/pyosreplace/0.1.)
#
# Correspondingly, we make a best effort, but on Python < 3.3 use a
# replace method which could result in the file temporarily
# disappearing.
import sys
if sys.version_info >= (3, 3):
    # Python 3.3 and up have a native `replace` method
    from os import replace
elif sys.platform.startswith("win"):
    def replace(src, dst):
        # Emulation for Windows on Python < 3.3: delete-then-rename is NOT
        # atomic — the destination briefly disappears (see module comment).
        # TODO: on Windows, this will raise if the file is in use,
        # which is possible. We'll need to make this more robust over
        # time.
        try:
            os.remove(dst)
        except OSError:
            pass
        os.rename(src, dst)
else:
    # POSIX rename() is always atomic
    from os import rename as replace
@contextmanager
def atomic_write(filepath, binary=False, fsync=False):
    """Context manager yielding a file object whose contents replace
    *filepath* atomically on success. In some cases (namely Python < 3.3 on
    Windows), this could result in an existing file being temporarily
    unlinked.

    :param filepath: the file path to be opened
    :param binary: whether to open the file in a binary mode instead of textual
    :param fsync: whether to force write the file to disk
    """
    # Pick a sibling temporary name that does not collide with a real file.
    temp_path = filepath + '~'
    while os.path.isfile(temp_path):
        temp_path += '~'
    mode = 'wb' if binary else 'w'
    try:
        with open(temp_path, mode) as handle:
            yield handle
            if fsync:
                handle.flush()
                os.fsync(handle.fileno())
        replace(temp_path, filepath)
    finally:
        # Best-effort cleanup; after a successful replace the temp file is
        # already gone, so the removal error is ignored.
        try:
            os.remove(temp_path)
        except (IOError, OSError):
            pass
| mit |
izacarias/mininet | arp_storm_control.py | 1 | 6849 | +# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Contributor:Li Cheng @BUPT
+# Homepage:www.muzixing.com
+# Time:2014/10/19
+#
+
+from ryu.base import app_manager
+from ryu.controller import ofp_event
+from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
+from ryu.controller.handler import set_ev_cls
+from ryu.ofproto import ofproto_v1_3
+from ryu.lib.packet import packet
+from ryu.lib.packet import ethernet
+from ryu.lib.packet import arp
+from ryu.lib.packet import ipv6
+from ryu.lib import mac
+
+
class SimpleARPProxy13(app_manager.RyuApp):
    """OpenFlow 1.3 learning switch with ARP handling that suppresses
    broadcast storms (leaked diff '+' markers removed from this block)."""
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleARPProxy13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}  # dpid -> {mac: in_port}, learned from traffic
        self.arp_table = {}    # ip -> mac, learned from ARP packets
        self.sw = {}           # (dpid, src_mac, dst_ip) -> first in_port seen
+ @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
+ def switch_features_handler(self, ev):
+ datapath = ev.msg.datapath
+ ofproto = datapath.ofproto
+ parser = datapath.ofproto_parser
+
+ # install table-miss flow entry
+ #
+ # We specify NO BUFFER to max_len of the output action due to
+ # OVS bug. At this moment, if we specify a lesser number, e.g.,
+ # 128, OVS will send Packet-In with invalid buffer_id and
+ # truncated packet data. In that case, we cannot output packets
+ # correctly.
+
+ match = parser.OFPMatch()
+ actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
+ ofproto.OFPCML_NO_BUFFER)]
+ self.add_flow(datapath, 0, match, actions)
+
+ def add_flow(self, datapath, priority, match, actions):
+ ofproto = datapath.ofproto
+ parser = datapath.ofproto_parser
+
+ inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
+ actions)]
+
+ mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
+ idle_timeout=5, hard_timeout=15,
+ match=match, instructions=inst)
+ datapath.send_msg(mod)
+
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learning-switch packet-in handler with ARP proxying.

        Learns source MAC and ARP bindings, installs a drop flow for
        IPv6 traffic, lets arp_handler() answer or suppress ARP where
        possible, and otherwise forwards like a plain L2 switch
        (installing a flow once the output port is known).
        """
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        dst = eth.dst
        src = eth.src
        dpid = datapath.id
        # IPv6 is not handled by this app: install a drop rule and bail out.
        if pkt.get_protocol(ipv6.ipv6): # Drop the IPV6 Packets.
            match = parser.OFPMatch(eth_type=eth.ethertype)
            actions = []
            self.add_flow(datapath, 1, match, actions)
            return None
        arp_pkt = pkt.get_protocol(arp.arp)
        if arp_pkt:
            self.arp_table[arp_pkt.src_ip] = src # ARP learning
        self.mac_to_port.setdefault(dpid, {})
        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
        # Learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port
        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            # Unknown destination: give the ARP proxy a chance to reply or
            # drop; only flood when it declines.
            if self.arp_handler(msg):  # 1:reply or drop;  0: flood
                return None
            else:
                out_port = ofproto.OFPP_FLOOD
        actions = [parser.OFPActionOutput(out_port)]
        # Install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
            self.add_flow(datapath, 1, match, actions)
        data = None
        # Attach the raw packet only when the switch did not buffer it.
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
    def arp_handler(self, msg):
        """ARP broadcast-loop suppression and proxy reply.

        Returns True when the packet was fully handled here (dropped to
        break a broadcast loop, or answered out of self.arp_table);
        returns False when the caller should flood it instead.
        """
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        arp_pkt = pkt.get_protocol(arp.arp)
        if eth:
            eth_dst = eth.dst
            eth_src = eth.src
        # Break the loop for avoiding ARP broadcast storm
        if eth_dst == mac.BROADCAST_STR and arp_pkt:
            arp_dst_ip = arp_pkt.dst_ip
            # (dpid, src MAC, target IP) identifies one ARP request; seeing
            # it again on a *different* port means it has looped back.
            if (datapath.id, eth_src, arp_dst_ip) in self.sw:
                if self.sw[(datapath.id, eth_src, arp_dst_ip)] != in_port:
                    # Empty action list == drop the packet.
                    datapath.send_packet_out(in_port=in_port, actions=[])
                    return True
            else:
                self.sw[(datapath.id, eth_src, arp_dst_ip)] = in_port
        # Try to reply arp request
        if arp_pkt:
            hwtype = arp_pkt.hwtype
            proto = arp_pkt.proto
            hlen = arp_pkt.hlen
            plen = arp_pkt.plen
            opcode = arp_pkt.opcode
            arp_src_ip = arp_pkt.src_ip
            arp_dst_ip = arp_pkt.dst_ip
            if opcode == arp.ARP_REQUEST:
                # Proxy-reply on behalf of the target when its MAC is known.
                if arp_dst_ip in self.arp_table:
                    actions = [parser.OFPActionOutput(in_port)]
                    ARP_Reply = packet.Packet()
                    ARP_Reply.add_protocol(ethernet.ethernet(
                        ethertype=eth.ethertype,
                        dst=eth_src,
                        src=self.arp_table[arp_dst_ip]))
                    ARP_Reply.add_protocol(arp.arp(
                        opcode=arp.ARP_REPLY,
                        src_mac=self.arp_table[arp_dst_ip],
                        src_ip=arp_dst_ip,
                        dst_mac=eth_src,
                        dst_ip=arp_src_ip))
                    ARP_Reply.serialize()
                    # Send the crafted reply back out of the ingress port.
                    out = parser.OFPPacketOut(
                        datapath=datapath,
                        buffer_id=ofproto.OFP_NO_BUFFER,
                        in_port=ofproto.OFPP_CONTROLLER,
                        actions=actions, data=ARP_Reply.data)
                    datapath.send_msg(out)
                    return True
        return False
| apache-2.0 |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/boto/sqs/attributes.py | 223 | 1718 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SQS Attribute Name/Value set
"""
class Attributes(dict):
    """Dict of SQS attribute name/value pairs, populated by boto's SAX
    XML machinery through startElement/endElement callbacks."""

    def __init__(self, parent):
        # Remember the owning object; no name/value pair is in flight yet.
        self.parent = parent
        self.current_key = None
        self.current_value = None

    def startElement(self, name, attrs, connection):
        # Opening tags carry nothing we need.
        pass

    def endElement(self, name, value, connection):
        # <Name>/<Value> stash the pieces of the pending pair; a closing
        # <Attribute> commits it; anything else becomes a plain attribute
        # on this object.
        if name == 'Name':
            self.current_key = value
        elif name == 'Value':
            self.current_value = value
        elif name == 'Attribute':
            self[self.current_key] = self.current_value
        else:
            setattr(self, name, value)
| gpl-2.0 |
meredith-digops/ansible | lib/ansible/modules/database/mongodb/mongodb_parameter.py | 70 | 7333 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
(c) 2016, Loic Blot <loic.blot@unix-experience.fr>
Sponsored by Infopro Digital. http://www.infopro-digital.com/
Sponsored by E.T.A.I. http://www.etai.fr/
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mongodb_parameter
short_description: Change an administrative parameter on a MongoDB server.
description:
- Change an administrative parameter on a MongoDB server.
version_added: "2.1"
options:
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 27017
login_database:
description:
- The database where login credentials are stored
required: false
default: null
replica_set:
description:
- Replica set to connect to (automatically connects to primary for writes)
required: false
default: null
database:
description:
- The name of the database to add/remove the user from
required: true
ssl:
description:
- Whether to use an SSL connection when connecting to the database
required: false
default: false
param:
description:
- MongoDB administrative parameter to modify
required: true
value:
description:
- MongoDB administrative parameter value to set
required: true
param_type:
description:
- Define the parameter value (str, int)
required: false
default: str
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author: "Loic Blot (@nerzhul)"
'''
EXAMPLES = '''
# Set MongoDB syncdelay to 60 (this is an int)
- mongodb_parameter:
param: syncdelay
value: 60
param_type: int
'''
RETURN = '''
before:
description: value before modification
returned: success
type: string
after:
description: value after modification
returned: success
type: string
'''
import os
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves import configparser
# =========================================
# MongoDB module specific support methods.
#
def load_mongocnf():
    """Read MongoDB credentials from ~/.mongodb.cnf ([client] section).

    Returns a dict with 'user' and 'password' keys, or False when the
    file is missing/unreadable or lacks the expected options.
    """
    config = configparser.RawConfigParser()
    mongocnf = os.path.expanduser('~/.mongodb.cnf')

    try:
        # Use a context manager so the file handle is always closed (the
        # previous version left it open).  read_file() is the Python 3
        # spelling; fall back to readfp() on interpreters without it.
        with open(mongocnf) as cnf_file:
            reader = getattr(config, 'read_file', None) or config.readfp
            reader(cnf_file)
        creds = dict(
            user=config.get('client', 'user'),
            password=config.get('client', 'pass')
        )
    except (configparser.NoOptionError, IOError):
        return False

    return creds
# =========================================
# Module execution.
#
def main():
    """Ansible module entry point: connect to MongoDB and apply one
    ``setParameter`` admin command, reporting before/after values.

    Always terminates through module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None),
            login_password=dict(default=None, no_log=True),
            login_host=dict(default='localhost'),
            login_port=dict(default=27017, type='int'),
            login_database=dict(default=None),
            replica_set=dict(default=None),
            param=dict(default=None, required=True),
            value=dict(default=None, required=True),
            param_type=dict(default="str", choices=['str', 'int']),
            ssl=dict(default=False, type='bool'),
        )
    )
    if not pymongo_found:
        module.fail_json(msg='the python pymongo module is required')
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    login_database = module.params['login_database']
    replica_set = module.params['replica_set']
    ssl = module.params['ssl']
    param = module.params['param']
    param_type = module.params['param_type']
    value = module.params['value']
    # Verify parameter is coherent with specified type
    try:
        if param_type == 'int':
            value = int(value)
    except ValueError:
        e = get_exception()
        module.fail_json(msg="value '%s' is not %s" % (value, param_type))
    try:
        if replica_set:
            client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
        else:
            client = MongoClient(login_host, int(login_port), ssl=ssl)
        # Credentials may come from explicit args or fall back to
        # ~/.mongodb.cnf; supplying only one of user/password is an error.
        if login_user is None and login_password is None:
            mongocnf_creds = load_mongocnf()
            if mongocnf_creds is not False:
                login_user = mongocnf_creds['user']
                login_password = mongocnf_creds['password']
        elif login_password is None or login_user is None:
            module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
        if login_user is not None and login_password is not None:
            client.admin.authenticate(login_user, login_password, source=login_database)
    except ConnectionFailure:
        e = get_exception()
        module.fail_json(msg='unable to connect to database: %s' % str(e))
    db = client.admin
    try:
        after_value = db.command("setParameter", **{param: value})
    except OperationFailure:
        e = get_exception()
        module.fail_json(msg="unable to change parameter: %s" % str(e))
    # setParameter returns the previous value under the "was" key; when it
    # is absent we cannot compute a reliable changed status.
    if "was" not in after_value:
        module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.")
    else:
        module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
                         after=value)
if __name__ == '__main__':
    main()
| gpl-3.0 |
commtrack/commtrack-core | apps/django_extensions/management/jobs.py | 17 | 4853 | """
django_extensions.management.jobs
"""
import os
from imp import find_module
_jobs = None
def noneimplementation(meth):
    """Stub used in place of a real implementation; ignores *meth* and
    always gives back None."""
    return None
class JobError(Exception):
    """Raised when a job module is malformed or scheduled incorrectly."""
class BaseJob(object):
    """Common base class for cron-style jobs.

    Subclasses set ``when`` ('hourly', 'daily', 'weekly', 'monthly', or
    None for unscheduled) and must override execute().
    """
    help = "undefined job description."
    when = None

    def execute(self):
        """Run the job; subclasses are required to override this."""
        raise NotImplementedError("Job needs to implement the execute method")
class HourlyJob(BaseJob):
    """Job scheduled to run once every hour."""
    when = "hourly"
class DailyJob(BaseJob):
    """Job scheduled to run once a day."""
    when = "daily"
class WeeklyJob(BaseJob):
    """Job scheduled to run once a week."""
    when = "weekly"
class MonthlyJob(BaseJob):
    """Job scheduled to run once a month."""
    when = "monthly"
def my_import(name):
    """Import dotted module path *name* and return the leaf module
    (``__import__`` alone returns the top-level package)."""
    module = __import__(name)
    # Walk from the top-level package down to the requested leaf.
    for part in name.split('.')[1:]:
        module = getattr(module, part)
    return module
def find_jobs(jobs_dir):
    """Return the names (without the .py suffix) of candidate job modules
    found directly in *jobs_dir*; underscore-prefixed files are skipped."""
    try:
        entries = os.listdir(jobs_dir)
    except OSError:
        # A missing or unreadable directory simply means "no jobs here".
        return []
    return [entry[:-3] for entry in entries
            if entry.endswith(".py") and not entry.startswith('_')]
def find_job_module(app_name, when=None):
    """Locate the filesystem path of *app_name*'s ``jobs`` package (or its
    ``jobs/<when>`` sub-package) using ``imp.find_module``.

    Raises ImportError when any path component cannot be found.
    NOTE(review): relies on the deprecated ``imp`` module (removed in
    Python 3.12) -- confirm the project's supported Python versions.
    """
    parts = app_name.split('.')
    parts.append('jobs')
    if when:
        parts.append(when)
    parts.reverse()
    path = None
    while parts:
        part = parts.pop()
        # find_module needs the parent package's path to resolve submodules;
        # path is None for the first (top-level) component.
        f, path, descr = find_module(part, path and [path] or None)
    return path
def import_job(app_name, name, when=None):
    """Import job module ``<app_name>.jobs[.<when>].<name>`` and return its
    ``Job`` class.

    Raises JobError when the module has no ``Job`` attribute, or when the
    job's ``when`` does not match the requested schedule.
    """
    jobmodule = "%s.jobs.%s%s" % (app_name, when and "%s." % when or "", name)
    job_mod = my_import(jobmodule)
    try:
        job = job_mod.Job
    except AttributeError:
        # Only a missing attribute means "no Job class"; the previous bare
        # except also swallowed unrelated errors such as KeyboardInterrupt.
        raise JobError("Job module %s does not contain class instance named 'Job'" % jobmodule)
    if when and not (job.when == when or job.when is None):
        raise JobError("Job %s is not a %s job." % (jobmodule, when))
    return job
def get_jobs(when=None, only_scheduled=False):
    """
    Returns a dictionary mapping (app_name, job_name) tuples to their
    respective job classes, scanning every installed Django app.

    The result is cached in the module-level ``_jobs`` after the first
    scan; *when*/*only_scheduled* filter which jobs are collected.
    """
    global _jobs
    # FIXME: HACK: make sure the project dir is on the path when executed as ./manage.py
    import sys
    try:
        cpath = os.path.dirname(os.path.realpath(sys.argv[0]))
        ppath = os.path.dirname(cpath)
        if ppath not in sys.path:
            sys.path.append(ppath)
    except:
        pass
    if _jobs is None:
        _jobs = {}
        if True:
            from django.conf import settings
            for app_name in settings.INSTALLED_APPS:
                # Scan the generic jobs/ dir plus every schedule subdir,
                # unless a specific schedule was requested.
                scandirs = (None, 'hourly', 'daily', 'weekly', 'monthly')
                if when:
                    scandirs = None, when
                for subdir in scandirs:
                    try:
                        path = find_job_module(app_name, subdir)
                        for name in find_jobs(path):
                            if (app_name, name) in _jobs:
                                raise JobError("Duplicate job %s" % name)
                            job = import_job(app_name, name, subdir)
                            if only_scheduled and job.when == None:
                                # only include jobs which are scheduled
                                continue
                            if when and job.when != when:
                                # generic job not in same schedule
                                continue
                            _jobs[(app_name, name)] = job
                    except ImportError:
                        pass # No job module -- continue scanning
    return _jobs
def get_job(app_name, job_name):
    """Return the job class registered as (app_name, job_name).

    When *app_name* is falsy, every app is searched for *job_name*.
    Raises KeyError if no matching job exists.
    """
    jobs = get_jobs()
    if app_name:
        return jobs[(app_name, job_name)]
    for app, name in jobs.keys():
        if name == job_name:
            return jobs[(app, name)]
    raise KeyError("Job not found: %s" % job_name)
def print_jobs(when=None, only_scheduled=False, show_when=True, \
               show_appname=False, show_header=True):
    """Print a formatted table of the discovered jobs.

    Columns (app name, schedule) are toggled by *show_appname* /
    *show_when*; *when*/*only_scheduled* are forwarded to get_jobs().
    NOTE: Python 2 only (uses print statements).
    """
    jobmap = get_jobs(when, only_scheduled=only_scheduled)
    print "Job List: %i jobs" % len(jobmap)
    jlist = jobmap.keys()
    jlist.sort()
    # Compute per-column widths from the longest entry in each column.
    appname_spacer = "%%-%is" % max(len(e[0]) for e in jlist)
    name_spacer = "%%-%is" % max(len(e[1]) for e in jlist)
    when_spacer = "%%-%is" % max(len(e.when) for e in jobmap.values() if e.when)
    if show_header:
        line = " "
        if show_appname:
            line += appname_spacer % "appname" + " - "
        line += name_spacer % "jobname"
        if show_when:
            line += " - " + when_spacer % "when"
        line += " - help"
        print line
        print "-"*80
    for app_name, job_name in jlist:
        job = jobmap[(app_name, job_name)]
        line = " "
        if show_appname:
            line += appname_spacer % app_name + " - "
        line += name_spacer % job_name
        if show_when:
            # Unscheduled jobs (when is None) show an empty schedule cell.
            line += " - " + when_spacer % (job.when and job.when or "")
        line += " - " + job.help
        print line
| bsd-3-clause |
schwartzmx/py-tools | gitignore-grabber.py | 1 | 1989 | #!/usr/bin/env python
__author__ = 'Phil Schwartz'
# gitignore-grabber.py - Grabs gitignore generated files from gitignore.io depending on the programming language
# and/or IDE and/or OS args given (comma separated).
# python gitignore-grabber.py language,IDE,OS repoDirectory
import sys
import os
import urllib
import argparse
def grab_and_save(url, repoDir):
    """Download *url* and save it as ``.gitignore`` inside *repoDir*,
    removing any existing .gitignore first.

    :param url: full gitignore.io API URL to fetch.
    :param repoDir: directory (with or without trailing slash) to save into.
    """
    # os.path.join handles the trailing-slash cases the old string
    # concatenation dealt with by hand.
    target = os.path.join(repoDir, ".gitignore")

    # Remove a stale .gitignore so the download always starts fresh.
    if os.path.isfile(target):
        os.remove(target)

    # Fetch the generated ignore rules straight to disk.
    # NOTE(review): urllib.urlretrieve is the Python 2 API; on Python 3
    # this would be urllib.request.urlretrieve -- confirm target runtime.
    urllib.urlretrieve(url, target, reporthook=None, data=None)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Grab gitignore files from gitignore.io and save to the specified directory.", usage="Usage example: ./gitignore-grabber.py -a OSX Java Eclipse -d home/User/Repo")
parser.add_argument("-a", "--args", help="Specify programming language(s), OS(s), or others.", action="store")
parser.add_argument("-r", "--repo", help="Enter the path to save .gitignore file to.", action="store")
args = parser.parse_args()
if args.args and args.repo:
try:
plang = args.args.replace(' ', ',') # grab programming language to grab gitio for
saveDir = args.repo
# append lowercase language name to url
gitio = "http://www.gitignore.io/api/" + str(plang)
if not os.path.isdir(saveDir):
print "Repo Path entered is not a directory. Retry!"
sys.exit(1)
try:
grab_and_save(gitio, saveDir)
except:
print "Error downloading and saving .gitignore file..."
sys.exit(1)
print "File saved to: " + saveDir + " successfully."
except:
print "Invalid arguments, use -h for syntax example"
else:
print parser.usage | mit |
SCOAP3/invenio | invenio/utils/autodiscovery/helpers.py | 17 | 2622 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Autodiscovery helper functions."""
import inspect
from invenio.utils.text import wrap_text_in_a_box
def get_callable_signature_as_string(the_callable):
    """
    Returns a string representing a callable as if it would have been
    declared on the prompt.

    >>> def foo(arg1, arg2, arg3='val1', arg4='val2', *args, **argd):
    ...     pass
    >>> get_callable_signature_as_string(foo)
    def foo(arg1, arg2, arg3='val1', arg4='val2', *args, **argd)

    :param the_callable: the callable to be analyzed.
    :type the_callable: function/callable.
    :return: the signature.
    """
    # inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec when available, keeping the old call as a fallback
    # for very old interpreters.
    try:
        spec = inspect.getfullargspec(the_callable)
        args, varargs, varkw, defaults = (spec.args, spec.varargs,
                                          spec.varkw, spec.defaults)
    except AttributeError:
        args, varargs, varkw, defaults = inspect.getargspec(the_callable)
    # Pair trailing positional arguments with their default values.
    tmp_args = list(args)
    args_dict = {}
    if defaults:
        defaults = list(defaults)
    else:
        defaults = []
    while defaults:
        args_dict[tmp_args.pop()] = defaults.pop()
    while tmp_args:
        args_dict[tmp_args.pop()] = None
    # Render each argument in original declaration order.
    # NOTE: an explicit default of None is rendered as having no default
    # (historical behaviour, preserved here).
    args_list = []
    for arg in args:
        if args_dict[arg] is not None:
            args_list.append("%s=%s" % (arg, repr(args_dict[arg])))
        else:
            args_list.append(arg)
    if varargs:
        args_list.append("*%s" % varargs)
    if varkw:
        args_list.append("**%s" % varkw)
    args_string = ', '.join(args_list)
    return "def %s(%s)" % (the_callable.__name__, args_string)
def get_callable_documentation(the_callable):
    """
    Returns a string with the callable signature and its docstring.

    :param the_callable: the callable to be analyzed.
    :type the_callable: function/callable.
    :return: the signature.
    """
    # Fall back to a placeholder when the callable has no docstring, and
    # double up newlines so paragraphs survive the box re-wrapping.
    docstring = getattr(the_callable, '__doc__') or 'No documentation'
    return wrap_text_in_a_box(
        title=get_callable_signature_as_string(the_callable),
        body=docstring.replace('\n', '\n\n'),
        style='ascii_double')
| gpl-2.0 |
bbc/kamaelia | Sketches/MH/exceptions/Axon/Microprocess.py | 3 | 23752 | # -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""
======================================================
Microprocess - A class supporting concurrent execution
======================================================
A microprocess is a class supporting parallel execution, provided by
forming a wrapper around a generator. It also provides a place for context
to be stored about the generator.
* A component is based on a microprocess - giving it its thread of execution.
* The Scheduler runs microprocesses that have been 'activated'
*This is an Axon internal. If you are writing components you do not need to
understand this as you will normally not use it directly.*
Developers wishing to use Axon in other ways or understand its implementation
shoudl read on with interest!
Basic Usage
-----------
Making and using a microprocess is easy:
1. Subclass microprocess writing your own main() generator method
2. Create and 'activate' it
3. Run the scheduler so it is executed
Specifically, classes that subclass microprocess, and implement a main()
generator function can be activated, and scheduled by the scheduler/microthread
systems. Essentially a microprocess provides a minimal runtime context for the
scheduling & thread handling system.
In more detail:
1. Subclass a microprocess, overriding the main() generator method to make your
own that yields non-zero/False values::
class Loopy(microprocess):
def __init__(self, num):
self.num = num
super(Loopy, self).__init__()
def main(self):
yield 1
while 1:
print "we loop forever", self.num
yield 1
2. Instantiate and activate a few (note these are two separate steps!)::
mp1=Loopy(1)
mp1.activate()
mp2=Loopy(2)
mp2.activate()
mp3=Loopy(3).activate() # a more convenient shorthand
3. If you haven't already, start the scheduler to cause them to be run. The
call will return when all microprocesses have finished executing (which is
*never* in this example case)::
>>> scheduler.run.runThreads()
we loop forever 1
we loop forever 2
we loop forever 3
we loop forever 1
we loop forever 2
we loop forever 3
we loop forever 1
we loop forever 2
... etc ...
Pause a microprocess whilst it is running by calling the pause() method. Wake it
up again by calling unpause(). Pausing a microprocess means that it will cease
to be executed until something else unpauses it. When unpaused it picks up from
where it left off.
More detail
-----------
Essentially a microprocess provides a context for scheduling generators,
and treating them similar to processes/threads. It provides basic facilities to
support the activation (starting), pausing, unpausing and termination of a
generator.
To start a microprocess running, you must create it and then activate it.
Activation is a separate step to allow you to control exactly when you want
a microprocess to actually start running. Once activated, running the scheduler
will cause your generator to be executed along with all other active
microprocesses.
Every yield statement in your generator hands back control, allowing Axon
to schedule other microprocesses that may be running.
You can yield any value you like except zero or False (which are reserved for
future use).
When a microprocess finishes, the scheduler calls its _closeDownMicroprocess()
method. You can either override this in your subclass, or specify a
closeDownValue when initialising microprocess. The scheduler will act on the
return value if it recognises it - see the Scheduler module for more details.
Alternative ways of defining the generator/thread
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Subclass microprocess and write your generator as a differently
named method, for example foo(), and to then specify the *name* of the
"mainmethod" when you ask the microproces to activate::
class MyMicroprocess(microprocess):
def foo(self):
yield 1
while 1:
print "we loop forever!"
yield 1
mp = MyMicroprocess()
mp.activate(mainmethod="foo")
scheduler.run.runThreads()
Alternatively, you can instantiate a microprocess providing your own generator::
def bar():
yield 1
while 1:
print "we loop forever!"
yield 1
mp = MyMicroprocess(thread=bar())
mp.activate()
scheduler.run.runThreads()
Note that this last approach removes the ability of the microprocess to be
prematurely stopped by calling its stop() method.
Microprocess lifecycle in detail
--------------------------------
In terms of runtime a microprocess can
be viewed to have 2 different life cycles - that which an external user
sees, and that which the microprocess sees.
In terms of runtime life cycle viewed externally, a microprocess is created,
activated, and then has its next method repeatedly called until a StopIteration
exception is raised, at which point the microprocess is deleted. In terms
of a more traditional approach the next call approximates to a timeslice
being allocated to a process/thread.
The value returned by next() should be non-zero (reserved for future use). The
scheduler calling next() may also recognise some specific values - see the
Axon.Scheduler.scheduler class for more information.
The runtime life cycle from the view of the microprocess stems from the
fact that a generator wraps a thread of control, by effectively treating
the program counter like a static variable. The following describes this
runtime from the microprocess's point of view.
First the '__init__' function is called during initialisation at object
creation time. This results in a non-active, non-running microprocess.
Activation has been deliberately separated from creation and initialisation.
At some point in the future, the microprocess's activate method is called,
activating the object. When the object is activated, an internal call
to a '_microprocessGenerator' occurs. This function in fact results in
the return object being a generator, which can then have its next method
called repeatedly. This generator is then stored as an attribute of the
microprocess class.
The following describe the flow of control the generator takes when the
generator is provided with a flow of control/time slice via it's next
method. Initially, it creates a local generator object - 'pc' - by calling
the object's main method. (This allows the client of the microprocess class
to provide their own generator if they wish.) This is necessary due to
the fact that any function containing a 'yield' keyword is a generator -
the 'yield' keyword cannot be abstracted away. Next, inside a loop, the
microprocess calls the next() method of its local generator object 'pc' -
effectively providing a time slice to the user of the microprocess class.
Any result provided by the timeslice is then yielded (returned) to the
client of the generator. However if the microprocess has its stopped
flag set, the microprocess generator simply yields a null value, followed
by stopping.
This all boils down to checking to see if the microprocess is not stopped
prior to running the body of a generator formed from the main method of the
class. The intent here is that users will inherit from
the microprocess class, and then reimplement the main method, which
periodically yields control. If the user/inheriting class does not implement
a main method, then the system provides a stub that simply returns.
Pausing and unpausing of microprocesses has been delegated to the scheduler to
allow Axon systems to not consume CPU cycles when idle. When a microprocess is
paused the scheduler simply never calls its next() method until it is unpaused.
As such, calls to pause() and unpause() are actually relayed to the scheduler.
The microprocess class uses a dummy scheduler _NullScheduler until it is
actually activated. This is done so pause() and unpause() calls can be silently
absorbed whilst a microprocess is not yet active.
Essentially the microprocess provides a context for scheduling generators,
and treating them similar to processes/threads.
Clients are not expected to use the microprocess class itself directly -
they are expected to subclass the microprocess class. Subclasses do need
however to call the microprocess constructor. A minimal client class could
look like this::
from microprocess import microprocess
class automaton(microprocess):
def __init__(self):
self.Microprocess() # Call superclass constructor
def main:
while 1:
yield 1
print "Hello Again"
This microprocess would then be run by a wrapper as follows::
import microprocess, scheduler
s = scheduler.scheduler()
a = automaton()
a.activate()
s.runThreads()
The component class does this, and adds further facilities for
inter-microprocess communication. Likewise, the scheduler class subclasses
microprocess so that it can be scheduled in parallel with other tasks.
As noted previously, every microprocess object has access to a debugger,
which is accessed via the local attribute self.debugger, which we shall
return to later. Likewise every microprocess object contains a reference
to a scheduler.
Internal flags/state
--------------------
* **id** and **name** - unique identifiers. No other Axon entity will have the
same name or id.
* **init** - a flag indicating if the microprocess has been correctly
initialised.
* **stopped** - Indicates that the microprocess has run and since stopped.
* **__thread** - the generator object that gets executed whenever next() is
called. Is actually an internally created generator that wraps the one
created by the main() method.
* **scheduler** - The scheduler that controls execution of this microprocess.
When not yet activated a dummy scheduler (NullScheduler) is used instead.
* **tracker** - The coordinating assistant tracker to be used by this
microprocess.
* **debugger** - A local debugging object. (See the debug class docs for more
detail)
Note that the paused/awake state of a microprocess is something maintained and
managed by the scheduler; not the microprocess itself.
"""
import time
import sys
from util import removeAll
from idGen import strId, numId, tupleId
from debug import debug
import Axon
import CoordinatingAssistantTracker as cat
class _NullScheduler(object):
"""\
A dummy scheduler, used by microprocess when it has not yet been activated
(and therefore isn't yet assigned to a real scheduler).
Provides dummy versions of the methods a microprocess may wish to call to
get stuff done.
"""
def wakeThread(self,mprocess):
"""Dummy method - does nothing."""
pass
def pauseThread(self,mprocess):
"""Dummy method - does nothing."""
pass
def isThreadPaused(self,mprocess):
"""Dummy method - does nothing."""
return False
_nullscheduler = _NullScheduler()
class microprocess(Axon.AxonObject):
"""\
microprocess([thread][,closeDownValue]) -> new microprocess object
Creates a new microprocess object (not yet activated). You can optionally
specify an alternative generator to be used instead of the one the microprocess
would ordinarily create for itself.
Keyword arguments:
- thread -- None, or an alternative generator to be the thread of execution in this microprocess.
- closeDownValue -- Value to be returned when the microprocess has finished and _closeDownMicroprocess() is called (default=0)
"""
schedulerClass = None
trackerClass = None
def setTrackerClass(cls, newTrackerClass):
"""\
Static method, for setting the default coordinating assistant tracker for
microprocesses.
XXX - Not used atm afaik? (Matt 21/03/2007)
"""
cls.trackerClass = newTrackerClass
setTrackerClass=classmethod(setTrackerClass)
def setSchedulerClass(cls, newSchedulerClass):
"""\
Static method, for setting the default scheduler for microprocesses.
"""
cls.schedulerClass = newSchedulerClass
setSchedulerClass=classmethod(setSchedulerClass)
    def __init__(self, thread = None, closeDownValue = 0, tag=""):
        """\
        Microprocess initialiser.

        Subclasses must call this using the idiom super(TheClass, self).__init__()

        :param thread: optional generator to use as the thread of execution
            instead of one built from main().
        :param closeDownValue: value returned by _closeDownMicroprocess()
            when the microprocess finishes.
        :param tag: suffix appended to the generated unique name.
        """
        self.init = 1
        # Unique (id, name) pair from the id generator; tag disambiguates.
        self.id,self.name = tupleId(self)
        self.name = self.name + tag
        self.__stopped = 0
        if thread is not None:
            self.__thread = thread
        else:
            self.__thread = None # Explicit better than implicit
        self._exceptions = []
        self.closeDownValue = closeDownValue
        # Dummy scheduler until activation: absorbs pause()/unpause() calls.
        self.scheduler = _nullscheduler
        self.tracker=cat.coordinatingassistanttracker.getcat()
        # If the client has defined a debugger in their class we don't want to override it.
        # However if they haven't, we provide them with one
        if not 'debugger' in self.__dict__.keys():
            self.debugger = debug()
            self.debugger.useConfig()
            if self.debugger.areDebugging("microprocess.__init__", 5):
                self.debugger.debugmessage("microprocess.__init__", "Defining debugger for self", self.__class__)
def __str__(self):
"""Standard function for rendering the object as a string."""
result = ""
result = result + self.name + " :"
result = result + self.id.__str__() + " :"
result = result + self.init.__str__() + " :"
result = result + self.__stopped.__str__() + " :"
return result
def next(self):
"""\
Calls next() of the internal generator - lets you drop a microprocess in
somewhere where you'd ordinarily stick a generator.
Internally this calls self.__thread.next() to pass the timeslice down to
the actual generator
"""
return self.__thread.next()
def _isStopped(self):
"""\
Returns True if this microprocess has been running but has since been
halted or terminated of its own accord. Otherwise returns False.
"""
if self.debugger.areDebugging("microprocess._isStopped", 1):
self.debugger.debugmessage("microprocess._isStopped", "self.stopped",self.__stopped)
return self.__stopped == 1
def _isRunnable(self):
"""
Returns True if the microprocess is active and awake, or paused.
This query is actually passed on to this microprocess's scheduler.
"""
if self.debugger.areDebugging("microprocess._isRunnable", 10):
self.debugger.debugmessage("microprocess._isRunnable", "self.scheduler.isMProcessPaused(self)", self.scheduler.isMProcessPaused(self))
return not self.scheduler.isThreadPaused(self)
def stop(self):
"""\
Halts the microprocess, no way to "unstop"
"""
if self.debugger.areDebugging("microprocess.stop", 1):
self.debugger.debugmessage("microprocess.stop", "Microprocess STOPPED", self.id,self.name,self)
self.__stopped = 1
self.scheduler = _nullscheduler
def pause(self):
"""\
Pauses the microprocess.
If done by the microprocess itself, the microprocess will pause at the
next point it 'yields'.
Internally, the request is forwarded to this microprocesses scheduler.
"""
if self.debugger.areDebugging("microprocess.pause", 1):
self.debugger.debugmessage("microprocess.pause", "Microprocess PAUSED", self.id,self.name,self)
self.scheduler.pauseThread(self)
def unpause(self):
"""\
Un-pauses the microprocess.
This is provided to allow other microprocesses to 'wake up' this one.
This can only be performed by an external microprocess - if you are paused
there is no way you can unpause yourself!
Does nothing if microprocess has been stopped.
Internally, the request is forwarded to this microprocess's scheduler.
"""
if self.debugger.areDebugging("microprocess.unpause", 1):
self.debugger.debugmessage("microprocess.unpause", "Microprocess UNPAUSED", self.id,self.name,self)
self.scheduler.wakeThread(self)
def _unpause(self):
"""DEPRECATED - use M.unpause() instead"""
if self.debugger.areDebugging("microprocess._unpause", 1):
self.debugger.debugmessage("microprocess._unpause", "Microprocess UNPAUSED", self.id,self.name,self)
noisydeprecationwarning = "Use self.unpause() rather than self._unpause(). self._unpause() will be deprecated."
print noisydeprecationwarning
return self.unpause()
def main(self):
"""\
'main' thread of execution stub function.
Client classes are expected to override this.
Write your replacement as a generator (a method with 'yield' statements
in it). 'Yield' any non-zero values you like regularly to hand control to
the scheduler so other microprocesses can get a turn at executing. Your
code must therefore not block - eg. waiting on a system call or event.
If you miss this off a class that directly subclass's microprocess, your program
will run, but it will not do what you want!
"""
if self.debugger.areDebugging("microprocess.main", 0):
self.debugger.debugmessage("microprocess.main", self.name,"OI! You're only supposed to blow the bloody doors off!")
self.debugger.debugmessage("microprocess.main", self.name,"You're likely to have called WaitComplete *BUT* with a function call not a generator call")
"If you ever see the above message in your debug output, you've made a big mistake!"
yield 1
return
def _microprocessGenerator(self,someobject, mainmethod="main"):
"""\
This contains the mainloop for a microprocess, returning a
generator object. Creates the thread of control by calling the
class's main method, then in a loop repeatedly calls the resulting
generator's next method providing the object with time slices.
After each time slice, the _microprocessGenerator yields control
back to its caller.
Keyword arguments:
- someobject -- the object containing the main method (usually 'self')
- mainmethod -- *name* of the method that is the generator to be run as the thread.
"""
pc = someobject.__getattribute__(mainmethod)()
while(1):
try:
# Continually try to run the code, and then release control
if someobject._isStopped():
# Microprocess has stopped
self._handleStopped()
yield None
return
else:
if self._exceptions:
v = pc.throw(self._exceptions.pop(0))
else:
v = pc.next()
yield v # Yield control back - making us into a generator function
except StopIteration:
self._handleStopped()
return
except:
exception = sys.exc_info()
self._handleException(exception)
return
def _handleStopped(self):
pass # stub
def _handleException(self, exception):
pass # stub
def activate(self, Scheduler=None, Tracker=None, mainmethod="main"):
"""\
Call to activate this microprocess, so it can start to be executed by a
scheduler. Usual usage is to simply call x.activate()
You can optionally specify a specific scheduler or tracker to use (instead of the
defaults). You can also specify that a different method is the 'main' generator.
Keyword arguments:
- Scheduler -- None to use the default scheduler; or an alternate scheduler.
- Tracker -- None to use the default coordinating assistant tracker; or an alternative one.
- mainmethod -- Optional. The name of the 'main' method of this microprocess (default="main")
"""
# call the _microprocessGenerator function to create a generator
# object, places this into the thread attribute of the microprocess
# and appends the component to the scheduler's run queue.
if self.debugger.areDebugging("microprocess.activate", 1):
self.debugger.debugmessage("microprocess.activate", "Activating microprocess",self)
if not self.__thread:
self.__thread = self._microprocessGenerator(self,mainmethod)
#
# Whilst a basic microprocess does not "need" a local scheduler,
# classes inheriting from microprocess may well wish to do so.
# (Specifically the component class needs that capability)
#
if Scheduler is not None:
if self.debugger.areDebugging("microprocess.activate", 1):
self.debugger.debugmessage("microprocess.activate", "Activating microprocess",self)
Scheduler._addThread(self)
self.scheduler = Scheduler
else:
self.__class__.schedulerClass.run._addThread(self)
self.scheduler = self.__class__.schedulerClass.run
if Tracker is not None:
self.tracker = Tracker
else:
pass
if self.debugger.areDebugging("microprocess.activate", 5):
self.debugger.debugmessage("microprocess.activate", "Using Scheduler",self.scheduler)
return self
def _closeDownMicroprocess(self):
"""\
Stub method that is overridden internally in Axon but not clients
Called by scheduler to ask microprocess to perform any desired shutdown
tasks. The scheduler also processes any IPC objects in the return value.
"""
return self.closeDownValue
def run(self):
"""\
run - starts the scheduler for this microprocess and runs it.
This is a convenient shortcut to activate and run this microprocess and
any other microprocesses that have already been activated (with the same
scheduler).
"""
self.activate()
self.__class__.schedulerClass.run.runThreads()
if __name__ == '__main__':
print "Test code currently disabled"
if 0:
def microProcessThreadTest():
class myProcess(microprocess):
def main(self):
i = 100
yield wouldblock(self)
while(i):
i = i -1
print "myProcess",self.name, ":", "hello World"
yield notify(self,None, 10, "this")
threadfactory = microthread()
r = scheduler()
for i in range(5):
p = myProcess(i)
t = threadfactory.activate(p)
r._addThread(t)
context = r.runThreads()
microProcessThreadTest()
| apache-2.0 |
rail/treeherder | treeherder/webapp/api/urls.py | 2 | 2889 | from django.conf.urls import include, patterns, url
from rest_framework import routers
from treeherder.webapp.api import (artifact, bug, job_log_url, jobs, logslice,
note, performance_data, refdata,
resultset)
# router for views that are bound to a project
# i.e. all those views that don't involve reference data
project_bound_router = routers.SimpleRouter()
project_bound_router.register(
r'jobs',
jobs.JobsViewSet,
base_name='jobs',
)
project_bound_router.register(
r'resultset',
resultset.ResultSetViewSet,
base_name='resultset',
)
project_bound_router.register(
r'artifact',
artifact.ArtifactViewSet,
base_name='artifact',
)
project_bound_router.register(
r'note',
note.NoteViewSet,
base_name='note',
)
project_bound_router.register(
r'bug-job-map',
bug.BugJobMapViewSet,
base_name='bug-job-map',
)
project_bound_router.register(
r'logslice',
logslice.LogSliceView,
base_name='logslice',
)
project_bound_router.register(
r'job-log-url',
job_log_url.JobLogUrlViewSet,
base_name='job-log-url',
)
project_bound_router.register(
r'performance/data',
performance_data.PerformanceDatumViewSet,
base_name='performance-data')
project_bound_router.register(
r'performance/signatures',
performance_data.PerformanceSignatureViewSet,
base_name='performance-signatures')
project_bound_router.register(
r'performance/platforms',
performance_data.PerformancePlatformViewSet,
base_name='performance-signatures-platforms')
# this is the default router for plain restful endpoints
# refdata endpoints:
default_router = routers.DefaultRouter()
default_router.register(r'product', refdata.ProductViewSet)
default_router.register(r'machine', refdata.MachineViewSet)
default_router.register(r'machineplatform', refdata.MachinePlatformViewSet)
default_router.register(r'buildplatform', refdata.BuildPlatformViewSet)
default_router.register(r'jobgroup', refdata.JobGroupViewSet)
default_router.register(r'jobtype', refdata.JobTypeViewSet)
default_router.register(r'repository', refdata.RepositoryViewSet)
default_router.register(r'optioncollectionhash', refdata.OptionCollectionHashViewSet,
base_name='optioncollectionhash')
default_router.register(r'bugscache', refdata.BugscacheViewSet)
default_router.register(r'failureclassification', refdata.FailureClassificationViewSet)
default_router.register(r'user', refdata.UserViewSet, base_name='user')
default_router.register(r'exclusion-profile', refdata.ExclusionProfileViewSet)
default_router.register(r'job-exclusion', refdata.JobExclusionViewSet)
urlpatterns = patterns(
'',
url(r'^project/(?P<project>[\w-]{0,50})/',
include(project_bound_router.urls)),
url(r'^',
include(default_router.urls)),
)
| mpl-2.0 |
MobinRanjbar/hue | desktop/core/ext-py/httplib2-0.8/python2/httplib2/test/functional/test_proxies.py | 305 | 2965 | import unittest
import errno
import os
import signal
import subprocess
import tempfile
import nose
import httplib2
from httplib2 import socks
from httplib2.test import miniserver
tinyproxy_cfg = """
User "%(user)s"
Port %(port)s
Listen 127.0.0.1
PidFile "%(pidfile)s"
LogFile "%(logfile)s"
MaxClients 2
StartServers 1
LogLevel Info
"""
class FunctionalProxyHttpTest(unittest.TestCase):
def setUp(self):
if not socks:
raise nose.SkipTest('socks module unavailable')
if not subprocess:
raise nose.SkipTest('subprocess module unavailable')
# start a short-lived miniserver so we can get a likely port
# for the proxy
self.httpd, self.proxyport = miniserver.start_server(
miniserver.ThisDirHandler)
self.httpd.shutdown()
self.httpd, self.port = miniserver.start_server(
miniserver.ThisDirHandler)
self.pidfile = tempfile.mktemp()
self.logfile = tempfile.mktemp()
fd, self.conffile = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
our_cfg = tinyproxy_cfg % {'user': os.getlogin(),
'pidfile': self.pidfile,
'port': self.proxyport,
'logfile': self.logfile}
f.write(our_cfg)
f.close()
try:
# TODO use subprocess.check_call when 2.4 is dropped
ret = subprocess.call(['tinyproxy', '-c', self.conffile])
self.assertEqual(0, ret)
except OSError, e:
if e.errno == errno.ENOENT:
raise nose.SkipTest('tinyproxy not available')
raise
def tearDown(self):
self.httpd.shutdown()
try:
pid = int(open(self.pidfile).read())
os.kill(pid, signal.SIGTERM)
except OSError, e:
if e.errno == errno.ESRCH:
print '\n\n\nTinyProxy Failed to start, log follows:'
print open(self.logfile).read()
print 'end tinyproxy log\n\n\n'
raise
map(os.unlink, (self.pidfile,
self.logfile,
self.conffile))
def testSimpleProxy(self):
proxy_info = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP,
'localhost', self.proxyport)
client = httplib2.Http(proxy_info=proxy_info)
src = 'miniserver.py'
response, body = client.request('http://localhost:%d/%s' %
(self.port, src))
self.assertEqual(response.status, 200)
self.assertEqual(body, open(os.path.join(miniserver.HERE, src)).read())
lf = open(self.logfile).read()
expect = ('Established connection to host "127.0.0.1" '
'using file descriptor')
self.assertTrue(expect in lf,
'tinyproxy did not proxy a request for miniserver')
| apache-2.0 |
pdellaert/ansible | test/units/modules/network/netvisor/test_pn_vrouter_ospf6.py | 23 | 2555 | # Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_vrouter_ospf6
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule
class TestVrouterOSPF6Module(TestNvosModule):
module = pn_vrouter_ospf6
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_vrouter_ospf6.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_vrouter_ospf6.check_cli')
self.run_check_cli = self.mock_run_check_cli.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
self.mock_run_check_cli.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['present'] == 'vrouter-ospf6-add':
results = dict(
changed=True,
cli_cmd=cli
)
elif state_map['absent'] == 'vrouter-ospf6-remove':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
if state == 'present':
self.run_check_cli.return_value = True, False
if state == 'absent':
self.run_check_cli.return_value = True, True
def test_vrouter_ospf6_add(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_vrouter_name': 'foo-vrouter',
'pn_nic': 'eth0.4092', 'pn_ospf6_area': '0.0.0.0', 'state': 'present'})
result = self.execute_module(changed=True, state='present')
expected_cmd = ' switch sw01 vrouter-ospf6-add vrouter-name foo-vrouter nic eth0.4092 ospf6-area 0.0.0.0 '
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_vrouter_ospf6_remove(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_vrouter_name': 'foo-vrouter',
'pn_nic': 'eth0.4092', 'state': 'absent'})
result = self.execute_module(changed=True, state='absent')
expected_cmd = ' switch sw01 vrouter-ospf6-remove vrouter-name foo-vrouter nic eth0.4092'
self.assertEqual(result['cli_cmd'], expected_cmd)
| gpl-3.0 |
gqwest-erp/server | openerp/addons/base/res/__init__.py | 63 | 1274 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_country
import res_lang
import res_partner
import res_bank
import res_config
import res_currency
import res_company
import res_users
import res_request
import res_lang
import ir_property
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Zentyal/samba | third_party/dnspython/dns/message.py | 47 | 41226 | # Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Messages"""
import cStringIO
import random
import struct
import sys
import time
import dns.edns
import dns.exception
import dns.flags
import dns.name
import dns.opcode
import dns.entropy
import dns.rcode
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.rrset
import dns.renderer
import dns.tsig
import dns.wiredata
class ShortHeader(dns.exception.FormError):
"""Raised if the DNS packet passed to from_wire() is too short."""
pass
class TrailingJunk(dns.exception.FormError):
"""Raised if the DNS packet passed to from_wire() has extra junk
at the end of it."""
pass
class UnknownHeaderField(dns.exception.DNSException):
"""Raised if a header field name is not recognized when converting from
text into a message."""
pass
class BadEDNS(dns.exception.FormError):
"""Raised if an OPT record occurs somewhere other than the start of
the additional data section."""
pass
class BadTSIG(dns.exception.FormError):
"""Raised if a TSIG record occurs somewhere other than the end of
the additional data section."""
pass
class UnknownTSIGKey(dns.exception.DNSException):
"""Raised if we got a TSIG but don't know the key."""
pass
class Message(object):
"""A DNS message.
@ivar id: The query id; the default is a randomly chosen id.
@type id: int
@ivar flags: The DNS flags of the message. @see: RFC 1035 for an
explanation of these flags.
@type flags: int
@ivar question: The question section.
@type question: list of dns.rrset.RRset objects
@ivar answer: The answer section.
@type answer: list of dns.rrset.RRset objects
@ivar authority: The authority section.
@type authority: list of dns.rrset.RRset objects
@ivar additional: The additional data section.
@type additional: list of dns.rrset.RRset objects
@ivar edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@ivar ednsflags: The EDNS flags
@type ednsflags: long
@ivar payload: The EDNS payload size. The default is 0.
@type payload: int
@ivar options: The EDNS options
@type options: list of dns.edns.Option objects
@ivar request_payload: The associated request's EDNS payload size.
@type request_payload: int
@ivar keyring: The TSIG keyring to use. The default is None.
@type keyring: dict
@ivar keyname: The TSIG keyname to use. The default is None.
@type keyname: dns.name.Name object
@ivar keyalgorithm: The TSIG algorithm to use; defaults to
dns.tsig.default_algorithm. Constants for TSIG algorithms are defined
in dns.tsig, and the currently implemented algorithms are
HMAC_MD5, HMAC_SHA1, HMAC_SHA224, HMAC_SHA256, HMAC_SHA384, and
HMAC_SHA512.
@type keyalgorithm: string
@ivar request_mac: The TSIG MAC of the request message associated with
this message; used when validating TSIG signatures. @see: RFC 2845 for
more information on TSIG fields.
@type request_mac: string
@ivar fudge: TSIG time fudge; default is 300 seconds.
@type fudge: int
@ivar original_id: TSIG original id; defaults to the message's id
@type original_id: int
@ivar tsig_error: TSIG error code; default is 0.
@type tsig_error: int
@ivar other_data: TSIG other data.
@type other_data: string
@ivar mac: The TSIG MAC for this message.
@type mac: string
@ivar xfr: Is the message being used to contain the results of a DNS
zone transfer? The default is False.
@type xfr: bool
@ivar origin: The origin of the zone in messages which are used for
zone transfers or for DNS dynamic updates. The default is None.
@type origin: dns.name.Name object
@ivar tsig_ctx: The TSIG signature context associated with this
message. The default is None.
@type tsig_ctx: hmac.HMAC object
@ivar had_tsig: Did the message decoded from wire format have a TSIG
signature?
@type had_tsig: bool
@ivar multi: Is this message part of a multi-message sequence? The
default is false. This variable is used when validating TSIG signatures
on messages which are part of a zone transfer.
@type multi: bool
@ivar first: Is this message standalone, or the first of a multi
message sequence? This variable is used when validating TSIG signatures
on messages which are part of a zone transfer.
@type first: bool
@ivar index: An index of rrsets in the message. The index key is
(section, name, rdclass, rdtype, covers, deleting). Indexing can be
disabled by setting the index to None.
@type index: dict
"""
def __init__(self, id=None):
if id is None:
self.id = dns.entropy.random_16()
else:
self.id = id
self.flags = 0
self.question = []
self.answer = []
self.authority = []
self.additional = []
self.edns = -1
self.ednsflags = 0
self.payload = 0
self.options = []
self.request_payload = 0
self.keyring = None
self.keyname = None
self.keyalgorithm = dns.tsig.default_algorithm
self.request_mac = ''
self.other_data = ''
self.tsig_error = 0
self.fudge = 300
self.original_id = self.id
self.mac = ''
self.xfr = False
self.origin = None
self.tsig_ctx = None
self.had_tsig = False
self.multi = False
self.first = True
self.index = {}
def __repr__(self):
return '<DNS message, ID ' + `self.id` + '>'
def __str__(self):
return self.to_text()
def to_text(self, origin=None, relativize=True, **kw):
"""Convert the message to text.
The I{origin}, I{relativize}, and any other keyword
arguments are passed to the rrset to_wire() method.
@rtype: string
"""
s = cStringIO.StringIO()
print >> s, 'id %d' % self.id
print >> s, 'opcode %s' % \
dns.opcode.to_text(dns.opcode.from_flags(self.flags))
rc = dns.rcode.from_flags(self.flags, self.ednsflags)
print >> s, 'rcode %s' % dns.rcode.to_text(rc)
print >> s, 'flags %s' % dns.flags.to_text(self.flags)
if self.edns >= 0:
print >> s, 'edns %s' % self.edns
if self.ednsflags != 0:
print >> s, 'eflags %s' % \
dns.flags.edns_to_text(self.ednsflags)
print >> s, 'payload', self.payload
is_update = dns.opcode.is_update(self.flags)
if is_update:
print >> s, ';ZONE'
else:
print >> s, ';QUESTION'
for rrset in self.question:
print >> s, rrset.to_text(origin, relativize, **kw)
if is_update:
print >> s, ';PREREQ'
else:
print >> s, ';ANSWER'
for rrset in self.answer:
print >> s, rrset.to_text(origin, relativize, **kw)
if is_update:
print >> s, ';UPDATE'
else:
print >> s, ';AUTHORITY'
for rrset in self.authority:
print >> s, rrset.to_text(origin, relativize, **kw)
print >> s, ';ADDITIONAL'
for rrset in self.additional:
print >> s, rrset.to_text(origin, relativize, **kw)
#
# We strip off the final \n so the caller can print the result without
# doing weird things to get around eccentricities in Python print
# formatting
#
return s.getvalue()[:-1]
def __eq__(self, other):
"""Two messages are equal if they have the same content in the
header, question, answer, and authority sections.
@rtype: bool"""
if not isinstance(other, Message):
return False
if self.id != other.id:
return False
if self.flags != other.flags:
return False
for n in self.question:
if n not in other.question:
return False
for n in other.question:
if n not in self.question:
return False
for n in self.answer:
if n not in other.answer:
return False
for n in other.answer:
if n not in self.answer:
return False
for n in self.authority:
if n not in other.authority:
return False
for n in other.authority:
if n not in self.authority:
return False
return True
def __ne__(self, other):
"""Are two messages not equal?
@rtype: bool"""
return not self.__eq__(other)
def is_response(self, other):
"""Is other a response to self?
@rtype: bool"""
if other.flags & dns.flags.QR == 0 or \
self.id != other.id or \
dns.opcode.from_flags(self.flags) != \
dns.opcode.from_flags(other.flags):
return False
if dns.rcode.from_flags(other.flags, other.ednsflags) != \
dns.rcode.NOERROR:
return True
if dns.opcode.is_update(self.flags):
return True
for n in self.question:
if n not in other.question:
return False
for n in other.question:
if n not in self.question:
return False
return True
def section_number(self, section):
if section is self.question:
return 0
elif section is self.answer:
return 1
elif section is self.authority:
return 2
elif section is self.additional:
return 3
else:
raise ValueError('unknown section')
def find_rrset(self, section, name, rdclass, rdtype,
covers=dns.rdatatype.NONE, deleting=None, create=False,
force_unique=False):
"""Find the RRset with the given attributes in the specified section.
@param section: the section of the message to look in, e.g.
self.answer.
@type section: list of dns.rrset.RRset objects
@param name: the name of the RRset
@type name: dns.name.Name object
@param rdclass: the class of the RRset
@type rdclass: int
@param rdtype: the type of the RRset
@type rdtype: int
@param covers: the covers value of the RRset
@type covers: int
@param deleting: the deleting value of the RRset
@type deleting: int
@param create: If True, create the RRset if it is not found.
The created RRset is appended to I{section}.
@type create: bool
@param force_unique: If True and create is also True, create a
new RRset regardless of whether a matching RRset exists already.
@type force_unique: bool
@raises KeyError: the RRset was not found and create was False
@rtype: dns.rrset.RRset object"""
key = (self.section_number(section),
name, rdclass, rdtype, covers, deleting)
if not force_unique:
if not self.index is None:
rrset = self.index.get(key)
if not rrset is None:
return rrset
else:
for rrset in section:
if rrset.match(name, rdclass, rdtype, covers, deleting):
return rrset
if not create:
raise KeyError
rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting)
section.append(rrset)
if not self.index is None:
self.index[key] = rrset
return rrset
def get_rrset(self, section, name, rdclass, rdtype,
covers=dns.rdatatype.NONE, deleting=None, create=False,
force_unique=False):
"""Get the RRset with the given attributes in the specified section.
If the RRset is not found, None is returned.
@param section: the section of the message to look in, e.g.
self.answer.
@type section: list of dns.rrset.RRset objects
@param name: the name of the RRset
@type name: dns.name.Name object
@param rdclass: the class of the RRset
@type rdclass: int
@param rdtype: the type of the RRset
@type rdtype: int
@param covers: the covers value of the RRset
@type covers: int
@param deleting: the deleting value of the RRset
@type deleting: int
@param create: If True, create the RRset if it is not found.
The created RRset is appended to I{section}.
@type create: bool
@param force_unique: If True and create is also True, create a
new RRset regardless of whether a matching RRset exists already.
@type force_unique: bool
@rtype: dns.rrset.RRset object or None"""
try:
rrset = self.find_rrset(section, name, rdclass, rdtype, covers,
deleting, create, force_unique)
except KeyError:
rrset = None
return rrset
def to_wire(self, origin=None, max_size=0, **kw):
"""Return a string containing the message in DNS compressed wire
format.
Additional keyword arguments are passed to the rrset to_wire()
method.
@param origin: The origin to be appended to any relative names.
@type origin: dns.name.Name object
@param max_size: The maximum size of the wire format output; default
is 0, which means 'the message's request payload, if nonzero, or
65536'.
@type max_size: int
@raises dns.exception.TooBig: max_size was exceeded
@rtype: string
"""
if max_size == 0:
if self.request_payload != 0:
max_size = self.request_payload
else:
max_size = 65535
if max_size < 512:
max_size = 512
elif max_size > 65535:
max_size = 65535
r = dns.renderer.Renderer(self.id, self.flags, max_size, origin)
for rrset in self.question:
r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
for rrset in self.answer:
r.add_rrset(dns.renderer.ANSWER, rrset, **kw)
for rrset in self.authority:
r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
if self.edns >= 0:
r.add_edns(self.edns, self.ednsflags, self.payload, self.options)
for rrset in self.additional:
r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
r.write_header()
if not self.keyname is None:
r.add_tsig(self.keyname, self.keyring[self.keyname],
self.fudge, self.original_id, self.tsig_error,
self.other_data, self.request_mac,
self.keyalgorithm)
self.mac = r.mac
return r.get_wire()
def use_tsig(self, keyring, keyname=None, fudge=300,
original_id=None, tsig_error=0, other_data='',
algorithm=dns.tsig.default_algorithm):
"""When sending, a TSIG signature using the specified keyring
and keyname should be added.
@param keyring: The TSIG keyring to use; defaults to None.
@type keyring: dict
@param keyname: The name of the TSIG key to use; defaults to None.
The key must be defined in the keyring. If a keyring is specified
but a keyname is not, then the key used will be the first key in the
keyring. Note that the order of keys in a dictionary is not defined,
so applications should supply a keyname when a keyring is used, unless
they know the keyring contains only one key.
@type keyname: dns.name.Name or string
@param fudge: TSIG time fudge; default is 300 seconds.
@type fudge: int
@param original_id: TSIG original id; defaults to the message's id
@type original_id: int
@param tsig_error: TSIG error code; default is 0.
@type tsig_error: int
@param other_data: TSIG other data.
@type other_data: string
@param algorithm: The TSIG algorithm to use; defaults to
dns.tsig.default_algorithm
"""
self.keyring = keyring
if keyname is None:
self.keyname = self.keyring.keys()[0]
else:
if isinstance(keyname, (str, unicode)):
keyname = dns.name.from_text(keyname)
self.keyname = keyname
self.keyalgorithm = algorithm
self.fudge = fudge
if original_id is None:
self.original_id = self.id
else:
self.original_id = original_id
self.tsig_error = tsig_error
self.other_data = other_data
def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None, options=None):
"""Configure EDNS behavior.
@param edns: The EDNS level to use. Specifying None, False, or -1
means 'do not use EDNS', and in this case the other parameters are
ignored. Specifying True is equivalent to specifying 0, i.e. 'use
EDNS0'.
@type edns: int or bool or None
@param ednsflags: EDNS flag values.
@type ednsflags: int
@param payload: The EDNS sender's payload field, which is the maximum
size of UDP datagram the sender can handle.
@type payload: int
@param request_payload: The EDNS payload size to use when sending
this message. If not specified, defaults to the value of payload.
@type request_payload: int or None
@param options: The EDNS options
@type options: None or list of dns.edns.Option objects
@see: RFC 2671
"""
if edns is None or edns is False:
edns = -1
if edns is True:
edns = 0
if request_payload is None:
request_payload = payload
if edns < 0:
ednsflags = 0
payload = 0
request_payload = 0
options = []
else:
# make sure the EDNS version in ednsflags agrees with edns
ednsflags &= 0xFF00FFFFL
ednsflags |= (edns << 16)
if options is None:
options = []
self.edns = edns
self.ednsflags = ednsflags
self.payload = payload
self.options = options
self.request_payload = request_payload
def want_dnssec(self, wanted=True):
    """Enable or disable 'DNSSEC desired' flag in requests.
    @param wanted: Is DNSSEC desired?  If True, EDNS is enabled if
    required, and then the DO bit is set.  If False, the DO bit is
    cleared if EDNS is enabled.
    @type wanted: bool
    """
    if not wanted:
        # Clearing DO only makes sense when EDNS is active.
        if self.edns >= 0:
            self.ednsflags &= ~dns.flags.DO
        return
    # The DO bit lives in the EDNS flags, so EDNS must be enabled first.
    if self.edns < 0:
        self.use_edns()
    self.ednsflags |= dns.flags.DO
def rcode(self):
    """Return the rcode.

    The result combines the rcode bits in the DNS header flags with the
    extended-rcode bits carried in the EDNS flags.
    @rtype: int
    """
    return dns.rcode.from_flags(self.flags, self.ednsflags)
def set_rcode(self, rcode):
    """Set the rcode.
    @param rcode: the rcode
    @type rcode: int
    """
    (value, evalue) = dns.rcode.to_flags(rcode)
    # Replace the low 4 rcode bits in the DNS header flags...
    self.flags &= 0xFFF0
    self.flags |= value
    # ...and the extended-rcode octet in the EDNS flags.  Fix: the
    # Python-2-only 'L' long-literal suffix on the mask was dropped;
    # the value 0x00FFFFFF is unchanged.
    self.ednsflags &= 0x00FFFFFF
    self.ednsflags |= evalue
    if self.ednsflags != 0 and self.edns < 0:
        # A nonzero extended rcode requires EDNS; enable EDNS0 implicitly.
        self.edns = 0
def opcode(self):
    """Return the opcode.

    The opcode is extracted from the header flags (the 0x7800 bits,
    per the mask used in set_opcode()).
    @rtype: int
    """
    return dns.opcode.from_flags(self.flags)
def set_opcode(self, opcode):
    """Set the opcode.
    @param opcode: the opcode
    @type opcode: int
    """
    # Clear the opcode bits (mask 0x87FF keeps everything else) and merge
    # in the new opcode in a single assignment.
    self.flags = (self.flags & 0x87FF) | dns.opcode.to_flags(opcode)
class _WireReader(object):
    """Wire format reader.

    @ivar wire: the wire-format message.
    @type wire: string
    @ivar message: The message object being built
    @type message: dns.message.Message object
    @ivar current: When building a message object from wire format, this
    variable contains the offset from the beginning of wire of the next octet
    to be read.
    @type current: int
    @ivar updating: Is the message a dynamic update?
    @type updating: bool
    @ivar one_rr_per_rrset: Put each RR into its own RRset?
    @type one_rr_per_rrset: bool
    @ivar zone_rdclass: The class of the zone in messages which are
    DNS dynamic updates.
    @type zone_rdclass: int
    """

    def __init__(self, wire, message, question_only=False,
                 one_rr_per_rrset=False):
        # maybe_wrap() guards slicing against reading past the end of
        # untrusted wire data.
        self.wire = dns.wiredata.maybe_wrap(wire)
        self.message = message
        self.current = 0
        self.updating = False
        self.zone_rdclass = dns.rdataclass.IN
        self.question_only = question_only
        self.one_rr_per_rrset = one_rr_per_rrset

    def _get_question(self, qcount):
        """Read the next I{qcount} records from the wire data and add them to
        the question section.
        @param qcount: the number of questions in the message
        @type qcount: int"""

        # A dynamic update carries exactly one zone record in the question
        # section; more than one is malformed.
        if self.updating and qcount > 1:
            raise dns.exception.FormError
        for i in xrange(0, qcount):
            (qname, used) = dns.name.from_wire(self.wire, self.current)
            if not self.message.origin is None:
                qname = qname.relativize(self.message.origin)
            self.current = self.current + used
            # After the name come two 2-octet fields (type and class).
            (rdtype, rdclass) = \
                struct.unpack('!HH',
                              self.wire[self.current:self.current + 4])
            self.current = self.current + 4
            self.message.find_rrset(self.message.question, qname,
                                    rdclass, rdtype, create=True,
                                    force_unique=True)
            if self.updating:
                # The zone's class governs ANY/NONE handling in later sections.
                self.zone_rdclass = rdclass

    def _get_section(self, section, count):
        """Read the next I{count} records from the wire data and add them to
        the specified section.
        @param section: the section of the message to which to add records
        @type section: list of dns.rrset.RRset objects
        @param count: the number of records to read
        @type count: int"""

        if self.updating or self.one_rr_per_rrset:
            force_unique = True
        else:
            force_unique = False
        seen_opt = False
        for i in xrange(0, count):
            rr_start = self.current
            (name, used) = dns.name.from_wire(self.wire, self.current)
            # Keep the absolute form too: TSIG keys are looked up by it.
            absolute_name = name
            if not self.message.origin is None:
                name = name.relativize(self.message.origin)
            self.current = self.current + used
            (rdtype, rdclass, ttl, rdlen) = \
                struct.unpack('!HHIH',
                              self.wire[self.current:self.current + 10])
            self.current = self.current + 10
            if rdtype == dns.rdatatype.OPT:
                # At most one OPT record, and only in the additional section.
                if not section is self.message.additional or seen_opt:
                    raise BadEDNS
                # For OPT, the CLASS field carries the payload size and the
                # TTL field carries extended rcode, version, and flags.
                self.message.payload = rdclass
                self.message.ednsflags = ttl
                self.message.edns = (ttl & 0xff0000) >> 16
                self.message.options = []
                current = self.current
                optslen = rdlen
                # Parse the (code, length, data) option triples in the rdata.
                while optslen > 0:
                    (otype, olen) = \
                        struct.unpack('!HH',
                                      self.wire[current:current + 4])
                    current = current + 4
                    opt = dns.edns.option_from_wire(otype, self.wire, current, olen)
                    self.message.options.append(opt)
                    current = current + olen
                    optslen = optslen - 4 - olen
                seen_opt = True
            elif rdtype == dns.rdatatype.TSIG:
                # TSIG must be the very last record of the additional section.
                if not (section is self.message.additional and
                        i == (count - 1)):
                    raise BadTSIG
                if self.message.keyring is None:
                    raise UnknownTSIGKey('got signed message without keyring')
                secret = self.message.keyring.get(absolute_name)
                if secret is None:
                    raise UnknownTSIGKey("key '%s' unknown" % name)
                self.message.tsig_ctx = \
                    dns.tsig.validate(self.wire,
                                      absolute_name,
                                      secret,
                                      int(time.time()),
                                      self.message.request_mac,
                                      rr_start,
                                      self.current,
                                      rdlen,
                                      self.message.tsig_ctx,
                                      self.message.multi,
                                      self.message.first)
                self.message.had_tsig = True
            else:
                if ttl < 0:
                    # Defensive clamp; '!I' unpacking cannot actually
                    # produce a negative value.
                    ttl = 0
                if self.updating and \
                   (rdclass == dns.rdataclass.ANY or
                    rdclass == dns.rdataclass.NONE):
                    # Dynamic-update deletion semantics: ANY/NONE mark the
                    # record as a deletion; the real class is the zone's.
                    deleting = rdclass
                    rdclass = self.zone_rdclass
                else:
                    deleting = None
                if deleting == dns.rdataclass.ANY or \
                   (deleting == dns.rdataclass.NONE and \
                    section is self.message.answer):
                    # 'Delete all' forms carry no rdata.
                    covers = dns.rdatatype.NONE
                    rd = None
                else:
                    rd = dns.rdata.from_wire(rdclass, rdtype, self.wire,
                                             self.current, rdlen,
                                             self.message.origin)
                    covers = rd.covers()
                if self.message.xfr and rdtype == dns.rdatatype.SOA:
                    # Each SOA in a zone transfer starts a new RRset.
                    force_unique = True
                rrset = self.message.find_rrset(section, name,
                                                rdclass, rdtype, covers,
                                                deleting, True, force_unique)
                if not rd is None:
                    rrset.add(rd, ttl)
            # Skip the rdata; OPT/TSIG branches above only read it via
            # local offsets and never advanced self.current past it.
            self.current = self.current + rdlen

    def read(self):
        """Read a wire format DNS message and build a dns.message.Message
        object."""

        l = len(self.wire)
        if l < 12:
            # A DNS header is always 12 octets.
            raise ShortHeader
        (self.message.id, self.message.flags, qcount, ancount,
         aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12])
        self.current = 12
        if dns.opcode.is_update(self.message.flags):
            self.updating = True
        self._get_question(qcount)
        if self.question_only:
            return
        self._get_section(self.message.answer, ancount)
        self._get_section(self.message.authority, aucount)
        self._get_section(self.message.additional, adcount)
        if self.current != l:
            raise TrailingJunk
        # In a multi-message exchange (e.g. AXFR), unsigned messages still
        # feed the running TSIG HMAC context.
        if self.message.multi and self.message.tsig_ctx and \
           not self.message.had_tsig:
            self.message.tsig_ctx.update(self.wire)
def from_wire(wire, keyring=None, request_mac='', xfr=False, origin=None,
              tsig_ctx=None, multi=False, first=True,
              question_only=False, one_rr_per_rrset=False):
    """Convert a DNS wire format message into a message object.

    @param keyring: The keyring to use if the message is signed.
    @type keyring: dict
    @param request_mac: If the message is a response to a TSIG-signed request,
    I{request_mac} should be set to the MAC of that request.
    @type request_mac: string
    @param xfr: Is this message part of a zone transfer?
    @type xfr: bool
    @param origin: If the message is part of a zone transfer, I{origin}
    should be the origin name of the zone.
    @type origin: dns.name.Name object
    @param tsig_ctx: The ongoing TSIG context, used when validating zone
    transfers.
    @type tsig_ctx: hmac.HMAC object
    @param multi: Is this message part of a multiple message sequence?
    @type multi: bool
    @param first: Is this message standalone, or the first of a multi
    message sequence?
    @type first: bool
    @param question_only: Read only up to the end of the question section?
    @type question_only: bool
    @param one_rr_per_rrset: Put each RR into its own RRset
    @type one_rr_per_rrset: bool
    @raises ShortHeader: The message is less than 12 octets long.
    @raises TrailingJunk: There were octets in the message past the end
    of the proper DNS message.
    @raises BadEDNS: An OPT record was in the wrong section, or occurred more
    than once.
    @raises BadTSIG: A TSIG record was not the last record of the additional
    data section.
    @rtype: dns.message.Message object"""

    # Seed a fresh message with the TSIG/transfer context the reader needs.
    message = Message(id=0)
    message.keyring = keyring
    message.request_mac = request_mac
    message.xfr = xfr
    message.origin = origin
    message.tsig_ctx = tsig_ctx
    message.multi = multi
    message.first = first

    _WireReader(wire, message, question_only, one_rr_per_rrset).read()
    return message
class _TextReader(object):
    """Text format reader.

    @ivar tok: the tokenizer
    @type tok: dns.tokenizer.Tokenizer object
    @ivar message: The message object being built
    @type message: dns.message.Message object
    @ivar updating: Is the message a dynamic update?
    @type updating: bool
    @ivar zone_rdclass: The class of the zone in messages which are
    DNS dynamic updates.
    @type zone_rdclass: int
    @ivar last_name: The most recently read name when building a message object
    from text format.
    @type last_name: dns.name.Name object
    """

    def __init__(self, text, message):
        self.message = message
        self.tok = dns.tokenizer.Tokenizer(text)
        self.last_name = None
        self.zone_rdclass = dns.rdataclass.IN
        self.updating = False

    def _header_line(self, section):
        """Process one line from the text format header section."""

        token = self.tok.get()
        what = token.value
        if what == 'id':
            self.message.id = self.tok.get_int()
        elif what == 'flags':
            # Accumulate flag mnemonics until end of line.
            while True:
                token = self.tok.get()
                if not token.is_identifier():
                    self.tok.unget(token)
                    break
                self.message.flags = self.message.flags | \
                                     dns.flags.from_text(token.value)
            if dns.opcode.is_update(self.message.flags):
                self.updating = True
        elif what == 'edns':
            self.message.edns = self.tok.get_int()
            # The EDNS version lives in bits 16-23 of ednsflags.
            self.message.ednsflags = self.message.ednsflags | \
                                     (self.message.edns << 16)
        elif what == 'eflags':
            if self.message.edns < 0:
                # Seeing EDNS flags implies at least EDNS0.
                self.message.edns = 0
            while True:
                token = self.tok.get()
                if not token.is_identifier():
                    self.tok.unget(token)
                    break
                self.message.ednsflags = self.message.ednsflags | \
                                         dns.flags.edns_from_text(token.value)
        elif what == 'payload':
            self.message.payload = self.tok.get_int()
            if self.message.edns < 0:
                # A payload size also implies EDNS0.
                self.message.edns = 0
        elif what == 'opcode':
            text = self.tok.get_string()
            self.message.flags = self.message.flags | \
                                 dns.opcode.to_flags(dns.opcode.from_text(text))
        elif what == 'rcode':
            text = self.tok.get_string()
            self.message.set_rcode(dns.rcode.from_text(text))
        else:
            raise UnknownHeaderField
        self.tok.get_eol()

    def _question_line(self, section):
        """Process one line from the text format question section."""

        token = self.tok.get(want_leading = True)
        if not token.is_whitespace():
            # A name at the start of the line becomes the new current name;
            # leading whitespace means 'reuse the previous name'.
            self.last_name = dns.name.from_text(token.value, None)
        name = self.last_name
        token = self.tok.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError
        # Class
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            # The class is optional; default to IN and treat the token
            # just read as the type.
            rdclass = dns.rdataclass.IN
        # Type
        rdtype = dns.rdatatype.from_text(token.value)
        self.message.find_rrset(self.message.question, name,
                                rdclass, rdtype, create=True,
                                force_unique=True)
        if self.updating:
            self.zone_rdclass = rdclass
        self.tok.get_eol()

    def _rr_line(self, section):
        """Process one line from the text format answer, authority, or
        additional data sections.
        """

        deleting = None
        # Name
        token = self.tok.get(want_leading = True)
        if not token.is_whitespace():
            self.last_name = dns.name.from_text(token.value, None)
        name = self.last_name
        token = self.tok.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError
        # TTL
        try:
            ttl = int(token.value, 0)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            # The TTL is optional; default 0.
            ttl = 0
        # Class
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
            if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
                # Update-style deletion classes; the effective class is
                # the zone's class.
                deleting = rdclass
                rdclass = self.zone_rdclass
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            # The class is optional; default to IN.
            rdclass = dns.rdataclass.IN
        # Type
        rdtype = dns.rdatatype.from_text(token.value)
        token = self.tok.get()
        if not token.is_eol_or_eof():
            self.tok.unget(token)
            rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
            covers = rd.covers()
        else:
            # No rdata present (e.g. 'delete all' update forms).
            rd = None
            covers = dns.rdatatype.NONE
        rrset = self.message.find_rrset(section, name,
                                        rdclass, rdtype, covers,
                                        deleting, True, self.updating)
        if not rd is None:
            rrset.add(rd, ttl)

    def read(self):
        """Read a text format DNS message and build a dns.message.Message
        object."""

        line_method = self._header_line
        section = None
        while 1:
            token = self.tok.get(True, True)
            if token.is_eol_or_eof():
                break
            if token.is_comment():
                # Section markers (;HEADER, ;QUESTION, ;ANSWER, ...) arrive
                # as comment tokens and switch the per-line handler.
                u = token.value.upper()
                if u == 'HEADER':
                    line_method = self._header_line
                elif u == 'QUESTION' or u == 'ZONE':
                    line_method = self._question_line
                    section = self.message.question
                elif u == 'ANSWER' or u == 'PREREQ':
                    line_method = self._rr_line
                    section = self.message.answer
                elif u == 'AUTHORITY' or u == 'UPDATE':
                    line_method = self._rr_line
                    section = self.message.authority
                elif u == 'ADDITIONAL':
                    line_method = self._rr_line
                    section = self.message.additional
                self.tok.get_eol()
                continue
            self.tok.unget(token)
            line_method(section)
def from_text(text):
    """Convert the text format message into a message object.

    @param text: The text format message.
    @type text: string
    @raises UnknownHeaderField:
    @raises dns.exception.SyntaxError:
    @rtype: dns.message.Message object"""

    # 'text' can also be a file, but we don't publish that fact
    # since it's an implementation detail.  The official file
    # interface is from_file().
    message = Message()
    _TextReader(text, message).read()
    return message
def from_file(f):
    """Read the next text format message from the specified file.

    @param f: file or string.  If I{f} is a string, it is treated
    as the name of a file to open.
    @raises UnknownHeaderField:
    @raises dns.exception.SyntaxError:
    @rtype: dns.message.Message object"""

    if sys.hexversion >= 0x02030000:
        # Allow Unicode filenames and turn on universal newline support
        # (both available from Python 2.3 on).
        str_type = basestring
        opts = 'rU'
    else:
        str_type = str
        opts = 'r'
    if isinstance(f, str_type):
        # Fix: use open() instead of the deprecated file() builtin
        # (file() was removed in Python 3; open() behaves identically here).
        f = open(f, opts)
        want_close = True
    else:
        # Caller owns the file object; don't close it.
        want_close = False

    try:
        m = from_text(f)
    finally:
        if want_close:
            f.close()
    return m
def make_query(qname, rdtype, rdclass = dns.rdataclass.IN, use_edns=None,
               want_dnssec=False):
    """Make a query message.

    The query name, type, and class may all be specified either
    as objects of the appropriate type, or as strings.

    The query will have a randomly choosen query id, and its DNS flags
    will be set to dns.flags.RD.

    @param qname: The query name.
    @type qname: dns.name.Name object or string
    @param rdtype: The desired rdata type.
    @type rdtype: int
    @param rdclass: The desired rdata class; the default is class IN.
    @type rdclass: int
    @param use_edns: The EDNS level to use; the default is None (no EDNS).
    See the description of dns.message.Message.use_edns() for the possible
    values for use_edns and their meanings.
    @type use_edns: int or bool or None
    @param want_dnssec: Should the query indicate that DNSSEC is desired?
    @type want_dnssec: bool
    @rtype: dns.message.Message object"""

    # Coerce any textual arguments into their object forms first.
    text_types = (str, unicode)
    if isinstance(qname, text_types):
        qname = dns.name.from_text(qname)
    if isinstance(rdtype, text_types):
        rdtype = dns.rdatatype.from_text(rdtype)
    if isinstance(rdclass, text_types):
        rdclass = dns.rdataclass.from_text(rdclass)

    query = Message()
    query.flags |= dns.flags.RD
    query.find_rrset(query.question, qname, rdclass, rdtype, create=True,
                     force_unique=True)
    query.use_edns(use_edns)
    query.want_dnssec(want_dnssec)
    return query
def make_response(query, recursion_available=False, our_payload=8192):
    """Make a message which is a response for the specified query.

    The message returned is really a response skeleton; it has all
    of the infrastructure required of a response, but none of the
    content.

    The response's question section is a shallow copy of the query's
    question section, so the query's question RRsets should not be
    changed.

    @param query: the query to respond to
    @type query: dns.message.Message object
    @param recursion_available: should RA be set in the response?
    @type recursion_available: bool
    @param our_payload: payload size to advertise in EDNS responses; default
    is 8192.
    @type our_payload: int
    @rtype: dns.message.Message object"""

    if query.flags & dns.flags.QR:
        raise dns.exception.FormError('specified query message is not a query')

    reply = dns.message.Message(query.id)
    # Mark as a response and mirror the query's RD flag; optionally set RA.
    flags = dns.flags.QR | (query.flags & dns.flags.RD)
    if recursion_available:
        flags |= dns.flags.RA
    reply.flags = flags
    reply.set_opcode(query.opcode())
    reply.question = list(query.question)
    if query.edns >= 0:
        reply.use_edns(0, 0, our_payload, query.payload)
    if query.keyname is not None:
        # Sign the response with the same TSIG key the query used.
        reply.keyname = query.keyname
        reply.keyring = query.keyring
        reply.request_mac = query.mac
    return reply
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Xuyang Hu <xuyanghu@yahoo.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 2.1
# as published by the Free Software Foundation
"""
imageProcesser is used to process images received from UI.
This file can be test standalone using cmd:
python imageProcesser.py
"""
import cv2
import os
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
from PySide import QtGui, QtCore
import math
import time
# pylint: disable=C0103,R0904,W0102,W0201
testPath = './lena.jpeg'
def fileExp(matchedSuffixes=('bmp', 'jpg', 'jpeg', 'png')):
    """
    Return a compiled, case-insensitive regexp matcher object that matches
    file names ending in one of the given suffixes.

    @param matchedSuffixes: iterable of file-name suffixes without the dot.
        Fix: the default is now a tuple instead of a list, avoiding the
        mutable-default-argument pitfall (behavior is unchanged).
    @return a compiled regular-expression pattern object
    """
    # Build a single alternation such as r'^.*\.bmp$|^.*\.jpg$|...'.
    matchedString = r'|'.join(r'^.*\.' + s + '$' for s in matchedSuffixes)
    return re.compile(matchedString, re.IGNORECASE)
class SingleImageProcess(QtCore.QObject):
    """
    Process a single image loaded from disk.

    Note: batch processing reuses this class, so console output is kept
    modest.
    """

    # Public
    sel = None  # ROI as (minX, minY, maxX, maxY); can be set from outside
    selSignal = QtCore.Signal(list)

    def __init__(self, fileName=testPath, isGray=False, parent=None):
        """
        Load the image.

        NOTE(review): cv2.imread(path, 0) loads grayscale, so the default
        isGray=False actually yields a single-channel image -- confirm the
        intended meaning of this flag.
        """
        super(SingleImageProcess, self).__init__(parent)
        self.fileName = fileName
        self.img = cv2.imread(fileName, isGray)
        # Private state for the ROI mouse interaction; do not poke from outside.
        self.dragStart = None
        self.roiNeedUpadte = False
        self.isInWaitLoop = False

    def simpleDemo(self):
        """
        Print image shape and gray level info and show the image with highgui.
        Usage: press esc to quit image window, 'a' to accept an ROI selection,
        'b' to preview a Butterworth blur.
        """
        width, height = self.img.shape  # NOTE(review): numpy order is (rows, cols)
        meanVal, meanStdDevVal = cv2.meanStdDev(self.img)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(self.img)
        # Prints use the parenthesized single-argument form so the module
        # parses under both Python 2 and Python 3 (output unchanged on Py2).
        print("Size:")
        print((width, height))
        print("(min, max, mean, meanStdDev):")
        print((minVal, maxVal, meanVal[0][0], meanStdDevVal[0][0]))
        cv2.imshow("SingleImageWindow", self.img)
        cv2.setMouseCallback("SingleImageWindow", self.onMouse)
        print("Press esc to exit")  # any key except q in fact
        self.isInWaitLoop = True
        while True:
            ch = cv2.waitKey()
            if ch == 27:  # ESC
                break
            elif self.roiNeedUpadte and ch == 97:  # 'a': selection is made
                print("Accept ROI (minX, minY, maxX, maxY): " + str(self.sel))
                self.selSignal.emit(self.sel)
                self.setROI()
                self.roiNeedUpadte = False
                break
            elif ch == ord('b'):
                self.getButterworthBlur(stopband2=35, showResult=True)
        cv2.destroyAllWindows()
        self.isInWaitLoop = False

    def setROI(self, showPatch=False):
        """
        Return the patch selected by self.sel, or the whole image when no
        selection exists.
        """
        if not self.sel:
            return self.img
        # sel is (minX, minY, maxX, maxY); numpy indexing is [y, x].
        patch = self.img[self.sel[1]:self.sel[3], self.sel[0]:self.sel[2]]
        if showPatch:
            cv2.imshow("patch", patch)
            self.enterWaitLoop()
        self.roiNeedUpadte = False
        return patch

    def saveFile(self):
        """
        Save the file with a time stamp.  Not implemented yet; always
        returns False.
        """
        # TODO: make it work!!
        # Fix: the original statement ended with a dangling '+' (a syntax
        # error); implicit string concatenation is used instead.
        print("This function has not been implemented yet. It is recommended"
              " to use matplotlib instead.")
        return False
        # newName = time.strftime('%Y%m%d_%H%M%S') + self.fileName
        # if cv2.imwrite(newName, self.img):
        #     print "Image is saved @ " + newName
        #     return True
        # else:
        #     print "Error: Fasiled to save image"
        #     return False

    # --------------------------------------------------- Get image info
    def getCenterPoint(self):
        """
        Return the average gray value of a 4x4 rect at the image center.

        NOTE(review): gaussianImg is computed but the value is sampled from
        the original image -- confirm whether the blurred image was intended.
        """
        gaussianImg = cv2.GaussianBlur(self.img, (9, 9), 3)
        centerPoint = self.getAvgIn4x4rect(
            self.img.shape[0] / 2 - 2,
            self.img.shape[1] / 2 - 2)
        return centerPoint

    def getAvgIn4x4rect(self, LocX=2, LocY=2):
        """
        Calculate average value of a 4x4 rect in the image.
        Note: this function does not check that the rect is fully
        inside the image!
        @param (LocX, LocY) start point of rect
        @return retval average value in float
        """
        imROI = self.img[LocX:LocX + 4, LocY:LocY + 4]
        return cv2.mean(imROI)[0]

    def getGaussaianBlur(self, size=(33, 33)):
        """
        Return the image blurred with the given kernel size and sigmaX=9.
        """
        blurImg = cv2.GaussianBlur(self.img, size, 9)
        # self.showImage(blurImg)
        return blurImg

    def getButterworthBlur(self, stopband2=5, showResult=False):
        """
        Apply a Butterworth low-pass filter to the image.
        @param stopband2 stopband radius squared
        """
        dft4img = self.getDFT()
        bwfilter = self.getButterworthFilter(stopband2=stopband2)
        # Multiply in the frequency domain, then transform back.
        dstimg = dft4img * bwfilter
        dstimg = cv2.idft(np.fft.ifftshift(dstimg))
        dstimg = np.uint8(cv2.magnitude(dstimg[:, :, 0], dstimg[:, :, 1]))
        if showResult:
            # cv2.imshow("test", dstimg)
            # self.enterWaitLoop()
            plt.imshow(dstimg)
            plt.show()
        return dstimg

    def getAverageValue(self):
        """Return the mean gray value of the whole image."""
        return cv2.mean(self.img)[0]

    def getDFT(self, img2dft=None, showdft=False):
        """
        Return the complex DFT spectrum (zero frequency shifted to the
        center) of img2dft, defaulting to self.img.
        """
        # Fix: 'img2dft == None' is ambiguous for numpy arrays; use 'is None'.
        if img2dft is None:
            img2dft = self.img
        # Fix: the img2dft argument was previously ignored (self.img was
        # always transformed).  Existing callers never pass an image, so
        # their behavior is unchanged.
        dft_A = cv2.dft(np.float32(img2dft), flags=cv2.DFT_COMPLEX_OUTPUT | cv2.DFT_SCALE)
        dft_A = np.fft.fftshift(dft_A)
        if showdft:
            self.showSpecturm(dft_A)
        return dft_A

    def getButterworthFilter(self, stopband2=5, order=3, showdft=False):
        """
        Build a Butterworth low-pass filter in the frequency domain,
        shaped (h, w, 2) to match the complex DFT output.
        """
        h, w = self.img.shape[0], self.img.shape[1]  # no optimization
        P = h / 2
        Q = w / 2
        dst = np.zeros((h, w, 2), np.float64)
        for i in range(h):
            for j in range(w):
                r2 = float((i - P) ** 2 + (j - Q) ** 2)
                if r2 == 0:
                    r2 = 1.0  # avoid division by zero at the center
                dst[i, j] = 1 / (1 + (r2 / stopband2) ** order)
        dst = np.float64(dst)
        if showdft:
            f = cv2.magnitude(dst[:, :, 0], dst[:, :, 1])
            # cv2.imshow("butterworth", f)
            # self.enterWaitLoop()
            plt.imshow(f)
            plt.show()
        return dst

    def getShannonEntropy(self, srcImage=None):
        """
        Calculate the Shannon entropy for an image (defaults to self.img).
        """
        # Fix: truthiness of a numpy array raises ValueError; use 'is None'.
        if srcImage is None:
            srcImage = self.img
        histogram = cv2.calcHist(srcImage, [0], None, [256], [0, 256])
        histLen = sum(histogram)
        samplesPossiblity = [float(h) / histLen for h in histogram]
        return -sum([p * math.log(p, 2) for p in samplesPossiblity if p != 0])

    # ------------------------------------------------ Highgui functions
    def showImage(self, img):
        """
        Show input image with highgui.
        """
        cv2.imshow("test", img)
        self.enterWaitLoop()

    def showSpecturm(self, dft_result):
        """
        Show the log-scaled magnitude spectrum of a DFT result.
        """
        cv2.normalize(dft_result, dft_result, 0.0, 1.0, cv2.cv.CV_MINMAX)
        # Split fourier into real and imaginary parts
        image_Re, image_Im = cv2.split(dft_result)
        # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
        magnitude = cv2.sqrt(image_Re ** 2.0 + image_Im ** 2.0)
        # Compute log(1 + Mag)
        log_spectrum = cv2.log(1.0 + magnitude)
        # normalize and display the results as rgb
        cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.cv.CV_MINMAX)
        cv2.imshow("Spectrum", log_spectrum)
        self.enterWaitLoop()

    def onMouse(self, event, x, y, flags, param):
        """
        Mouse callback function for setting the ROI by dragging.
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            self.dragStart = x, y
            self.sel = 0, 0, 0, 0
        elif self.dragStart:
            # print flags
            if flags & cv2.EVENT_FLAG_LBUTTON:
                # Still dragging: normalize corners and redraw the rectangle.
                minpos = min(self.dragStart[0], x), min(self.dragStart[1], y)
                maxpos = max(self.dragStart[0], x), max(self.dragStart[1], y)
                self.sel = minpos[0], minpos[1], maxpos[0], maxpos[1]
                img = cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR)
                cv2.rectangle(img, (self.sel[0], self.sel[1]),
                              (self.sel[2], self.sel[3]), (0, 255, 255), 1)
                cv2.imshow("SingleImageWindow", img)
            else:
                print("selection is complete. Press a to accept.")
                self.roiNeedUpadte = True
                self.dragStart = None

    def enterWaitLoop(self):
        """
        Enter the waitKey loop.
        This function makes sure that only one wait loop is running.
        """
        if not self.isInWaitLoop:
            self.isInWaitLoop = True
            print("DO NOT close the window directly. Press Esc to enter next step!!!")
            while self.isInWaitLoop:
                ch = cv2.waitKey()
                if ch == 27:
                    break
                if ch == ord('s'):
                    self.saveFile()
                    break
            cv2.destroyAllWindows()
            self.isInWaitLoop = False
class BatchProcessing():
    """
    Process all the images in the given folder.
    """

    # Result of the most recent batch operation (shape depends on the method).
    resultArray = []
    # ROI applied to every image, as (minX, minY, maxX, maxY), or None.
    globalROI = None

    def __init__(self, rootPath='./', roi=None):
        print "Batch path: " + rootPath
        if not os.path.isdir(rootPath):
            # Retry with the repr-escaped form of the path (strips quoting
            # artifacts); give up silently if it still is not a directory.
            rootPath = repr(rootPath)[2:-1]
            if not os.path.isdir(rootPath):
                return
        self.rootPath = rootPath
        self.listPaths = []
        self.listFileNames = []
        # Collect every file whose name matches the image-suffix regexp.
        for fileName in os.listdir(rootPath):
            if fileExp().match(fileName):
                absPath = os.path.join(self.rootPath, fileName)
                self.listPaths.append(absPath)
                self.listFileNames.append(fileName)
        print "Files count: " + str(len(self.listFileNames))
        print self.listFileNames
        self.processQueue = []
        if roi:
            self.globalROI = roi
        self.loadImages()

    def loadImages(self):
        """
        Load all the images in the selected folder, crop them to the global
        ROI and pre-filter them with a Butterworth blur.
        """
        for path in self.listPaths:
            im = SingleImageProcess(fileName=path)
            im.sel = self.globalROI
            im.img = im.setROI()  # no-op crop when globalROI is None
            # im.img = im.getGaussaianBlur()
            im.img = im.getButterworthBlur()
            self.processQueue.append(im)

    def getCenterPoints(self, showResult=False):
        """
        Calculate center points of all the images and save them into
        resultArray.
        """
        print "============== Getting Center Point =========="
        centerPoints = []
        for im in self.processQueue:
            pcenter = im.getCenterPoint()
            centerPoints.append(pcenter)
        if showResult:
            # NOTE(review): this plots the PREVIOUS self.resultArray;
            # centerPoints is only assigned to it after this block --
            # confirm whether the plot should use centerPoints.
            plt.plot(self.resultArray)
            plt.title('Center Points')
            plt.xlabel('Picture numbers')
            plt.ylabel('Gray scale')
            plt.show()
        self.resultArray = centerPoints
        return centerPoints

    def getPointsInACol(self, LocX=0, pointCount=10, showResult=False):
        """
        Return value of pointCount=10 points when x = LocX.
        resultArray includes pointCount=10 arrays, each array
        has len(self.processQueue) numbers in float.
        """
        print "========================= getPointsInACol =========================="
        # [[]]*n aliases one list n times; harmless here because every slot
        # is reassigned (not mutated) below.
        self.resultArray = [[]] * pointCount
        height = self.processQueue[0].img.shape[1]
        yInterval = height / pointCount
        for i in range(pointCount):
            tmpArr = []
            for im in self.processQueue:
                avg4x4Val = im.getAvgIn4x4rect(LocX, i * yInterval)
                tmpArr.append(avg4x4Val)
            self.resultArray[i] = tmpArr
        if showResult:
            plt.plot(range(0, height, yInterval), self.resultArray)
            plt.title('Points in a col when x==' + str(LocX))
            plt.xlabel('Y position')
            plt.ylabel('Gray scale')
            plt.show()
        return self.resultArray

    def getPointsInARow(self, LocY=0, pointCount=10, showResult=False):
        """
        Return value of pointCount=10 points when y = LocY.
        resultArray includes pointCount=10 arrays, each array
        has len(self.processQueue) numbers in float.
        """
        print "========================= getPointsInARow =========================="
        self.resultArray = [[]] * pointCount
        width = self.processQueue[0].img.shape[0]
        xInterval = width / pointCount
        for i in range(pointCount):
            tmpArr = []
            for im in self.processQueue:
                avg4x4Val = im.getAvgIn4x4rect(i * xInterval, LocY)
                tmpArr.append(avg4x4Val)
            self.resultArray[i] = tmpArr
        if showResult:
            plt.plot(range(0, width, xInterval), self.resultArray)
            plt.title('Points in a row when y==' + str(LocY))
            plt.xlabel('X position')
            plt.ylabel('Gray scale')
            plt.show()
        return self.resultArray

    def getAverageValues(self, showResult=False):
        """
        Return average value of all images.
        """
        averageArr = []
        for im in self.processQueue:
            averageArr.append(im.getAverageValue())
        if showResult:
            plt.plot(range(len(self.processQueue)), averageArr)
            plt.title('Average value')
            plt.xlabel('Picture numbers')
            plt.ylabel('Gray scale')
            plt.show()
        return averageArr

    def getCenterPointsWithoutShift(self, LocX=0, pointCount=10, showResult=False):
        """
        Return gray scale of center points removing average value
        as global shift.

        NOTE(review): LocX and pointCount are unused in this method --
        confirm whether they were meant to be forwarded.
        """
        centerPoints = self.getCenterPoints()
        avgPoints = self.getAverageValues()
        # Per-image average acts as a global brightness shift; subtract it.
        dstPoints = np.subtract(centerPoints, avgPoints)
        self.resultArray = dstPoints
        if showResult:
            plt.plot(dstPoints)
            plt.title('Center value without shift')
            plt.xlabel('Picture numbers')
            plt.ylabel('Center Point\'s Gray scale')
            plt.show()
        return dstPoints

    def getShannonEntropies(self, showResult=False):
        """
        Return the Shannon entropy of every image in the queue.
        """
        entropyArr = []
        for im in self.processQueue:
            entropyArr.append(im.getShannonEntropy())
        if showResult:
            plt.plot(range(len(self.processQueue)), entropyArr)
            plt.title('Entropy value')
            plt.xlabel('Picture numbers')
            plt.ylabel('Entropy')
            plt.show()
        return entropyArr
def plotGraphs(dataArr):
    """
    Draw every series in dataArr as its own subplot on one figure and
    show it.  Layout is two rows with len(dataArr)/2 columns.
    """
    # matplotlib RCN subplot code: rows=2, cols=len/2, starting index 1.
    base = 200 + (len(dataArr) / 2) * 10 + 1
    for offset, series in enumerate(dataArr):
        plt.subplot(base + offset)
        plt.plot(series)
    plt.show()
if __name__ == "__main__":
    """
    Following codes are for test.
    """
    # Single-image demo: interactive ROI window plus filter and statistics
    # output for the default test image.
    singleTest = SingleImageProcess()
    singleTest.simpleDemo()
    print "Entropy: " + str(singleTest.getShannonEntropy())
    singleTest.getGaussaianBlur()
    singleTest.getDFT(showdft=True)
    singleTest.getButterworthFilter(showdft=True)
    singleTest.getButterworthBlur(stopband2=100, showResult=True)
    print "avg=" + str(singleTest.getAverageValue())
    print singleTest.getAvgIn4x4rect()
    print singleTest.getCenterPoint()
    # Batch demo: the same statistics across every image in the working
    # directory (each call pops a matplotlib window).
    batchTest = BatchProcessing()
    batchTest.getCenterPoints(showResult=True)
    batchTest.getShannonEntropies(showResult=True)
    batchTest.getPointsInACol(100, showResult=True)
    avgArr = batchTest.getAverageValues(showResult=True)
    batchTest.getCenterPointsWithoutShift(50, showResult=True)
    entpArr = batchTest.getShannonEntropies(showResult=True)
    plotGraphs([avgArr, entpArr])
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser engine for the grammar tables generated by pgen.
The grammar table must be loaded first.
See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.
"""
# Local imports
from . import token
class ParseError(Exception):
    """Exception to signal the parser is stuck."""

    def __init__(self, msg, type, value, context):
        # Build the display string once, then hand it to the base class.
        details = "%s: type=%r, value=%r, context=%r" % \
                  (msg, type, value, context)
        Exception.__init__(self, details)
        self.msg = msg
        self.type = type
        self.value = value
        self.context = context
class Parser(object):
"""Parser engine.
The proper usage sequence is:
p = Parser(grammar, [converter]) # create instance
p.setup([start]) # prepare for parsing
<for each input token>:
if p.addtoken(...): # parse a token; may raise ParseError
break
root = p.rootnode # root of abstract syntax tree
A Parser instance may be reused by calling setup() repeatedly.
A Parser instance contains state pertaining to the current token
sequence, and should not be used concurrently by different threads
to parse separate token sequences.
See driver.py for how to get input tokens by tokenizing a file or
string.
Parsing is complete when addtoken() returns True; the root of the
abstract syntax tree can then be retrieved from the rootnode
instance variable. When a syntax error occurs, addtoken() raises
the ParseError exception. There is no error recovery; the parser
cannot be used after a syntax error was reported (but it can be
reinitialized by calling setup()).
"""
def __init__(self, grammar, convert=None):
    """Constructor.

    The grammar argument is a grammar.Grammar instance; see the
    grammar module for more information.

    The parser is not ready yet for parsing; you must call the
    setup() method to get it started.

    The optional convert argument is a function mapping concrete
    syntax tree nodes to abstract syntax tree nodes.  If not
    given, no conversion is done and the syntax tree produced is
    the concrete syntax tree.  If given, it must be a function of
    two arguments, the first being the grammar (a grammar.Grammar
    instance), and the second being the concrete syntax tree node
    to be converted.  The syntax tree is converted from the bottom
    up.

    A concrete syntax tree node is a (type, value, context, nodes)
    tuple, where type is the node type (a token or symbol number),
    value is None for symbols and a string for tokens, context is
    None or an opaque value used for error reporting (typically a
    (lineno, offset) pair), and nodes is a list of children for
    symbols, and None for tokens.

    An abstract syntax tree node may be anything; this is entirely
    up to the converter function.
    """
    self.grammar = grammar
    # Default converter is the identity: return the concrete node as-is.
    self.convert = convert or (lambda grammar, node: node)
def setup(self, start=None):
"""Prepare for parsing.
This *must* be called before starting to parse.
The optional argument is an alternative start symbol; it
defaults to the grammar's start symbol.
You can use a Parser instance to parse any number of programs;
each time you call setup() the parser is reset to an initial
state determined by the (implicit or explicit) start symbol.
"""
if start is None:
start = self.grammar.start
# Each stack entry is a tuple: (dfa, state, node).
# A node is a tuple: (type, value, context, children),
# where children is a list of nodes or None, and context may be None.
newnode = (start, None, None, [])
stackentry = (self.grammar.dfas[start], 0, newnode)
self.stack = [stackentry]
self.rootnode = None
self.used_names = set() # Aliased to self.rootnode.used_names in pop()
def addtoken(self, type, value, context):
    """Add a token; return True iff this is the end of the program.

    Repeatedly consults the DFA on top of the stack: shifts the token
    when a matching transition exists, pushes a nonterminal's DFA when
    the token is in that symbol's first set, or pops nonterminals that
    are in an accept-only state.  Raises ParseError when no transition
    applies.
    """
    # Map from token to label
    ilabel = self.classify(type, value, context)
    # Loop until the token is shifted; may raise exceptions
    while True:
        dfa, state, node = self.stack[-1]
        states, first = dfa
        arcs = states[state]
        # Look for a state with this label
        for i, newstate in arcs:
            t, v = self.grammar.labels[i]
            if ilabel == i:
                # Look it up in the list of labels
                # Labels < 256 are terminals (token types).
                assert t < 256
                # Shift a token; we're done with it
                self.shift(type, value, newstate, context)
                # Pop while we are in an accept-only state
                state = newstate
                while states[state] == [(0, state)]:
                    self.pop()
                    if not self.stack:
                        # Done parsing!
                        return True
                    dfa, state, node = self.stack[-1]
                    states, first = dfa
                # Done with this token
                return False
            elif t >= 256:
                # See if it's a symbol and if we're in its first set
                itsdfa = self.grammar.dfas[t]
                itsstates, itsfirst = itsdfa
                if ilabel in itsfirst:
                    # Push a symbol
                    self.push(t, self.grammar.dfas[t], newstate, context)
                    break # To continue the outer while loop
        else:
            # for-else: no arc matched this token in the current state.
            if (0, state) in arcs:
                # An accepting state, pop it and try something else
                self.pop()
                if not self.stack:
                    # Done parsing, but another token is input
                    raise ParseError("too much input",
                                     type, value, context)
            else:
                # No success finding a transition
                raise ParseError("bad input", type, value, context)
def classify(self, type, value, context):
    """Map a (type, value) token to its grammar label.  (Internal)

    NAME tokens are recorded in used_names and checked against the
    keyword table first; all other tokens are looked up by type.
    Raises ParseError for tokens the grammar does not know about.
    """
    if type == token.NAME:
        # Keep a listing of all used names.
        self.used_names.add(value)
        # Reserved words have their own dedicated labels.
        keyword_label = self.grammar.keywords.get(value)
        if keyword_label is not None:
            return keyword_label
    generic_label = self.grammar.tokens.get(type)
    if generic_label is None:
        raise ParseError("bad token", type, value, context)
    return generic_label
def shift(self, type, value, newstate, context):
    """Consume one token: attach its (converted) leaf node to the
    current node and advance the top-of-stack DFA to `newstate`.
    (Internal)"""
    dfa, _, node = self.stack[-1]
    leaf = self.convert(self.grammar, (type, value, context, None))
    # The converter may return None to drop the node entirely.
    if leaf is not None:
        node[-1].append(leaf)
    self.stack[-1] = (dfa, newstate, node)
def push(self, type, newdfa, newstate, context):
    """Begin a nonterminal: advance the current frame to `newstate`
    and push a fresh frame for `newdfa`.  (Internal)"""
    dfa, _, node = self.stack[-1]
    self.stack[-1] = (dfa, newstate, node)
    # The new frame starts in DFA state 0 with an empty children list.
    self.stack.append((newdfa, 0, (type, None, context, [])))
def pop(self):
    """Finish a nonterminal: pop its frame and hand the completed
    (converted) node to its parent, or make it the root when the
    stack empties.  (Internal)"""
    _, _, finished = self.stack.pop()
    converted = self.convert(self.grammar, finished)
    if converted is None:
        # The converter discarded the node; nothing to attach.
        return
    if not self.stack:
        # The whole program has been reduced; expose the result and
        # the set of names collected during parsing.
        self.rootnode = converted
        self.rootnode.used_names = self.used_names
    else:
        parent = self.stack[-1][2]
        parent[-1].append(converted)
| lgpl-3.0 |
DirtyUnicorns/android_external_chromium_org | tools/telemetry/telemetry/core/bitmap_unittest.py | 33 | 8154 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
import unittest
from telemetry import benchmark
from telemetry.core import bitmap
from telemetry.core import util
# This is a simple base64 encoded 2x2 PNG which contains, in order, a single
# Red, Yellow, Blue, and Green pixel.
test_png = """
iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91
JpzAAAAAXNSR0IArs4c6QAAAAlwSFlzAAALEwAACx
MBAJqcGAAAABZJREFUCNdj/M/AwPCfgYGB4T/DfwY
AHAAD/iOWZXsAAAAASUVORK5CYII=
"""

# On-disk PNG fixtures, resolved inside the unittest data directory, used
# by the file-based tests below.
test_png_path = os.path.join(util.GetUnittestDataDir(), 'test_png.png')
test_png_2_path = os.path.join(util.GetUnittestDataDir(), 'test_png_2.png')
class HistogramDistanceTest(unittest.TestCase):
    """Unit tests for bitmap.HistogramDistance."""

    def testNoData(self):
        # Both empty and all-zero histograms are rejected.
        with self.assertRaises(ValueError):
            bitmap.HistogramDistance([], [])
        with self.assertRaises(ValueError):
            bitmap.HistogramDistance([0, 0, 0], [0, 0, 0])

    def testWrongSizes(self):
        # Histograms of different lengths cannot be compared.
        with self.assertRaises(ValueError):
            bitmap.HistogramDistance([1], [1, 0])

    def testNoDistance(self):
        hist = [2, 4, 1, 8, 0, -1]
        self.assertEqual(bitmap.HistogramDistance(hist, list(hist)), 0)

    def testNormalizeCounts(self):
        a = [0, 0, 1, 0, 0]
        b = [0, 0, 0, 0, 7]
        # Distance is symmetric and computed on normalized counts.
        self.assertEqual(bitmap.HistogramDistance(a, b), 2)
        self.assertEqual(bitmap.HistogramDistance(b, a), 2)

    def testDistance(self):
        a = [2, 0, 1, 3, 4]
        b = [3, 1, 2, 4, 0]
        self.assertEqual(bitmap.HistogramDistance(a, b), 1)
        self.assertEqual(bitmap.HistogramDistance(b, a), 1)

        a = [0, 1, 3, 1]
        b = [2, 2, 1, 0]
        self.assertEqual(bitmap.HistogramDistance(a, b), 1.2)
        self.assertEqual(bitmap.HistogramDistance(b, a), 1.2)
class BitmapTest(unittest.TestCase):
    """Tests for telemetry.core.bitmap.Bitmap: pixel access, PNG
    round-trips, cropping, diffing and color histograms."""
    # pylint: disable=C0324

    def testReadFromBase64Png(self):
        # The 2x2 fixture holds, in order: red, yellow, blue, green.
        bmp = bitmap.Bitmap.FromBase64Png(test_png)
        self.assertEquals(2, bmp.width)
        self.assertEquals(2, bmp.height)
        bmp.GetPixelColor(0, 0).AssertIsRGB(255, 0, 0)
        bmp.GetPixelColor(1, 1).AssertIsRGB(0, 255, 0)
        bmp.GetPixelColor(0, 1).AssertIsRGB(0, 0, 255)
        bmp.GetPixelColor(1, 0).AssertIsRGB(255, 255, 0)

    def testReadFromPngFile(self):
        # Same fixture image as testReadFromBase64Png, loaded from disk.
        file_bmp = bitmap.Bitmap.FromPngFile(test_png_path)
        self.assertEquals(2, file_bmp.width)
        self.assertEquals(2, file_bmp.height)
        file_bmp.GetPixelColor(0, 0).AssertIsRGB(255, 0, 0)
        file_bmp.GetPixelColor(1, 1).AssertIsRGB(0, 255, 0)
        file_bmp.GetPixelColor(0, 1).AssertIsRGB(0, 0, 255)
        file_bmp.GetPixelColor(1, 0).AssertIsRGB(255, 255, 0)

    def testWritePngToPngFile(self):
        # Round-trip: write a loaded bitmap back out and reload it.
        orig = bitmap.Bitmap.FromPngFile(test_png_path)
        temp_file = tempfile.NamedTemporaryFile().name
        orig.WritePngFile(temp_file)
        new_file = bitmap.Bitmap.FromPngFile(temp_file)
        self.assertTrue(orig.IsEqual(new_file))

    @benchmark.Disabled
    def testWriteCroppedBmpToPngFile(self):
        pixels = [255,0,0, 255,255,0, 0,0,0,
                  255,255,0, 0,255,0, 0,0,0]
        orig = bitmap.Bitmap(3, 3, 2, pixels)
        orig.Crop(0, 0, 2, 2)
        temp_file = tempfile.NamedTemporaryFile().name
        orig.WritePngFile(temp_file)
        new_file = bitmap.Bitmap.FromPngFile(temp_file)
        self.assertTrue(orig.IsEqual(new_file))

    def testIsEqual(self):
        bmp = bitmap.Bitmap.FromBase64Png(test_png)
        file_bmp = bitmap.Bitmap.FromPngFile(test_png_path)
        self.assertTrue(bmp.IsEqual(file_bmp))

    def testDiff(self):
        file_bmp = bitmap.Bitmap.FromPngFile(test_png_path)
        file_bmp_2 = bitmap.Bitmap.FromPngFile(test_png_2_path)

        # Diffing an image with itself yields an all-black image.
        diff_bmp = file_bmp.Diff(file_bmp)
        self.assertEquals(2, diff_bmp.width)
        self.assertEquals(2, diff_bmp.height)
        diff_bmp.GetPixelColor(0, 0).AssertIsRGB(0, 0, 0)
        diff_bmp.GetPixelColor(1, 1).AssertIsRGB(0, 0, 0)
        diff_bmp.GetPixelColor(0, 1).AssertIsRGB(0, 0, 0)
        diff_bmp.GetPixelColor(1, 0).AssertIsRGB(0, 0, 0)

        # Diffing 2x2 against 3x3 yields a 3x3 result; per the assertions
        # below, the region outside the smaller image comes out white.
        diff_bmp = file_bmp.Diff(file_bmp_2)
        self.assertEquals(3, diff_bmp.width)
        self.assertEquals(3, diff_bmp.height)
        diff_bmp.GetPixelColor(0, 0).AssertIsRGB(0, 255, 255)
        diff_bmp.GetPixelColor(1, 1).AssertIsRGB(255, 0, 255)
        diff_bmp.GetPixelColor(0, 1).AssertIsRGB(255, 255, 0)
        diff_bmp.GetPixelColor(1, 0).AssertIsRGB(0, 0, 255)
        diff_bmp.GetPixelColor(0, 2).AssertIsRGB(255, 255, 255)
        diff_bmp.GetPixelColor(1, 2).AssertIsRGB(255, 255, 255)
        diff_bmp.GetPixelColor(2, 0).AssertIsRGB(255, 255, 255)
        diff_bmp.GetPixelColor(2, 1).AssertIsRGB(255, 255, 255)
        diff_bmp.GetPixelColor(2, 2).AssertIsRGB(255, 255, 255)

    @benchmark.Disabled
    def testGetBoundingBox(self):
        pixels = [0,0,0, 0,0,0, 0,0,0, 0,0,0,
                  0,0,0, 1,0,0, 1,0,0, 0,0,0,
                  0,0,0, 0,0,0, 0,0,0, 0,0,0]
        bmp = bitmap.Bitmap(3, 4, 3, pixels)
        # Two matching pixels at (1,1) and (2,1) -> box (x, y, w, h).
        box, count = bmp.GetBoundingBox(bitmap.RgbaColor(1, 0, 0))
        self.assertEquals(box, (1, 1, 2, 1))
        self.assertEquals(count, 2)

        # A color absent from the bitmap yields no box and a zero count.
        box, count = bmp.GetBoundingBox(bitmap.RgbaColor(0, 1, 0))
        self.assertEquals(box, None)
        self.assertEquals(count, 0)

    @benchmark.Disabled
    def testCrop(self):
        pixels = [0,0,0, 1,0,0, 2,0,0, 3,0,0,
                  0,1,0, 1,1,0, 2,1,0, 3,1,0,
                  0,2,0, 1,2,0, 2,2,0, 3,2,0]
        bmp = bitmap.Bitmap(3, 4, 3, pixels)
        bmp.Crop(1, 2, 2, 1)
        self.assertEquals(bmp.width, 2)
        self.assertEquals(bmp.height, 1)
        bmp.GetPixelColor(0, 0).AssertIsRGB(1, 2, 0)
        bmp.GetPixelColor(1, 0).AssertIsRGB(2, 2, 0)
        self.assertEquals(bmp.pixels, bytearray([1,2,0, 2,2,0]))

    @benchmark.Disabled
    def testHistogram(self):
        pixels = [1,2,3, 1,2,3, 1,2,3, 1,2,3,
                  1,2,3, 8,7,6, 5,4,6, 1,2,3,
                  1,2,3, 8,7,6, 5,4,6, 1,2,3]
        bmp = bitmap.Bitmap(3, 4, 3, pixels)
        bmp.Crop(1, 1, 2, 2)

        histogram = bmp.ColorHistogram()
        # Each channel's counts must sum to the number of pixels.
        for i in xrange(3):
            self.assertEquals(sum(histogram[i]), bmp.width * bmp.height)
        self.assertEquals(histogram.r[1], 0)
        self.assertEquals(histogram.r[5], 2)
        self.assertEquals(histogram.r[8], 2)
        self.assertEquals(histogram.g[2], 0)
        self.assertEquals(histogram.g[4], 2)
        self.assertEquals(histogram.g[7], 2)
        self.assertEquals(histogram.b[3], 0)
        self.assertEquals(histogram.b[6], 4)

    @benchmark.Disabled
    def testHistogramIgnoreColor(self):
        pixels = [1,2,3, 1,2,3, 1,2,3, 1,2,3,
                  1,2,3, 8,7,6, 5,4,6, 1,2,3,
                  1,2,3, 8,7,6, 5,4,6, 1,2,3]
        bmp = bitmap.Bitmap(3, 4, 3, pixels)

        # Pixels exactly matching ignore_color are excluded from counts.
        histogram = bmp.ColorHistogram(ignore_color=bitmap.RgbaColor(1, 2, 3))
        self.assertEquals(histogram.r[1], 0)
        self.assertEquals(histogram.r[5], 2)
        self.assertEquals(histogram.r[8], 2)
        self.assertEquals(histogram.g[2], 0)
        self.assertEquals(histogram.g[4], 2)
        self.assertEquals(histogram.g[7], 2)
        self.assertEquals(histogram.b[3], 0)
        self.assertEquals(histogram.b[6], 4)

    @benchmark.Disabled
    def testHistogramIgnoreColorTolerance(self):
        pixels = [1,2,3, 4,5,6,
                  7,8,9, 8,7,6]
        bmp = bitmap.Bitmap(3, 2, 2, pixels)

        # With tolerance=1, (1,2,3) is within 1 of ignore_color (0,1,2)
        # and is excluded, per the zero counts asserted below.
        histogram = bmp.ColorHistogram(ignore_color=bitmap.RgbaColor(0, 1, 2),
                                       tolerance=1)
        self.assertEquals(histogram.r[1], 0)
        self.assertEquals(histogram.r[4], 1)
        self.assertEquals(histogram.r[7], 1)
        self.assertEquals(histogram.r[8], 1)
        self.assertEquals(histogram.g[2], 0)
        self.assertEquals(histogram.g[5], 1)
        self.assertEquals(histogram.g[7], 1)
        self.assertEquals(histogram.g[8], 1)
        self.assertEquals(histogram.b[3], 0)
        self.assertEquals(histogram.b[6], 2)
        self.assertEquals(histogram.b[9], 1)

    @benchmark.Disabled
    def testHistogramDistanceIgnoreColor(self):
        pixels = [1,2,3, 1,2,3,
                  1,2,3, 1,2,3]
        bmp = bitmap.Bitmap(3, 2, 2, pixels)

        hist1 = bmp.ColorHistogram(ignore_color=bitmap.RgbaColor(1, 2, 3))
        hist2 = bmp.ColorHistogram()

        self.assertEquals(hist1.Distance(hist2), 0)
| bsd-3-clause |
public-ink/public-ink | server/appengine/lib/matplotlib/gridspec.py | 10 | 16112 | """
:mod:`~matplotlib.gridspec` is a module which specifies the location
of the subplot in the figure.
``GridSpec``
specifies the geometry of the grid that a subplot will be
placed. The number of rows and number of columns of the grid
need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
``SubplotSpec``
specifies the location of the subplot in the given *GridSpec*.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.transforms as mtransforms
import numpy as np
import warnings
class GridSpecBase(object):
    """
    A base class of GridSpec that specifies the geometry of the grid
    that a subplot will be placed.
    """

    def __init__(self, nrows, ncols,
                 height_ratios=None, width_ratios=None):
        """
        The number of rows and number of columns of the grid need to
        be set. Optionally, the ratio of heights and widths of rows and
        columns can be specified.
        """
        #self.figure = figure
        self._nrows , self._ncols = nrows, ncols
        self.set_height_ratios(height_ratios)
        self.set_width_ratios(width_ratios)

    def get_geometry(self):
        'get the geometry of the grid, e.g., 2,3'
        return self._nrows, self._ncols

    def get_subplot_params(self, fig=None):
        # Intentionally a no-op here; concrete subclasses override this
        # to supply the actual layout parameters.
        pass

    def new_subplotspec(self, loc, rowspan=1, colspan=1):
        """
        create and return a SuplotSpec instance.

        loc is a (row, col) pair; the spec spans rowspan rows and
        colspan columns starting there.
        """
        loc1, loc2 = loc
        subplotspec = self[loc1:loc1+rowspan, loc2:loc2+colspan]
        return subplotspec

    def set_width_ratios(self, width_ratios):
        # One ratio per column is required (or None for equal widths).
        if width_ratios is not None and len(width_ratios) != self._ncols:
            raise ValueError('Expected the given number of width ratios to '
                             'match the number of columns of the grid')
        self._col_width_ratios = width_ratios

    def get_width_ratios(self):
        return self._col_width_ratios

    def set_height_ratios(self, height_ratios):
        # One ratio per row is required (or None for equal heights).
        if height_ratios is not None and len(height_ratios) != self._nrows:
            raise ValueError('Expected the given number of height ratios to '
                             'match the number of rows of the grid')
        self._row_height_ratios = height_ratios

    def get_height_ratios(self):
        return self._row_height_ratios

    def get_grid_positions(self, fig):
        """
        return lists of bottom and top position of rows, left and
        right positions of columns.
        """
        nrows, ncols = self.get_geometry()

        subplot_params = self.get_subplot_params(fig)
        left = subplot_params.left
        right = subplot_params.right
        bottom = subplot_params.bottom
        top = subplot_params.top
        wspace = subplot_params.wspace
        hspace = subplot_params.hspace
        totWidth = right-left
        totHeight = top-bottom

        # calculate accumulated heights of columns
        cellH = totHeight/(nrows + hspace*(nrows-1))
        sepH = hspace*cellH

        if self._row_height_ratios is not None:
            netHeight = cellH * nrows
            tr = float(sum(self._row_height_ratios))
            cellHeights = [netHeight*r/tr for r in self._row_height_ratios]
        else:
            cellHeights = [cellH] * nrows

        # Interleave separator/cell heights and accumulate, so even
        # indices of cellHs are cell tops and odd indices cell bottoms.
        sepHeights = [0] + ([sepH] * (nrows-1))
        cellHs = np.add.accumulate(np.ravel(list(zip(sepHeights, cellHeights))))

        # calculate accumulated widths of rows
        cellW = totWidth/(ncols + wspace*(ncols-1))
        sepW = wspace*cellW

        if self._col_width_ratios is not None:
            netWidth = cellW * ncols
            tr = float(sum(self._col_width_ratios))
            cellWidths = [netWidth*r/tr for r in self._col_width_ratios]
        else:
            cellWidths = [cellW] * ncols

        sepWidths = [0] + ([sepW] * (ncols-1))
        cellWs = np.add.accumulate(np.ravel(list(zip(sepWidths, cellWidths))))

        figTops = [top - cellHs[2*rowNum] for rowNum in range(nrows)]
        figBottoms = [top - cellHs[2*rowNum+1] for rowNum in range(nrows)]
        figLefts = [left + cellWs[2*colNum] for colNum in range(ncols)]
        figRights = [left + cellWs[2*colNum+1] for colNum in range(ncols)]

        return figBottoms, figTops, figLefts, figRights

    def __getitem__(self, key):
        """
        create and return a SuplotSpec instance.

        Accepts either a single index/slice into the flattened grid or a
        (row, col) tuple of indices/slices.  Negative indices count from
        the end, as for sequences.
        """
        nrows, ncols = self.get_geometry()
        total = nrows*ncols

        if isinstance(key, tuple):
            try:
                k1, k2 = key
            except ValueError:
                raise ValueError("unrecognized subplot spec")

            if isinstance(k1, slice):
                row1, row2, _ = k1.indices(nrows)
            else:
                if k1 < 0:
                    k1 += nrows
                if k1 >= nrows or k1 < 0 :
                    raise IndexError("index out of range")
                row1, row2 = k1, k1+1

            if isinstance(k2, slice):
                col1, col2, _ = k2.indices(ncols)
            else:
                if k2 < 0:
                    k2 += ncols
                if k2 >= ncols or k2 < 0 :
                    raise IndexError("index out of range")
                col1, col2 = k2, k2+1

            # Convert the (row, col) span to flattened row-major indices.
            num1 = row1*ncols + col1
            num2 = (row2-1)*ncols + (col2-1)

        # single key
        else:
            if isinstance(key, slice):
                num1, num2, _ = key.indices(total)
                num2 -= 1
            else:
                if key < 0:
                    key += total
                if key >= total or key < 0 :
                    raise IndexError("index out of range")
                num1, num2 = key, None

        return SubplotSpec(self, num1, num2)
class GridSpec(GridSpecBase):
    """
    A class that specifies the geometry of the grid that a subplot
    will be placed. The location of grid is determined by similar way
    as the SubplotParams.
    """

    def __init__(self, nrows, ncols,
                 left=None, bottom=None, right=None, top=None,
                 wspace=None, hspace=None,
                 width_ratios=None, height_ratios=None):
        """
        The number of rows and number of columns of the
        grid need to be set. Optionally, the subplot layout parameters
        (e.g., left, right, etc.) can be tuned.  Any parameter left as
        None falls back to the figure's value or rcParams.
        """
        #self.figure = figure
        self.left=left
        self.bottom=bottom
        self.right=right
        self.top=top
        self.wspace=wspace
        self.hspace=hspace

        GridSpecBase.__init__(self, nrows, ncols,
                              width_ratios=width_ratios,
                              height_ratios=height_ratios)
        #self.set_width_ratios(width_ratios)
        #self.set_height_ratios(height_ratios)

    # Layout parameters that update() accepts as keyword arguments.
    _AllowedKeys = ["left", "bottom", "right", "top", "wspace", "hspace"]

    def update(self, **kwargs):
        """
        Update the current values. If any kwarg is None, default to
        the current value, if set, otherwise to rc.
        """
        for k, v in six.iteritems(kwargs):
            if k in self._AllowedKeys:
                setattr(self, k, v)
            else:
                raise AttributeError("%s is unknown keyword" % (k,))

        # Reposition every live axes that belongs to this gridspec so
        # the new parameters take effect immediately.
        from matplotlib import _pylab_helpers
        from matplotlib.axes import SubplotBase
        for figmanager in six.itervalues(_pylab_helpers.Gcf.figs):
            for ax in figmanager.canvas.figure.axes:
                # copied from Figure.subplots_adjust
                if not isinstance(ax, SubplotBase):
                    # Check if sharing a subplots axis
                    if ax._sharex is not None and isinstance(ax._sharex, SubplotBase):
                        if ax._sharex.get_subplotspec().get_gridspec() == self:
                            ax._sharex.update_params()
                            ax.set_position(ax._sharex.figbox)
                    elif ax._sharey is not None and isinstance(ax._sharey,SubplotBase):
                        if ax._sharey.get_subplotspec().get_gridspec() == self:
                            ax._sharey.update_params()
                            ax.set_position(ax._sharey.figbox)
                else:
                    ss = ax.get_subplotspec().get_topmost_subplotspec()
                    if ss.get_gridspec() == self:
                        ax.update_params()
                        ax.set_position(ax.figbox)

    def get_subplot_params(self, fig=None):
        """
        return a dictionary of subplot layout parameters. The default
        parameters are from rcParams unless a figure attribute is set.
        """
        from matplotlib.figure import SubplotParams
        import copy
        if fig is None:
            kw = dict([(k, rcParams["figure.subplot."+k]) \
                       for k in self._AllowedKeys])
            subplotpars = SubplotParams(**kw)
        else:
            subplotpars = copy.copy(fig.subplotpars)

        # Explicitly-set attributes on this GridSpec override the
        # figure/rc defaults (None values leave them unchanged).
        update_kw = dict([(k, getattr(self, k)) for k in self._AllowedKeys])
        subplotpars.update(**update_kw)

        return subplotpars

    def locally_modified_subplot_params(self):
        # Keys whose values were explicitly set on this GridSpec.
        return [k for k in self._AllowedKeys if getattr(self, k)]

    def tight_layout(self, fig, renderer=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
        """
        Adjust subplot parameters to give specified padding.

        Parameters:

        pad : float
            padding between the figure edge and the edges of subplots, as a fraction of the font-size.
        h_pad, w_pad : float
            padding (height/width) between edges of adjacent subplots.
            Defaults to `pad_inches`.
        rect : if rect is given, it is interpreted as a rectangle
            (left, bottom, right, top) in the normalized figure
            coordinate that the whole subplots area (including
            labels) will fit into. Default is (0, 0, 1, 1).
        """

        from .tight_layout import (get_subplotspec_list,
                                   get_tight_layout_figure,
                                   get_renderer)

        subplotspec_list = get_subplotspec_list(fig.axes, grid_spec=self)
        if None in subplotspec_list:
            warnings.warn("This figure includes Axes that are not "
                          "compatible with tight_layout, so its "
                          "results might be incorrect.")

        if renderer is None:
            renderer = get_renderer(fig)

        kwargs = get_tight_layout_figure(fig, fig.axes, subplotspec_list,
                                         renderer,
                                         pad=pad, h_pad=h_pad, w_pad=w_pad,
                                         rect=rect,
                                         )

        self.update(**kwargs)
class GridSpecFromSubplotSpec(GridSpecBase):
    """
    GridSpec whose subplot layout parameters are inherited from the
    location specified by a given SubplotSpec.
    """
    def __init__(self, nrows, ncols,
                 subplot_spec,
                 wspace=None, hspace=None,
                 height_ratios=None, width_ratios=None):
        """
        The number of rows and number of columns of the grid need to
        be set. An instance of SubplotSpec is also needed to be set
        from which the layout parameters will be inherited. The wspace
        and hspace of the layout can be optionally specified or the
        default values (from the figure or rcParams) will be used.
        """
        self._wspace=wspace
        self._hspace=hspace
        self._subplot_spec = subplot_spec

        GridSpecBase.__init__(self, nrows, ncols,
                              width_ratios=width_ratios,
                              height_ratios=height_ratios)

    def get_subplot_params(self, fig=None):
        """
        return a dictionary of subplot layout parameters.
        """
        # Start from figure (or rc) spacing, then let explicit values
        # given at construction time override them.
        if fig is None:
            hspace = rcParams["figure.subplot.hspace"]
            wspace = rcParams["figure.subplot.wspace"]
        else:
            hspace = fig.subplotpars.hspace
            wspace = fig.subplotpars.wspace

        if self._hspace is not None:
            hspace = self._hspace

        if self._wspace is not None:
            wspace = self._wspace

        # The outer bounds come from the parent SubplotSpec's position.
        figbox = self._subplot_spec.get_position(fig, return_all=False)
        left, bottom, right, top = figbox.extents

        from matplotlib.figure import SubplotParams
        sp = SubplotParams(left=left,
                           right=right,
                           bottom=bottom,
                           top=top,
                           wspace=wspace,
                           hspace=hspace)

        return sp

    def get_topmost_subplotspec(self):
        'get the topmost SubplotSpec instance associated with the subplot'
        return self._subplot_spec.get_topmost_subplotspec()
class SubplotSpec(object):
    """
    Specifies the location of the subplot in the given *GridSpec*.
    """

    def __init__(self, gridspec, num1, num2=None):
        """
        The subplot will occupy the num1-th cell of the given
        gridspec.  If num2 is provided, the subplot will span between
        num1-th cell and num2-th cell.

        The index starts from 0.
        """
        # NOTE: the previous version computed
        #   rows, cols = gridspec.get_geometry(); total = rows*cols
        # here but never used the results; that dead lookup has been
        # removed (also avoids requiring get_geometry() at build time).
        self._gridspec = gridspec
        self.num1 = num1  # first (or only) cell index, row-major, 0-based
        self.num2 = num2  # optional last cell index when spanning

    def get_gridspec(self):
        """Return the GridSpec this subplot location belongs to."""
        return self._gridspec

    def get_geometry(self):
        """
        get the subplot geometry, e.g., 2,2,3. Unlike SubplotParams,
        index is 0-based
        """
        rows, cols = self.get_gridspec().get_geometry()
        return rows, cols, self.num1, self.num2

    def get_position(self, fig, return_all=False):
        """
        update the subplot position from fig.subplotpars
        """
        gridspec = self.get_gridspec()
        nrows, ncols = gridspec.get_geometry()

        figBottoms, figTops, figLefts, figRights = \
            gridspec.get_grid_positions(fig)

        # Row-major flattened index -> (row, col).
        rowNum, colNum = divmod(self.num1, ncols)
        figBottom = figBottoms[rowNum]
        figTop = figTops[rowNum]
        figLeft = figLefts[colNum]
        figRight = figRights[colNum]

        if self.num2 is not None:
            # Spanning: the bounding box is the union of the two
            # corner cells num1 and num2.
            rowNum2, colNum2 = divmod(self.num2, ncols)
            figBottom2 = figBottoms[rowNum2]
            figTop2 = figTops[rowNum2]
            figLeft2 = figLefts[colNum2]
            figRight2 = figRights[colNum2]

            figBottom = min(figBottom, figBottom2)
            figLeft = min(figLeft, figLeft2)
            figTop = max(figTop, figTop2)
            figRight = max(figRight, figRight2)

        figbox = mtransforms.Bbox.from_extents(figLeft, figBottom,
                                               figRight, figTop)

        if return_all:
            return figbox, rowNum, colNum, nrows, ncols
        else:
            return figbox

    def get_topmost_subplotspec(self):
        'get the topmost SubplotSpec instance associated with the subplot'
        gridspec = self.get_gridspec()
        if hasattr(gridspec, "get_topmost_subplotspec"):
            # Nested gridspec (GridSpecFromSubplotSpec): keep climbing.
            return gridspec.get_topmost_subplotspec()
        else:
            return self

    def __eq__(self, other):
        # check to make sure other has the attributes
        # we need to do the comparison
        if not (hasattr(other, '_gridspec') and
                hasattr(other, 'num1') and
                hasattr(other, 'num2')):
            return False
        return all((self._gridspec == other._gridspec,
                    self.num1 == other.num1,
                    self.num2 == other.num2))

    def __hash__(self):
        # Keep __hash__ consistent with __eq__ (same three attributes).
        return (hash(self._gridspec) ^
                hash(self.num1) ^
                hash(self.num2))
| gpl-3.0 |
GbalsaC/bitnamiP | cms/djangoapps/course_creators/tests/test_admin.py | 25 | 8182 | """
Tests course_creators.admin.py.
"""
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.admin.sites import AdminSite
from django.http import HttpRequest
import mock
from course_creators.admin import CourseCreatorAdmin
from course_creators.models import CourseCreator
from django.core import mail
from student.roles import CourseCreatorRole
from student import auth
def mock_render_to_string(template_name, context):
    """Stand-in for render_to_string that deterministically encodes the
    template name and context, so tests can compare rendered messages."""
    encoded = (template_name, context)
    return str(encoded)
class CourseCreatorAdminTest(TestCase):
    """
    Tests for course creator admin: state-change e-mails, admin/staff
    permissions on the CourseCreator table, and login rate limiting.
    """
    def setUp(self):
        """ Test case setup """
        super(CourseCreatorAdminTest, self).setUp()
        # A plain user with a pending CourseCreator table entry, and a
        # staff user whose request drives the admin actions under test.
        self.user = User.objects.create_user('test_user', 'test_user+courses@edx.org', 'foo')
        self.table_entry = CourseCreator(user=self.user)
        self.table_entry.save()

        self.admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo')
        self.admin.is_staff = True

        self.request = HttpRequest()
        self.request.user = self.admin

        self.creator_admin = CourseCreatorAdmin(self.table_entry, AdminSite())

        self.studio_request_email = 'mark@marky.mark'
        # Feature flags patched into settings.FEATURES for most tests.
        self.enable_creator_group_patch = {
            "ENABLE_CREATOR_GROUP": True,
            "STUDIO_REQUEST_EMAIL": self.studio_request_email
        }

    @mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
    @mock.patch('django.contrib.auth.models.User.email_user')
    def test_change_status(self, email_user):
        """
        Tests that updates to state impact the creator group maintained in authz.py and that e-mails are sent.
        """

        def change_state_and_verify_email(state, is_creator):
            """ Changes user state, verifies creator status, and verifies e-mail is sent based on transition """
            self._change_state(state)
            self.assertEqual(is_creator, auth.has_access(self.user, CourseCreatorRole()))

            context = {'studio_request_email': self.studio_request_email}
            if state == CourseCreator.GRANTED:
                template = 'emails/course_creator_granted.txt'
            elif state == CourseCreator.DENIED:
                template = 'emails/course_creator_denied.txt'
            else:
                template = 'emails/course_creator_revoked.txt'
            # render_to_string is mocked, so the expected subject/body
            # are reproducible via mock_render_to_string.
            email_user.assert_called_with(
                mock_render_to_string('emails/course_creator_subject.txt', context),
                mock_render_to_string(template, context),
                self.studio_request_email
            )

        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):

            # User is initially unrequested.
            self.assertFalse(auth.has_access(self.user, CourseCreatorRole()))

            # Walk through every state transition; only GRANTED confers
            # the course creator role.
            change_state_and_verify_email(CourseCreator.GRANTED, True)

            change_state_and_verify_email(CourseCreator.DENIED, False)

            change_state_and_verify_email(CourseCreator.GRANTED, True)

            change_state_and_verify_email(CourseCreator.PENDING, False)

            change_state_and_verify_email(CourseCreator.GRANTED, True)

            change_state_and_verify_email(CourseCreator.UNREQUESTED, False)

            change_state_and_verify_email(CourseCreator.DENIED, False)

    @mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
    def test_mail_admin_on_pending(self):
        """
        Tests that the admin account is notified when a user is in the 'pending' state.
        """

        def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user):
            """ Changes user state and verifies e-mail sent to admin address only when pending. """
            mail.outbox = []
            self._change_state(state)

            # If a message is sent to the user about course creator status change, it will be the first
            # message sent. Admin message will follow.
            base_num_emails = 1 if expect_sent_to_user else 0
            if expect_sent_to_admin:
                context = {'user_name': "test_user", 'user_email': 'test_user+courses@edx.org'}
                self.assertEquals(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent')
                sent_mail = mail.outbox[base_num_emails]
                self.assertEquals(
                    mock_render_to_string('emails/course_creator_admin_subject.txt', context),
                    sent_mail.subject
                )
                self.assertEquals(
                    mock_render_to_string('emails/course_creator_admin_user_pending.txt', context),
                    sent_mail.body
                )
                self.assertEquals(self.studio_request_email, sent_mail.from_email)
                self.assertEqual([self.studio_request_email], sent_mail.to)
            else:
                self.assertEquals(base_num_emails, len(mail.outbox))

        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
            # E-mail message should be sent to admin only when new state is PENDING, regardless of what
            # previous state was (unless previous state was already PENDING).
            # E-mail message sent to user only on transition into and out of GRANTED state.
            check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)

    def _change_state(self, state):
        """ Helper method for changing state """
        self.table_entry.state = state
        self.creator_admin.save_model(self.request, self.table_entry, None, True)

    def test_add_permission(self):
        """
        Tests that staff cannot add entries
        """
        self.assertFalse(self.creator_admin.has_add_permission(self.request))

    def test_delete_permission(self):
        """
        Tests that staff cannot delete entries
        """
        self.assertFalse(self.creator_admin.has_delete_permission(self.request))

    def test_change_permission(self):
        """
        Tests that only staff can change entries
        """
        self.assertTrue(self.creator_admin.has_change_permission(self.request))

        # A non-staff user must be denied change permission.
        self.request.user = self.user
        self.assertFalse(self.creator_admin.has_change_permission(self.request))

    def test_rate_limit_login(self):
        with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
            post_params = {'username': self.user.username, 'password': 'wrong_password'}
            # try logging in 30 times, the default limit in the number of failed
            # login attempts in one 5 minute period before the rate gets limited
            for _ in xrange(30):
                response = self.client.post('/admin/', post_params)
                self.assertEquals(response.status_code, 200)

            response = self.client.post('/admin/', post_params)
            # Since we are using the default rate limit behavior, we are
            # expecting this to return a 403 error to indicate that there have
            # been too many attempts
            self.assertEquals(response.status_code, 403)
| agpl-3.0 |
goodlang/good | src/runtime/runtime-gdb.py | 15 | 12446 | # Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""GDB Pretty printers and convenience functions for Go's runtime structures.
This script is loaded by GDB when it finds a .debug_gdb_scripts
section in the compiled binary. The [68]l linkers emit this with a
path to this file based on the path to the runtime package.
"""
# Known issues:
# - pretty printing only works for the 'native' strings. E.g. 'type
# foo string' will make foo a plain struct in the eyes of gdb,
# circumventing the pretty print triggering.
from __future__ import print_function
import re
import sys
print("Loading Go Runtime support.", file=sys.stderr)
# http://python3porting.com/differences.html
# Alias xrange to range on Python 3.  Compare version_info rather than the
# sys.version string: lexicographic string comparison is fragile (e.g. a
# hypothetical "10.0" sorts before "3").
if sys.version_info[0] >= 3:
    xrange = range
# allow to manually reload while developing
goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
# Start from an empty printer list so a manual reload does not accumulate
# duplicate pretty printers on the objfile.
goobjfile.pretty_printers = []
#
# Value wrappers
#
class SliceValue:
    """Wrapper exposing a Go slice value as a Python sequence."""

    def __init__(self, val):
        self.val = val

    @property
    def len(self):
        """Number of elements currently in the slice."""
        return int(self.val['len'])

    @property
    def cap(self):
        """Capacity of the slice's backing array."""
        return int(self.val['cap'])

    def __getitem__(self, i):
        if not 0 <= i < self.len:
            raise IndexError(i)
        # Pointer arithmetic on the backing array, then dereference.
        return (self.val["array"] + i).dereference()
#
# Pretty Printers
#
class StringTypePrinter:
    """Pretty printer for Go's native string type."""

    # Matches both 'struct string' and a pointer to it.
    pattern = re.compile(r'^struct string( \*)?$')

    def __init__(self, val):
        self.val = val

    def display_hint(self):
        return 'string'

    def to_string(self):
        length = int(self.val['len'])
        # Decode exactly `length` bytes, ignoring invalid UTF-8.
        return self.val['str'].string("utf-8", "ignore", length)
class SliceTypePrinter:
"Pretty print slices."
pattern = re.compile(r'^struct \[\]')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return str(self.val.type)[6:] # skip 'struct '
def children(self):
sval = SliceValue(self.val)
if sval.len > sval.cap:
return
for idx, item in enumerate(sval):
yield ('[{0}]'.format(idx), item)
class MapTypePrinter:
    """Pretty print map[K]V types.
    Map-typed go variables are really pointers. dereference them in gdb
    to inspect their contents with this pretty printer.
    """
    pattern = re.compile(r'^map\[.*\].*$')
    def __init__(self, val):
        self.val = val
    def display_hint(self):
        return 'map'
    def to_string(self):
        return str(self.val.type)
    def children(self):
        # Walks the runtime hmap's bucket array directly, mirroring the
        # runtime's own hash-map layout (buckets of 8 slots + overflow chain).
        B = self.val['B']
        buckets = self.val['buckets']
        oldbuckets = self.val['oldbuckets']
        flags = self.val['flags']
        inttype = self.val['hash0'].type
        cnt = 0
        for bucket in xrange(2 ** int(B)):
            bp = buckets + bucket
            if oldbuckets:
                # A grow is in progress: entries may still live in the old,
                # half-sized bucket array until they are evacuated.
                oldbucket = bucket & (2 ** (B - 1) - 1)
                oldbp = oldbuckets + oldbucket
                oldb = oldbp.dereference()
                if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
                    if bucket >= 2 ** (B - 1):
                        continue # already did old bucket
                    bp = oldbp
            while bp:
                b = bp.dereference()
                for i in xrange(8):
                    # tophash == 0 marks an empty slot.
                    if b['tophash'][i] != 0:
                        k = b['keys'][i]
                        v = b['values'][i]
                        # Low flag bits mark indirectly-stored keys/values.
                        if flags & 1:
                            k = k.dereference()
                        if flags & 2:
                            v = v.dereference()
                        # gdb's 'map' hint pairs consecutive children as k, v.
                        yield str(cnt), k
                        yield str(cnt + 1), v
                        cnt += 2
                bp = b['overflow']
class ChanTypePrinter:
    """Pretty print chan[T] types.
    Chan-typed go variables are really pointers. dereference them in gdb
    to inspect their contents with this pretty printer.
    """
    pattern = re.compile(r'^struct hchan<.*>$')
    def __init__(self, val):
        self.val = val
    def display_hint(self):
        return 'array'
    def to_string(self):
        return str(self.val.type)
    def children(self):
        # see chan.c chanbuf(). et is the type stolen from hchan<T>::recvq->first->elem
        et = [x.type for x in self.val['recvq']['first'].type.target().fields() if x.name == 'elem'][0]
        # The element ring buffer starts immediately after the hchan header.
        ptr = (self.val.address + 1).cast(et.pointer())
        for i in range(self.val["qcount"]):
            # recvx is the next read index; wrap around the buffer size.
            j = (self.val["recvx"] + i) % self.val["dataqsiz"]
            yield ('[{0}]'.format(i), (ptr + j).dereference())
#
# Register all the *Printer classes above.
#
def makematcher(klass):
    """Build a gdb pretty-printer lookup function for *klass*.

    The returned callable instantiates *klass* for values whose type name
    matches klass.pattern, and yields None otherwise or on any error while
    inspecting the value (gdb treats None as "no printer").
    """
    def matcher(val):
        try:
            if klass.pattern.match(str(val.type)):
                return klass(val)
        except Exception:
            pass
        return None
    return matcher
goobjfile.pretty_printers.extend([makematcher(var) for var in vars().values() if hasattr(var, 'pattern')])
#
# For reference, this is what we're trying to do:
# eface: p *(*(struct 'runtime.rtype'*)'main.e'->type_->data)->string
# iface: p *(*(struct 'runtime.rtype'*)'main.s'->tab->Type->data)->string
#
# interface types can't be recognized by their name, instead we check
# if they have the expected fields. Unfortunately the mapping of
# fields to python attributes in gdb.py isn't complete: you can't test
# for presence other than by trapping.
def is_iface(val):
    """Return True when *val* has the field layout of a runtime iface.

    Interface structs can't be recognized by name, so probe the expected
    fields; gdb raises gdb.error for missing fields, which yields None.
    """
    try:
        # Short-circuit: only touch 'data' once 'tab' has matched.
        return (str(val['tab'].type) == "struct runtime.itab *"
                and str(val['data'].type) == "void *")
    except gdb.error:
        pass
def is_eface(val):
    """Return True when *val* has the field layout of a runtime eface.

    Mirrors is_iface(): field-shape probing, with gdb.error -> None.
    """
    try:
        return (str(val['_type'].type) == "struct runtime._type *"
                and str(val['data'].type) == "void *")
    except gdb.error:
        pass
def lookup_type(name):
    """Resolve *name* to a gdb.Type, or None if no spelling works.

    Tries the name verbatim, then as 'struct <name>', then treats a
    leading character (e.g. '*T') as a pointer to 'struct T'.
    """
    attempts = (
        lambda: gdb.lookup_type(name),
        lambda: gdb.lookup_type('struct ' + name),
        lambda: gdb.lookup_type('struct ' + name[1:]).pointer(),
    )
    for attempt in attempts:
        try:
            return attempt()
        except gdb.error:
            pass
def iface_commontype(obj):
    """Return the reflect.rtype header for an iface/eface value, or None."""
    if is_iface(obj):
        type_ptr = obj['tab']['_type']
    elif is_eface(obj):
        type_ptr = obj['_type']
    else:
        return None
    rtype_ptr = gdb.lookup_type("struct reflect.rtype").pointer()
    return type_ptr.cast(rtype_ptr).dereference()
def iface_dtype(obj):
    "Decode type of the data field of an eface or iface struct."
    # known issue: dtype_name decoded from runtime.rtype is "nested.Foo"
    # but the dwarf table lists it as "full/path/to/nested.Foo"
    dynamic_go_type = iface_commontype(obj)
    if dynamic_go_type is None:
        return
    dtype_name = dynamic_go_type['string'].dereference()['str'].string()
    dynamic_gdb_type = lookup_type(dtype_name)
    if dynamic_gdb_type is None:
        return
    type_size = int(dynamic_go_type['size'])
    uintptr_size = int(dynamic_go_type['size'].type.sizeof) # size is itself an uintptr
    # Values wider than a word are stored indirectly in the interface,
    # so report a pointer to the dynamic type in that case.
    if type_size > uintptr_size:
        dynamic_gdb_type = dynamic_gdb_type.pointer()
    return dynamic_gdb_type
def iface_dtype_name(obj):
    """Decode the dynamic type name stored in an iface/eface value, or None."""
    common = iface_commontype(obj)
    if common is None:
        return None
    # The name lives in the rtype's string header: *string -> 'str' field.
    return common['string'].dereference()['str'].string()
class IfacePrinter:
    """Pretty print interface values.

    Casts the data field to the appropriate dynamic type.
    """
    def __init__(self, val):
        self.val = val

    def display_hint(self):
        return 'string'

    def to_string(self):
        # nil interface: no dynamic type to decode.
        if self.val['data'] == 0:
            return 0x0
        try:
            dtype = iface_dtype(self.val)
        except Exception:
            return "<bad dynamic type>"
        if dtype is None:  # trouble looking up, print something reasonable
            # BUG FIX: was "({0}){0}", which printed the type name twice and
            # silently dropped the data pointer; {1} formats the second arg.
            return "({0}){1}".format(iface_dtype_name(self.val), self.val['data'])
        try:
            return self.val['data'].cast(dtype).dereference()
        except Exception:
            pass
        return self.val['data'].cast(dtype)
def ifacematcher(val):
    """Printer-lookup hook: route iface/eface values to IfacePrinter."""
    if is_iface(val) or is_eface(val):
        return IfacePrinter(val)
    return None
goobjfile.pretty_printers.append(ifacematcher)
#
# Convenience Functions
#
class GoLenFunc(gdb.Function):
    """Convenience function $len(): length of strings, slices, maps or channels."""
    how = ((StringTypePrinter, 'len'), (SliceTypePrinter, 'len'), (MapTypePrinter, 'count'), (ChanTypePrinter, 'qcount'))

    def __init__(self):
        gdb.Function.__init__(self, "len")

    def invoke(self, obj):
        # Choose the length field by which printer recognizes the type name.
        typename = str(obj.type)
        for printer, field in self.how:
            if printer.pattern.match(typename):
                return obj[field]
        return None
class GoCapFunc(gdb.Function):
    """Convenience function $cap(): capacity of slices or channels."""
    how = ((SliceTypePrinter, 'cap'), (ChanTypePrinter, 'dataqsiz'))

    def __init__(self):
        gdb.Function.__init__(self, "cap")

    def invoke(self, obj):
        # Choose the capacity field by which printer recognizes the type name.
        typename = str(obj.type)
        for printer, field in self.how:
            if printer.pattern.match(typename):
                return obj[field]
        return None
class DTypeFunc(gdb.Function):
    """Cast Interface values to their dynamic type.
    For non-interface types this behaves as the identity operation.
    """
    def __init__(self):
        gdb.Function.__init__(self, "dtype")

    def invoke(self, obj):
        try:
            dynamic = iface_dtype(obj)
            return obj['data'].cast(dynamic)
        except gdb.error:
            # Not an interface (or lookup failed): identity.
            return obj
#
# Commands
#
# Goroutine status names, indexed by the G's atomicstatus value; used by the
# goroutine-listing commands to render a human-readable state column.
sts = ('idle', 'runnable', 'running', 'syscall', 'waiting', 'moribund', 'dead', 'recovery')
def linked_list(ptr, linkfield):
    """Yield each node of an intrusive linked list, following *linkfield*.

    Iteration stops at the first falsy (e.g. null) link.
    """
    node = ptr
    while node:
        yield node
        node = node[linkfield]
class GoroutinesCmd(gdb.Command):
    """List all goroutines: `info goroutines` prints one line per live G,
    with '*' marking goroutines currently bound to an M (OS thread)."""
    def __init__(self):
        gdb.Command.__init__(self, "info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
    def invoke(self, _arg, _from_tty):
        # args = gdb.string_to_argv(arg)
        vp = gdb.lookup_type('void').pointer()
        # Walk the runtime's global goroutine slice, skipping dead Gs.
        for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
            if ptr['atomicstatus'] == 6:  # 'gdead'
                continue
            s = ' '
            if ptr['m']:
                s = '*'
            pc = ptr['sched']['pc'].cast(vp)
            # python2 will not cast pc (type void*) to an int cleanly
            # instead python2 and python3 work with the hex string representation
            # of the void pointer which we can parse back into an int.
            # int(pc) will not work.
            try:
                #python3 / newer versions of gdb
                pc = int(pc)
            except gdb.error:
                # str(pc) can return things like
                # "0x429d6c <runtime.gopark+284>", so
                # chop at first space.
                pc = int(str(pc).split(None, 1)[0], 16)
            blk = gdb.block_for_pc(pc)
            print(s, ptr['goid'], "{0:8s}".format(sts[int(ptr['atomicstatus'])]), blk.function)
def find_goroutine(goid):
    """
    find_goroutine attempts to find the goroutine identified by goid.
    It returns a tuple of gdb.Value's representing the program counter
    and stack pointer for the goroutine, or (None, None) if not found.
    @param int goid
    @return tuple (gdb.Value, gdb.Value)
    """
    vp = gdb.lookup_type('void').pointer()
    for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
        if ptr['atomicstatus'] == 6:  # 'gdead'
            continue
        if ptr['goid'] == goid:
            # NOTE: yields (pc, sp) in that order -- callers unpack `pc, sp`.
            return (ptr['sched'][x].cast(vp) for x in ('pc', 'sp'))
    return None, None
class GoroutineCmd(gdb.Command):
    """Execute gdb command in the context of goroutine <goid>.
    Switch PC and SP to the ones in the goroutine's G structure,
    execute an arbitrary gdb command, and restore PC and SP.
    Usage: (gdb) goroutine <goid> <gdbcmd>
    Note that it is ill-defined to modify state in the context of a goroutine.
    Restrict yourself to inspecting values.
    """
    def __init__(self):
        gdb.Command.__init__(self, "goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
    def invoke(self, arg, _from_tty):
        goid, cmd = arg.split(None, 1)
        goid = gdb.parse_and_eval(goid)
        pc, sp = find_goroutine(int(goid))
        if not pc:
            print("No such goroutine: ", goid)
            return
        try:
            #python3 / newer versions of gdb
            pc = int(pc)
        except gdb.error:
            # Older gdb: parse the pointer back out of its hex-string form.
            pc = int(str(pc).split(None, 1)[0], 16)
        # Save current registers, install the goroutine's PC/SP, run the
        # command, then restore unconditionally via finally.
        save_frame = gdb.selected_frame()
        gdb.parse_and_eval('$save_sp = $sp')
        gdb.parse_and_eval('$save_pc = $pc')
        gdb.parse_and_eval('$sp = {0}'.format(str(sp)))
        gdb.parse_and_eval('$pc = {0}'.format(str(pc)))
        try:
            gdb.execute(cmd)
        finally:
            gdb.parse_and_eval('$sp = $save_sp')
            gdb.parse_and_eval('$pc = $save_pc')
            save_frame.select()
class GoIfaceCmd(gdb.Command):
    """Print Static and dynamic interface types: `iface <var> [<var>...]`."""
    def __init__(self):
        gdb.Command.__init__(self, "iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
    def invoke(self, arg, _from_tty):
        for obj in gdb.string_to_argv(arg):
            try:
                #TODO fix quoting for qualified variable names
                obj = gdb.parse_and_eval(str(obj))
            except Exception as e:
                print("Can't parse ", obj, ": ", e)
                continue
            if obj['data'] == 0:
                # nil interface: no dynamic type to decode.
                dtype = "nil"
            else:
                dtype = iface_dtype(obj)
            if dtype is None:
                print("Not an interface: ", obj.type)
                continue
            print("{0}: {1}".format(obj.type, dtype))
# TODO: print interface's methods and dynamic type's func pointers thereof.
#rsc: "to find the number of entries in the itab's Fn field look at
# itab.inter->numMethods
# i am sure i have the names wrong but look at the interface type
# and its method count"
# so Itype will start with a commontype which has kind = interface
#
# Register all convenience functions and CLI commands
#
GoLenFunc()
GoCapFunc()
DTypeFunc()
GoroutinesCmd()
GoroutineCmd()
GoIfaceCmd()
| bsd-3-clause |
SebasSBM/django | django/utils/html.py | 136 | 14667 | """HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
import warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils.six.moves.urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.text import normalize_newlines
from .html_parser import HTMLParseError, HTMLParser
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', '\'', '!']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('<', '>'), ('"', '"'), ('\'', '\'')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['·', '*', '\u2022', '•', '•', '•']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'''([\s<>"']+)''')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(
r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|'
'<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(
r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join(re.escape(x)
for x in DOTS), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?: |\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
    """
    Returns the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.
    This function always escapes its input, even if it's already escaped and
    marked as such. This may result in double-escaping. If this is a concern,
    use conditional_escape() instead.
    """
    # Ampersand is replaced first so the entities added below survive intact.
    result = force_text(text)
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                         ('"', '&quot;'), ("'", '&#39;')):
        result = result.replace(char, entity)
    return mark_safe(result)
escape = allow_lazy(escape, six.text_type, SafeText)
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    # _js_escapes maps each risky code point to its \\uXXXX form; translate
    # performs the whole substitution in a single pass.
    text = force_text(value)
    return mark_safe(text.translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type, SafeText)
def conditional_escape(text):
    """Escape *text* unless it already knows how to render itself as HTML.

    Objects implementing the __html__ convention (Django's SafeData,
    third-party libraries like markupsafe) are trusted and returned via
    their own __html__(); everything else goes through escape().
    """
    if hasattr(text, '__html__'):
        return text.__html__()
    return escape(text)
def format_html(format_string, *args, **kwargs):
    """str.format that escapes every argument and marks the result safe.

    Use this instead of str.format or % interpolation when building up
    small HTML fragments.
    """
    escaped_args = [conditional_escape(arg) for arg in args]
    escaped_kwargs = dict((key, conditional_escape(value))
                          for key, value in six.iteritems(kwargs))
    return mark_safe(format_string.format(*escaped_args, **escaped_kwargs))
def format_html_join(sep, format_string, args_generator):
    """Apply format_html to each args tuple and join the pieces with *sep*.

    *sep* is passed through conditional_escape as well. *args_generator*
    yields the positional-argument tuples handed to format_html, e.g.:
    format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
                                              for u in users))
    """
    pieces = (format_html(format_string, *tuple(args))
              for args in args_generator)
    return mark_safe(conditional_escape(sep).join(pieces))
def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    value = normalize_newlines(value)
    # Two-or-more newlines separate paragraphs; single newlines become <br />.
    paragraphs = re.split('\n{2,}', value)
    if autoescape:
        prepare = escape
    else:
        prepare = lambda fragment: fragment
    rendered = ['<p>%s</p>' % prepare(p).replace('\n', '<br />')
                for p in paragraphs]
    return '\n\n'.join(rendered)
linebreaks = allow_lazy(linebreaks, six.text_type)
class MLStripper(HTMLParser):
    """HTMLParser subclass that drops tags, keeping text and entities."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.reset()
        # Text fragments accumulated during feed(); joined by get_data().
        self.fed = []

    def get_data(self):
        return ''.join(self.fed)

    def handle_data(self, d):
        self.fed.append(d)

    def handle_entityref(self, name):
        # Re-emit named entities (e.g. &amp;) verbatim.
        self.fed.append('&%s;' % name)

    def handle_charref(self, name):
        # Re-emit numeric character references verbatim.
        self.fed.append('&#%s;' % name)
def _strip_once(value):
    """
    Internal tag stripping utility used by strip_tags.
    """
    s = MLStripper()
    try:
        s.feed(value)
    except HTMLParseError:
        # Unparseable input: return it untouched rather than losing data.
        return value
    try:
        s.close()
    except HTMLParseError:
        # Parsing broke mid-stream: keep what was stripped plus the raw tail.
        return s.get_data() + s.rawdata
    else:
        return s.get_data()
def strip_tags(value):
    """Returns the given HTML with all tags stripped."""
    # Usually a single _strip_once pass suffices; keep looping only while
    # angle brackets remain AND each pass keeps shrinking the string. The
    # length guard protects against http://bugs.python.org/issue20288
    # (Python 2 < 2.7.7 / Python 3 < 3.3.5), where a pass could grow it.
    while '<' in value and '>' in value:
        stripped = _strip_once(value)
        if len(stripped) >= len(value):
            break
        value = stripped
    return value
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
    """Returns the given HTML with given tags removed.

    Deprecated: regex-based tag removal is not safe for sanitization.
    """
    warnings.warn(
        "django.utils.html.remove_tags() and the removetags template filter "
        "are deprecated. Consider using the bleach library instead.",
        RemovedInDjango110Warning, stacklevel=3
    )
    # Build one alternation group covering every requested tag name.
    tags = [re.escape(tag) for tag in tags.split()]
    tags_re = '(%s)' % '|'.join(tags)
    starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
    endtag_re = re.compile('</%s>' % tags_re)
    html = starttag_re.sub('', html)
    html = endtag_re.sub('', html)
    return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
    """Returns the given HTML with spaces between tags removed."""
    # Any whitespace run between a closing '>' and the next '<' collapses.
    collapsed = re.sub(r'>\s+<', '><', force_text(value))
    return collapsed
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
    """Returns the given HTML with all entities (&something;) stripped."""
    warnings.warn(
        "django.utils.html.strip_entities() is deprecated.",
        RemovedInDjango110Warning, stacklevel=2
    )
    # Matches both named (&amp;) and numeric (&#38;) entities.
    return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def smart_urlquote(url):
    "Quotes a URL if it isn't already quoted."
    def unquote_quote(segment):
        # Unquote first so already-quoted input isn't double-escaped.
        segment = unquote(force_str(segment))
        # Tilde is part of RFC3986 Unreserved Characters
        # http://tools.ietf.org/html/rfc3986#section-2.3
        # See also http://bugs.python.org/issue16285
        segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + str('~'))
        return force_text(segment)
    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)
    try:
        netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)
    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(force_str(q[0])), unquote(force_str(q[1])))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)
    path = unquote_quote(path)
    fragment = unquote_quote(fragment)
    return urlunsplit((scheme, netloc, path, query, fragment))
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.
    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.
    If trim_url_limit is not None, the URLs in the link text longer than this
    limit will be truncated to trim_url_limit-3 characters and appended with
    an ellipsis.
    If nofollow is True, the links will get a rel="nofollow" attribute.
    If autoescape is True, the link text and URLs will be autoescaped.
    """
    safe_input = isinstance(text, SafeData)
    def trim_url(x, limit=trim_url_limit):
        # Shortens only the *display* text of a URL; the href stays intact.
        if limit is None or len(x) <= limit:
            return x
        return '%s...' % x[:max(0, limit - 3)]
    def unescape(text, trail):
        """
        If input URL is HTML-escaped, unescape it so as we can safely feed it to
        smart_urlquote. For example:
        http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
        """
        unescaped = (text + trail).replace(
            '&amp;', '&').replace('&lt;', '<').replace(
            '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
        if trail and unescaped.endswith(trail):
            # Remove trail for unescaped if it was not consumed by unescape
            unescaped = unescaped[:-len(trail)]
        elif trail == ';':
            # Trail was consumed by unescape (as end-of-entity marker), move it to text
            text += trail
            trail = ''
        return text, unescaped, trail
    words = word_split_re.split(force_text(text))
    for i, word in enumerate(words):
        if '.' in word or '@' in word or ':' in word:
            # Deal with punctuation: peel leading/trailing punctuation off the
            # candidate URL so only the core is linkified.
            lead, middle, trail = '', word, ''
            for punctuation in TRAILING_PUNCTUATION:
                if middle.endswith(punctuation):
                    middle = middle[:-len(punctuation)]
                    trail = punctuation + trail
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead = lead + opening
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing)
                    and middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail
            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote(middle_unescaped)
            elif simple_url_2_re.match(middle):
                # Bare www./known-TLD link: assume the http scheme.
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote('http://%s' % middle_unescaped)
            elif ':' not in middle and simple_email_re.match(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = domain.encode('idna').decode('ascii')
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''
            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    trimmed = escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
def avoid_wrapping(value):
    """
    Avoid text wrapping in the middle of a phrase by adding non-breaking
    spaces where there previously were normal spaces.
    """
    # split(" ") preserves empty fields, so joining with NBSP reproduces the
    # original spacing exactly (equivalent to replace(" ", "\xa0")).
    return "\xa0".join(value.split(" "))
def html_safe(klass):
    """
    A decorator that defines the __html__ method. This helps non-Django
    templates to detect classes whose __str__ methods return SafeText.
    """
    # Refuse to stack on a class that already opted into the protocol.
    if '__html__' in klass.__dict__:
        raise ValueError(
            "can't apply @html_safe to %s because it defines "
            "__html__()." % klass.__name__
        )
    if six.PY2:
        # On Python 2 the text protocol is __unicode__; wrap it so it yields
        # SafeText, and expose __html__ returning the unicode form.
        if '__unicode__' not in klass.__dict__:
            raise ValueError(
                "can't apply @html_safe to %s because it doesn't "
                "define __unicode__()." % klass.__name__
            )
        klass_unicode = klass.__unicode__
        klass.__unicode__ = lambda self: mark_safe(klass_unicode(self))
        klass.__html__ = lambda self: unicode(self)  # NOQA: unicode undefined on PY3
    else:
        if '__str__' not in klass.__dict__:
            raise ValueError(
                "can't apply @html_safe to %s because it doesn't "
                "define __str__()." % klass.__name__
            )
        # Capture the original __str__ before wrapping to avoid recursion.
        klass_str = klass.__str__
        klass.__str__ = lambda self: mark_safe(klass_str(self))
        klass.__html__ = lambda self: str(self)
    return klass
| bsd-3-clause |
CiscoSystems/jujucharm-n1k | charms/precise/swift-proxy/hooks/swift_hooks.py | 1 | 8764 | #!/usr/bin/python
import os
import sys
import shutil
import uuid
import subprocess
import charmhelpers.contrib.openstack.utils as openstack
import charmhelpers.contrib.hahelpers.cluster as cluster
from swift_utils import (
register_configs,
restart_map,
determine_packages,
ensure_swift_dir,
SWIFT_RINGS, WWW_DIR,
initialize_ring,
swift_user,
SWIFT_HA_RES,
balance_ring,
SWIFT_CONF_DIR,
get_zone,
exists_in_ring,
add_to_ring,
should_balance,
do_openstack_upgrade,
write_rc_script
)
from swift_context import get_swift_hash
from charmhelpers.core.hookenv import (
config,
unit_get,
relation_set,
relation_ids,
relation_get,
log, ERROR,
Hooks, UnregisteredHookError,
open_port
)
from charmhelpers.core.host import (
service_restart,
restart_on_change
)
from charmhelpers.fetch import (
apt_install,
apt_update
)
from charmhelpers.payload.execd import execd_preinstall
extra_pkgs = [
"haproxy",
"python-jinja2"
]
hooks = Hooks()
CONFIGS = register_configs()
@hooks.hook('install')
def install():
    """Install swift-proxy packages, initialize rings and the ring-serving dir."""
    execd_preinstall()
    src = config('openstack-origin')
    if src != 'distro':
        openstack.configure_installation_source(src)
    apt_update(fatal=True)
    rel = openstack.get_os_codename_install_source(src)
    pkgs = determine_packages(rel)
    apt_install(pkgs, fatal=True)
    apt_install(extra_pkgs, fatal=True)
    ensure_swift_dir()
    # initialize new storage rings. SWIFT_RINGS maps ring name -> builder
    # path, so ring[1] is the path handed to initialize_ring.
    for ring in SWIFT_RINGS.iteritems():
        initialize_ring(ring[1],
                        config('partition-power'),
                        config('replicas'),
                        config('min-hours'))
    # configure a directory on webserver for distributing rings.
    if not os.path.isdir(WWW_DIR):
        os.mkdir(WWW_DIR, 0755)
        uid, gid = swift_user()
        os.chown(WWW_DIR, uid, gid)
@hooks.hook('identity-service-relation-joined')
def keystone_joined(relid=None):
    """Register swift endpoints with keystone (leader only; VIP when clustered)."""
    if not cluster.eligible_leader(SWIFT_HA_RES):
        return
    # Advertise the VIP when clustered, otherwise this unit's address.
    if cluster.is_clustered():
        hostname = config('vip')
    else:
        hostname = unit_get('private-address')
    port = config('bind-port')
    if cluster.https():
        proto = 'https'
    else:
        proto = 'http'
    admin_url = '%s://%s:%s' % (proto, hostname, port)
    internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url
    relation_set(service='swift',
                 region=config('region'),
                 public_url=public_url, internal_url=internal_url,
                 admin_url=admin_url,
                 requested_roles=config('operator-roles'),
                 relation_id=relid)
@hooks.hook('identity-service-relation-changed')
@restart_on_change(restart_map())
def keystone_changed():
    """Re-render configs and apache SSL state when identity data changes."""
    configure_https()
def balance_rings():
    '''handle doing ring balancing and distribution.'''
    new_ring = False
    for ring in SWIFT_RINGS.itervalues():
        if balance_ring(ring):
            log('Balanced ring %s' % ring)
            new_ring = True
    if not new_ring:
        # Nothing changed; avoid republishing identical rings.
        return
    # Publish the freshly balanced ring files where storage nodes can fetch.
    for ring in SWIFT_RINGS.keys():
        f = '%s.ring.gz' % ring
        shutil.copyfile(os.path.join(SWIFT_CONF_DIR, f),
                        os.path.join(WWW_DIR, f))
    if cluster.eligible_leader(SWIFT_HA_RES):
        msg = 'Broadcasting notification to all storage nodes that new '\
              'ring is ready for consumption.'
        log(msg)
        path = WWW_DIR.split('/var/www/')[1]
        # Fresh uuid forces storage units to see the relation as changed.
        trigger = uuid.uuid4()
        if cluster.is_clustered():
            hostname = config('vip')
        else:
            hostname = unit_get('private-address')
        rings_url = 'http://%s/%s' % (hostname, path)
        # notify storage nodes that there is a new ring to fetch.
        for relid in relation_ids('swift-storage'):
            relation_set(relation_id=relid, swift_hash=get_swift_hash(),
                         rings_url=rings_url, trigger=trigger)
        service_restart('swift-proxy')
@hooks.hook('swift-storage-relation-changed')
@restart_on_change(restart_map())
def storage_changed():
    """Add a storage node's devices to all rings; rebalance when appropriate."""
    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': openstack.get_host_ip(relation_get('private-address')),
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }
    # Relation data arrives piecemeal; bail until everything is present.
    if None in node_settings.itervalues():
        log('storage_changed: Relation not ready.')
        return None
    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])
    CONFIGS.write_all()
    # allow for multiple devs per unit, passed along as a : separated list
    devs = relation_get('device').split(':')
    for dev in devs:
        node_settings['device'] = dev
        for ring in SWIFT_RINGS.itervalues():
            if not exists_in_ring(ring, node_settings):
                add_to_ring(ring, node_settings)
    if should_balance([r for r in SWIFT_RINGS.itervalues()]):
        balance_rings()
@hooks.hook('swift-storage-relation-broken')
@restart_on_change(restart_map())
def storage_broken():
    """Rewrite configs when a storage relation goes away."""
    CONFIGS.write_all()
@hooks.hook('config-changed')
@restart_on_change(restart_map())
def config_changed():
    """Apply charm config: HTTPS state, open port, optional OpenStack upgrade."""
    configure_https()
    open_port(config('bind-port'))
    # Upgrade when the openstack-origin config now offers a newer OpenStack
    # release than the one python-swift is installed from.
    src = config('openstack-origin')
    available = openstack.get_os_codename_install_source(src)
    installed = openstack.get_os_codename_package('python-swift')
    if (available and
        openstack.get_os_version_codename(available) >
        openstack.get_os_version_codename(installed)):
        pkgs = determine_packages(available)
        do_openstack_upgrade(src, pkgs)
@hooks.hook('cluster-relation-changed',
            'cluster-relation-joined')
@restart_on_change(restart_map())
def cluster_changed():
    """Rewrite configs when peer units join or change."""
    CONFIGS.write_all()
@hooks.hook('ha-relation-changed')
def ha_relation_changed():
    """Once hacluster reports the cluster formed, re-advertise endpoints via the VIP."""
    clustered = relation_get('clustered')
    if clustered and cluster.is_leader(SWIFT_HA_RES):
        log('Cluster configured, notifying other services and'
            'updating keystone endpoint configuration')
        # Tell all related services to start using
        # the VIP instead
        for r_id in relation_ids('identity-service'):
            keystone_joined(relid=r_id)
@hooks.hook('ha-relation-joined')
def ha_relation_joined():
    """Hand the hacluster charm our VIP and haproxy resource definitions."""
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    corosync_bindiface = config('ha-bindiface')
    corosync_mcastport = config('ha-mcastport')
    vip = config('vip')
    vip_cidr = config('vip_cidr')
    vip_iface = config('vip_iface')
    if not vip:
        # The VIP is mandatory for HA; abort the hook loudly.
        log('Unable to configure hacluster as vip not provided',
            level=ERROR)
        sys.exit(1)
    # Obtain resources
    resources = {
        'res_swift_vip': 'ocf:heartbeat:IPaddr2',
        'res_swift_haproxy': 'lsb:haproxy'
    }
    resource_params = {
        'res_swift_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
        (vip, vip_cidr, vip_iface),
        'res_swift_haproxy': 'op monitor interval="5s"'
    }
    init_services = {
        'res_swift_haproxy': 'haproxy'
    }
    clones = {
        'cl_swift_haproxy': 'res_swift_haproxy'
    }
    relation_set(init_services=init_services,
                 corosync_bindiface=corosync_bindiface,
                 corosync_mcastport=corosync_mcastport,
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def configure_https():
    '''
    Enables SSL API Apache config if appropriate and kicks identity-service
    with any required api updates.
    '''
    # need to write all to ensure changes to the entire request pipeline
    # propagate (c-api, haproxy, apache)
    CONFIGS.write_all()
    if 'https' in CONFIGS.complete_contexts():
        cmd = ['a2ensite', 'openstack_https_frontend']
        subprocess.check_call(cmd)
    else:
        cmd = ['a2dissite', 'openstack_https_frontend']
        subprocess.check_call(cmd)
    # Apache 2.4 required enablement of configuration
    if os.path.exists('/usr/sbin/a2enconf'):
        subprocess.check_call(['a2enconf', 'swift-rings'])
    # Endpoints may have flipped http<->https; re-advertise to keystone.
    for rid in relation_ids('identity-service'):
        keystone_joined(relid=rid)
    write_rc_script()
def main():
    """Entry point: dispatch the hook named on the command line."""
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        # Hooks we don't implement are simply skipped, not errors.
        log('Unknown hook {} - skipping.'.format(e))

if __name__ == '__main__':
    main()
| apache-2.0 |
yugang/crosswalk-test-suite | webapi/tct-application-tizen-tests/inst.wgt.py | 1 | 6853 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # Run *cmd* locally via the shell, echoing each output line as it
    # arrives; returns (exit_code, list_of_output_lines).
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() stays None until the process exits; keep draining output
        # until the stream is empty AND the process has terminated.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    # Wrap pkgcmd invocations so they run as the configured app user.
    if "pkgcmd" in cmd:
        # NOTE(review): XW_ENV is commented out at the top of this file, so
        # this branch raises NameError if ever taken -- confirm and restore
        # the XW_ENV assignment.
        cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
    return cmd
def getUSERID():
    # Resolve the configured user's uid on the target device; returns the
    # (exit_code, output_lines) pair produced by doCMD.
    if PARAMETERS.mode == "SDB":
        template = "sdb -s %s shell id -u %s"
    else:
        template = "ssh %s \"id -u %s\""
    return doCMD(template % (PARAMETERS.device, PARAMETERS.user))
def getPKGID(pkg_name=None):
    # Look up the installed package id for *pkg_name* by parsing the
    # output of `pkgcmd -l` on the target device; returns None if absent.
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_pkg_id = None
    for line in output:
        pkg_infos = line.split()
        if len(pkg_infos) == 4:
            # assumes 4-token lines are headers/noise, not package rows --
            # TODO confirm against actual pkgcmd output format
            continue
        # Tokens are bracket-wrapped, e.g. [name]; strip the brackets.
        name = pkg_infos[5]
        name = name.lstrip('[').rstrip(']')
        print "name is: %s" % name
        if pkg_name == name:
            test_pkg_id = pkg_infos[3]
            test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
            print test_pkg_id
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    # Run *cmd* on the target device (sdb shell or ssh) and return the
    # (exit_code, output_lines) pair from doCMD.
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    # Copy *src* to *dest* on the target device (`sdb push` in SDB mode,
    # `scp -r` otherwise), then sync the device filesystem.
    # Returns True on success, False on failure.
    #
    # BUG FIX: the original returned True when the copy command FAILED
    # (return_code != 0), inverting the meaning callers rely on
    # (`if not doRemoteCopy(...): action_status = False`), so successful
    # copies were reported as failures and vice versa.
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush pending writes so the pushed files are visible immediately.
    doRemoteCMD("sync")
    return return_code == 0
def uninstPKGs():
    """Uninstall every .wgt package found under SCRIPT_DIR and clean up.

    Returns True when every uninstall and cleanup step succeeded.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # "mediasrc" holds media fixtures, not installable packages.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                # The package name is the .wgt file name without extension.
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t wgt -q -n %s" % pkg_id)
                # pkgcmd reports errors as "Failure" lines, not exit codes.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Remove the files that were pushed to the device at install time.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    (return_code, output) = doRemoteCMD(
        "rm -rf %s/Others" % SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Push and install every .wgt package found under SCRIPT_DIR.

    Returns True when every copy and install step succeeded.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # "mediasrc" holds media fixtures, handled separately below.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
                # The pushed .wgt is no longer needed once installed.
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # pkgcmd reports errors as "Failure" lines, not exit codes.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Do some special copy/delete... steps
    if not doRemoteCopy(os.path.join(SCRIPT_DIR, "mediasrc"), "%s/Others" % SRC_DIR):
        action_status = False
    return action_status
def main():
    """Entry point: parse options, resolve device/user, then (un)install."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    # Defaults: run as the "app" user over SDB.
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        # Auto-pick the first attached device when none was given.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        # Any non-SDB mode value is normalized to SSH.
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        # DBus session address so pkgcmd can reach the user's session bus.
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    # Default action (no -u) is install.
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
# Script entry point; exit 0 only if main() did not bail out earlier.
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause |
gdementen/numba | numba/hsa/tests/hsapy/test_decorator.py | 6 | 1505 | from __future__ import print_function, absolute_import
import numpy as np
import numba.unittest_support as unittest
from numba import hsa
class TestDecorators(unittest.TestCase):
    """Smoke tests for @hsa.jit: typed kernels, device functions, autojit."""

    def test_kernel_jit(self):
        # Kernel compiled against an explicit signature.
        @hsa.jit("(float32[:], float32[:])")
        def copy_vector(dst, src):
            tid = hsa.get_global_id(0)
            if tid < dst.size:
                dst[tid] = src[tid]

        # NOTE(review): the signature declares float32 arrays but the data
        # is uint32 -- presumably the dispatcher accepts/casts it; confirm.
        src = np.arange(10, dtype=np.uint32)
        dst = np.zeros_like(src)
        copy_vector[10, 1](dst, src)
        np.testing.assert_equal(dst, src)

    def test_device_jit(self):
        # device=True functions are callable only from other HSA functions.
        @hsa.jit("float32(float32[:], intp)", device=True)
        def inner(src, idx):
            return src[idx]

        @hsa.jit("(float32[:], float32[:])")
        def outer(dst, src):
            tid = hsa.get_global_id(0)
            if tid < dst.size:
                dst[tid] = inner(src, tid)

        src = np.arange(10, dtype=np.uint32)
        dst = np.zeros_like(src)
        outer[10, 1](dst, src)
        np.testing.assert_equal(dst, src)

    def test_autojit_kernel(self):
        # No signature: a specialization is compiled per argument dtype.
        @hsa.jit
        def copy_vector(dst, src):
            tid = hsa.get_global_id(0)
            if tid < dst.size:
                dst[tid] = src[tid]

        for dtype in [np.uint32, np.float32]:
            src = np.arange(10, dtype=dtype)
            dst = np.zeros_like(src)
            copy_vector[10, 1](dst, src)
            np.testing.assert_equal(dst, src)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
ForgottenKahz/CloudOPC | app/main.py | 1 | 2316 | from flask import Flask, render_template, session, redirect, url_for, g
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from datetime import datetime
import os
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask.ext.sqlalchemy import SQLAlchemy
#form stuff
class NameForm(Form):
    # Single-field form asking for the visitor's name; used by index().
    name = StringField('What is your name?', validators=[Required()])
    submit = SubmitField('Submit')
#config stuff
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY']='123'
#Database stuff
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
class Role(db.Model):
    # Role lookup table; one Role has many Users (via the ``role`` backref).
    __tablename__='roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    users = db.relationship('User', backref='role')

    def __repr__(self):
        return '<Role %r>' % self.name
class User(db.Model):
    # Application user; each User optionally belongs to one Role.
    __tablename__='users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))

    def __repr__(self):
        return '<User %r>' % self.username
bootstrap = Bootstrap(app)
moment = Moment(app)
@app.errorhandler(404)
def page_not_found(e):
    # Render the custom 404 page with the matching status code.
    return render_template('404.html'),404
@app.errorhandler(500)
def internal_server_error(e):
    # Render the custom 500 page with the matching status code.
    return render_template('500.html'),500
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: greet the visitor, remembering whether we know them."""
    form = NameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.name.data).first()
        if user is None:
            # First visit under this name: persist a new user row.
            user = User(username = form.name.data)
            db.session.add(user)
            session['known'] = False
        else:
            session['known'] = True
        session['name'] = form.name.data
        # Post/Redirect/Get: avoid re-submitting the form on refresh.
        return redirect(url_for('index'))
    return render_template('index.html', current_time=datetime.utcnow(), form=form, name=session.get('name'), known = session.get('known', False))
@app.route('/user/<name>')
def user(name):
    # Personalized greeting page; ``name`` comes from the URL path.
    return render_template('user.html', name=name)
@app.route('/temp')
def temp():
    # Static scratch page.
    return render_template('temp.html')
# Development server only; debug=True must not be used in production.
if __name__ == '__main__':
    app.run(debug=True)
Bysmyyr/chromium-crosswalk | tools/telemetry/telemetry/internal/backends/chrome_inspector/inspector_runtime.py | 15 | 2668 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import exceptions
class InspectorRuntime(object):
  """Wrapper around the DevTools Runtime domain of an inspector websocket."""

  def __init__(self, inspector_websocket):
    self._inspector_websocket = inspector_websocket
    self._inspector_websocket.RegisterDomain('Runtime', self._OnNotification)
    # Context tracking is enabled lazily by EnableAllContexts();
    # _max_context_id records the highest execution context id seen.
    self._contexts_enabled = False
    self._max_context_id = None

  def _OnNotification(self, msg):
    # Track newly created execution contexts (e.g. iframes) once enabled.
    if (self._contexts_enabled and
        msg['method'] == 'Runtime.executionContextCreated'):
      self._max_context_id = max(self._max_context_id,
                                 msg['params']['context']['id'])

  def Execute(self, expr, context_id, timeout):
    # Evaluate for side effects only; '; 0;' forces a trivial result value.
    self.Evaluate(expr + '; 0;', context_id, timeout)

  def Evaluate(self, expr, context_id, timeout):
    """Evaluates a javascript expression and returns the result.

    |context_id| can refer to an iframe. The main page has context_id=1, the
    first iframe context_id=2, etc.

    Raises:
      exceptions.EvaluateException
      exceptions.WebSocketDisconnected
      websocket.WebSocketException
      socket.error
    """
    request = {
      'method': 'Runtime.evaluate',
      'params': {
        'expression': expr,
        'returnByValue': True
        }
      }
    if context_id is not None:
      # Targeting a specific context requires context tracking to be on.
      self.EnableAllContexts()
      request['params']['contextId'] = context_id
    res = self._inspector_websocket.SyncRequest(request, timeout)
    if 'error' in res:
      raise exceptions.EvaluateException(res['error']['message'])
    if 'wasThrown' in res['result'] and res['result']['wasThrown']:
      # TODO(nduca): propagate stacks from javascript up to the python
      # exception.
      raise exceptions.EvaluateException(res['result']['result']['description'])
    if res['result']['result']['type'] == 'undefined':
      return None
    return res['result']['result']['value']

  def EnableAllContexts(self):
    """Allow access to iframes.

    Raises:
      exceptions.WebSocketDisconnected
      websocket.WebSocketException
      socket.error
    """
    if not self._contexts_enabled:
      self._contexts_enabled = True
      self._inspector_websocket.SyncRequest({'method': 'Runtime.enable'},
                                            timeout=30)
    return self._max_context_id

  def RunInspectorCommand(self, command, timeout):
    """Runs an inspector command.

    Raises:
      exceptions.WebSocketDisconnected
      websocket.WebSocketException
      socket.error
    """
    res = self._inspector_websocket.SyncRequest(command, timeout)
    return res
| bsd-3-clause |
dekomote/mezzanine-modeltranslation-backport | mezzanine/pages/templatetags/pages_tags.py | 9 | 8119 | from __future__ import unicode_literals
from future.builtins import str
from collections import defaultdict
from django.core.exceptions import ImproperlyConfigured
from django.template import Context, TemplateSyntaxError, Variable
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from mezzanine.pages.models import Page
from mezzanine.utils.urls import home_slug
from mezzanine import template
register = template.Library()
@register.render_tag
def page_menu(context, token):
    """
    Return a list of child pages for the given parent, storing all
    pages in a dict in the context when first called using parents as keys
    for retrieval on subsequent recursive calls from the menu template.
    """
    # First arg could be the menu template file name, or the parent page.
    # Also allow for both to be used.
    template_name = None
    parent_page = None
    parts = token.split_contents()[1:]
    for part in parts:
        part = Variable(part).resolve(context)
        if isinstance(part, str):
            template_name = part
        elif isinstance(part, Page):
            parent_page = part
    if template_name is None:
        try:
            template_name = context["menu_template_name"]
        except KeyError:
            error = "No template found for page_menu in: %s" % parts
            raise TemplateSyntaxError(error)
    context["menu_template_name"] = template_name
    if "menu_pages" not in context:
        # First invocation for this render: build the full page tree once.
        try:
            user = context["request"].user
            slug = context["request"].path
        except KeyError:
            user = None
            slug = ""
        # Deferred counters: evaluated only after "menu_pages" is populated.
        num_children = lambda id: lambda: len(context["menu_pages"][id])
        has_children = lambda id: lambda: num_children(id)() > 0
        rel = [m.__name__.lower() for m in Page.get_content_models()]
        published = Page.objects.published(for_user=user).select_related(*rel)
        # Store the current page being viewed in the context. Used
        # for comparisons in page.set_menu_helpers.
        if "page" not in context:
            try:
                context["_current_page"] = published.get(slug=slug)
            except Page.DoesNotExist:
                context["_current_page"] = None
        elif slug:
            context["_current_page"] = context["page"]
        # Some homepage related context flags. on_home is just a helper
        # indicated we're on the homepage. has_home indicates an actual
        # page object exists for the homepage, which can be used to
        # determine whether or not to show a hard-coded homepage link
        # in the page menu.
        home = home_slug()
        context["on_home"] = slug == home
        context["has_home"] = False
        # Maintain a dict of page IDs -> parent IDs for fast
        # lookup in setting page.is_current_or_ascendant in
        # page.set_menu_helpers.
        context["_parent_page_ids"] = {}
        pages = defaultdict(list)
        for page in published.order_by("_order"):
            page.set_helpers(context)
            context["_parent_page_ids"][page.id] = page.parent_id
            setattr(page, "num_children", num_children(page.id))
            setattr(page, "has_children", has_children(page.id))
            pages[page.parent_id].append(page)
            if page.slug == home:
                context["has_home"] = True
        context["menu_pages"] = pages
    # ``branch_level`` must be stored against each page so that the
    # calculation of it is correctly applied. This looks weird but if we do
    # the ``branch_level`` as a separate arg to the template tag with the
    # addition performed on it, the addition occurs each time the template
    # tag is called rather than once per level.
    context["branch_level"] = 0
    parent_page_id = None
    if parent_page is not None:
        context["branch_level"] = getattr(parent_page, "branch_level", 0) + 1
        parent_page_id = parent_page.id
    # Build the ``page_branch`` template variable, which is the list of
    # pages for the current parent. Here we also assign the attributes
    # to the page object that determines whether it belongs in the
    # current menu template being rendered.
    context["page_branch"] = context["menu_pages"].get(parent_page_id, [])
    context["page_branch_in_menu"] = False
    for page in context["page_branch"]:
        page.in_menu = page.in_menu_template(template_name)
        page.num_children_in_menu = 0
        if page.in_menu:
            context["page_branch_in_menu"] = True
        for child in context["menu_pages"].get(page.id, []):
            if child.in_menu_template(template_name):
                page.num_children_in_menu += 1
        page.has_children_in_menu = page.num_children_in_menu > 0
        page.branch_level = context["branch_level"]
        page.parent = parent_page
        context["parent_page"] = page.parent
        # Prior to pages having the ``in_menus`` field, pages had two
        # boolean fields ``in_navigation`` and ``in_footer`` for
        # controlling menu inclusion. Attributes and variables
        # simulating these are maintained here for backwards
        # compatibility in templates, but will be removed eventually.
        page.in_navigation = page.in_menu
        page.in_footer = not (not page.in_menu and "footer" in template_name)
        if page.in_navigation:
            context["page_branch_in_navigation"] = True
        if page.in_footer:
            context["page_branch_in_footer"] = True
    t = get_template(template_name)
    return t.render(Context(context))
@register.as_tag
def models_for_pages(*args):
    """
    Create a select list containing each of the models that subclass the
    ``Page`` model.

    Deprecated: use ``PageAdmin.get_content_models`` instead.
    """
    from warnings import warn
    # Fixed typo in the warning text ("deprectaed" -> "deprecated").
    warn("template tag models_for_pages is deprecated, use "
         "PageAdmin.get_content_models instead")
    from mezzanine.pages.admin import PageAdmin
    return PageAdmin.get_content_models()
@register.render_tag
def set_model_permissions(context, token):
    """
    Assigns a permissions dict to the given model, much like Django
    does with its dashboard app list.

    Used within the change list for pages, to implement permission
    checks for the navigation tree.
    """
    model = context[token.split_contents()[1]]
    opts = model._meta
    # Template like "app_label.%s_modelname"; the perm type fills the %s.
    perm_name = opts.app_label + ".%s_" + opts.object_name.lower()
    request = context["request"]
    setattr(model, "perms", {})
    for perm_type in ("add", "change", "delete"):
        model.perms[perm_type] = request.user.has_perm(perm_name % perm_type)
    # render_tag must return a string; this tag only mutates context state.
    return ""
@register.render_tag
def set_page_permissions(context, token):
    """
    Assigns a permissions dict to the given page instance, combining
    Django's permission for the page's model and a permission check
    against the instance itself calling the page's ``can_add``,
    ``can_change`` and ``can_delete`` custom methods.

    Used within the change list for pages, to implement permission
    checks for the navigation tree.
    """
    page = context[token.split_contents()[1]]
    model = page.get_content_model()
    try:
        opts = model._meta
    except AttributeError:
        # Distinguish a vanished content model from a misconfigured one.
        if model is None:
            error = _("Could not load the model for the following page, "
                      "was it removed?")
            obj = page
        else:
            # A missing inner Meta class usually means the Page model
            # hasn't been directly subclassed.
            error = _("An error occured with the following class. Does "
                      "it subclass Page directly?")
            obj = model.__class__.__name__
        raise ImproperlyConfigured(error + " '%s'" % obj)
    # Template like "app_label.%s_modelname"; the perm type fills the %s.
    perm_name = opts.app_label + ".%s_" + opts.object_name.lower()
    request = context["request"]
    setattr(page, "perms", {})
    for perm_type in ("add", "change", "delete"):
        # Both the Django model permission AND the instance-level hook
        # must grant access.
        perm = request.user.has_perm(perm_name % perm_type)
        perm = perm and getattr(model, "can_%s" % perm_type)(request)
        page.perms[perm_type] = perm
    # render_tag must return a string; this tag only mutates the page object.
    return ""
| bsd-2-clause |
gangadhar-kadam/latestchurcherp | erpnext/shopping_cart/doctype/shopping_cart_settings/shopping_cart_settings.py | 6 | 8588 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import comma_and
from frappe.model.document import Document
from frappe.utils.nestedset import get_ancestors_of
from erpnext.utilities.doctype.address.address import get_territory_from_address
# Raised for any misconfiguration of the shopping cart settings.
class ShoppingCartSetupError(frappe.ValidationError): pass
class ShoppingCartSettings(Document):
    """Singleton settings document configuring the website shopping cart."""

    def onload(self):
        # Expose available Quotation naming series to the settings form UI.
        self.get("__onload").quotation_series = frappe.get_meta("Quotation").get_options("naming_series")

    def validate(self):
        # Only enforce configuration rules when the cart is actually enabled.
        if self.enabled:
            self.validate_price_lists()
            self.validate_tax_masters()
            self.validate_exchange_rates_exist()

    def on_update(self):
        # Cache the cart flags as global defaults for cheap lookups elsewhere.
        frappe.db.set_default("shopping_cart_enabled", self.get("enabled") or 0)
        frappe.db.set_default("shopping_cart_quotation_series", self.get("quotation_series"))

    def validate_overlapping_territories(self, parentfield, fieldname):
        # for displaying message
        doctype = self.meta.get_field(parentfield).options
        # specify atleast one entry in the table
        self.validate_table_has_rows(parentfield, raise_exception=ShoppingCartSetupError)
        territory_name_map = self.get_territory_name_map(parentfield, fieldname)
        # A territory mapped to more than one row is ambiguous -> reject.
        for territory, names in territory_name_map.items():
            if len(names) > 1:
                frappe.throw(_("{0} {1} has a common territory {2}").format(_(doctype), comma_and(names), territory), ShoppingCartSetupError)
        return territory_name_map

    def validate_price_lists(self):
        territory_name_map = self.validate_overlapping_territories("price_lists", "selling_price_list")
        # validate that a Shopping Cart Price List exists for the default territory as a catch all!
        price_list_for_default_territory = self.get_name_from_territory(self.default_territory, "price_lists",
            "selling_price_list")
        if not price_list_for_default_territory:
            msgprint(_("Please specify a Price List which is valid for Territory") +
                ": " + self.default_territory, raise_exception=ShoppingCartSetupError)

    def validate_tax_masters(self):
        self.validate_overlapping_territories("sales_taxes_and_charges_masters",
            "sales_taxes_and_charges_master")

    def get_territory_name_map(self, parentfield, fieldname):
        """Map each territory to the child-table rows that apply to it."""
        territory_name_map = {}
        # entries in table
        names = [doc.get(fieldname) for doc in self.get(parentfield)]
        if names:
            # for condition in territory check
            parenttype = frappe.get_meta(self.meta.get_options(parentfield)).get_options(fieldname)
            # to validate territory overlap
            # make a map of territory: [list of names]
            # if list against each territory has more than one element, raise exception
            territory_name = frappe.db.sql("""select `territory`, `parent`
                from `tabApplicable Territory`
                where `parenttype`=%s and `parent` in (%s)""" %
                ("%s", ", ".join(["%s"]*len(names))), tuple([parenttype] + names))
            for territory, name in territory_name:
                territory_name_map.setdefault(territory, []).append(name)
                if len(territory_name_map[territory]) > 1:
                    # Keep child-table order so error messages are stable.
                    territory_name_map[territory].sort(key=lambda val: names.index(val))
        return territory_name_map

    def validate_exchange_rates_exist(self):
        """check if exchange rates exist for all Price List currencies (to company's currency)"""
        company_currency = frappe.db.get_value("Company", self.company, "default_currency")
        if not company_currency:
            msgprint(_("Please specify currency in Company") + ": " + self.company,
                raise_exception=ShoppingCartSetupError)
        price_list_currency_map = frappe.db.get_values("Price List",
            [d.selling_price_list for d in self.get("price_lists")],
            "currency")
        # check if all price lists have a currency
        for price_list, currency in price_list_currency_map.items():
            if not currency:
                frappe.throw(_("Currency is required for Price List {0}").format(price_list))
        # Exchange rate records are named "<from>-<to>".
        expected_to_exist = [currency + "-" + company_currency
            for currency in price_list_currency_map.values()
            if currency != company_currency]
        if expected_to_exist:
            exists = frappe.db.sql_list("""select name from `tabCurrency Exchange`
                where name in (%s)""" % (", ".join(["%s"]*len(expected_to_exist)),),
                tuple(expected_to_exist))
            missing = list(set(expected_to_exist).difference(exists))
            if missing:
                msgprint(_("Missing Currency Exchange Rates for {0}").format(comma_and(missing)),
                    raise_exception=ShoppingCartSetupError)

    def get_name_from_territory(self, territory, parentfield, fieldname):
        """Resolve the row for a territory, walking up its ancestors."""
        name = None
        territory_name_map = self.get_territory_name_map(parentfield, fieldname)
        if territory_name_map.get(territory):
            name = territory_name_map.get(territory)
        else:
            # Fall back to the nearest ancestor territory with a mapping.
            territory_ancestry = self.get_territory_ancestry(territory)
            for ancestor in territory_ancestry:
                if territory_name_map.get(ancestor):
                    name = territory_name_map.get(ancestor)
                    break
        return name

    def get_price_list(self, billing_territory):
        price_list = self.get_name_from_territory(billing_territory, "price_lists", "selling_price_list")
        return price_list and price_list[0] or None

    def get_tax_master(self, billing_territory):
        tax_master = self.get_name_from_territory(billing_territory, "sales_taxes_and_charges_masters",
            "sales_taxes_and_charges_master")
        return tax_master and tax_master[0] or None

    def get_shipping_rules(self, shipping_territory):
        return self.get_name_from_territory(shipping_territory, "shipping_rules", "shipping_rule")

    def get_territory_ancestry(self, territory):
        # Memoize ancestor lookups per instance; they hit the database.
        if not hasattr(self, "_territory_ancestry"):
            self._territory_ancestry = {}
        if not self._territory_ancestry.get(territory):
            self._territory_ancestry[territory] = get_ancestors_of("Territory", territory)
        return self._territory_ancestry[territory]
def validate_cart_settings(doc, method):
    # doc_events hook: revalidate the singleton whenever related docs change.
    frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings").run_method("validate")
def get_shopping_cart_settings():
    # Cache the settings doc on frappe.local for the current request.
    if not getattr(frappe.local, "shopping_cart_settings", None):
        frappe.local.shopping_cart_settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
    return frappe.local.shopping_cart_settings
def is_cart_enabled():
    # Convenience accessor over the cached settings document.
    return get_shopping_cart_settings().enabled

def get_default_territory():
    # Territory used as the catch-all fallback for price/tax resolution.
    return get_shopping_cart_settings().default_territory

def check_shopping_cart_enabled():
    # Raise (not return False) so callers fail fast with a clear message.
    if not get_shopping_cart_settings().enabled:
        frappe.throw(_("You need to enable Shopping Cart"), ShoppingCartSetupError)
def apply_shopping_cart_settings(quotation, method):
    """Called via a validate hook on Quotation"""
    from erpnext.shopping_cart import get_party
    # Only quotations originating from the web cart are touched.
    if quotation.order_type != "Shopping Cart":
        return
    # Resolve billing/shipping territory from the address, falling back to
    # the contact's party territory and finally the configured default.
    quotation.billing_territory = (get_territory_from_address(quotation.customer_address)
        or get_party(quotation.contact_email).territory or get_default_territory())
    quotation.shipping_territory = (get_territory_from_address(quotation.shipping_address_name)
        or get_party(quotation.contact_email).territory or get_default_territory())
    set_price_list(quotation)
    set_taxes_and_charges(quotation)
    quotation.calculate_taxes_and_totals()
    set_shipping_rule(quotation)
def set_price_list(quotation):
    """Pick the price list for the billing territory and refresh item rates."""
    previous_selling_price_list = quotation.selling_price_list
    quotation.selling_price_list = get_shopping_cart_settings().get_price_list(quotation.billing_territory)
    if not quotation.selling_price_list:
        quotation.selling_price_list = get_shopping_cart_settings().get_price_list(get_default_territory())
    if previous_selling_price_list != quotation.selling_price_list:
        # Price list changed: clear cached currency/rate data so the item
        # details are re-fetched against the new list.
        quotation.price_list_currency = quotation.currency = quotation.plc_conversion_rate = quotation.conversion_rate = None
        for d in quotation.get("items"):
            d.price_list_rate = d.discount_percentage = d.rate = d.amount = None
        quotation.set_price_list_and_item_details()
def set_taxes_and_charges(quotation):
    """Pick the tax template for the billing territory, refreshing if changed."""
    previous_taxes_and_charges = quotation.taxes_and_charges
    quotation.taxes_and_charges = get_shopping_cart_settings().get_tax_master(quotation.billing_territory)
    if previous_taxes_and_charges != quotation.taxes_and_charges:
        quotation.set_other_charges()
def set_shipping_rule(quotation):
    """Apply a shipping rule matching the shipping territory, if any."""
    shipping_rules = get_shopping_cart_settings().get_shipping_rules(quotation.shipping_territory)
    if not shipping_rules:
        quotation.remove_shipping_charge()
        return
    # Keep the current rule if still applicable; otherwise take the first.
    if quotation.shipping_rule not in shipping_rules:
        quotation.remove_shipping_charge()
        quotation.shipping_rule = shipping_rules[0]
    quotation.apply_shipping_rule()
| agpl-3.0 |
developerworks/scrapy | scrapy/tests/test_squeue.py | 24 | 4074 | from scrapy.tests import test_utils_queue as t
from scrapy.squeue import MarshalFifoDiskQueue, MarshalLifoDiskQueue, PickleFifoDiskQueue, PickleLifoDiskQueue
from scrapy.item import Item, Field
from scrapy.http import Request
from scrapy.contrib.loader import ItemLoader
class TestItem(Item):
    # Minimal item with one field, used by the (de)serialization tests below.
    name = Field()
def test_processor(x):
return x + x
class TestLoader(ItemLoader):
    # Loader whose output processor doubles the value; used to verify that
    # class attributes survive pickling round-trips.
    default_item_class = TestItem
    name_out = staticmethod(test_processor)
class MarshalFifoDiskQueueTest(t.FifoDiskQueueTest):
    # Re-run the generic FIFO disk-queue suite with marshal serialization.
    chunksize = 100000

    def queue(self):
        return MarshalFifoDiskQueue(self.qdir, chunksize=self.chunksize)

    def test_serialize(self):
        # FIFO order: pops come back in push order.
        q = self.queue()
        q.push('a')
        q.push(123)
        q.push({'a': 'dict'})
        self.assertEqual(q.pop(), 'a')
        self.assertEqual(q.pop(), 123)
        self.assertEqual(q.pop(), {'a': 'dict'})

    def test_nonserializable_object(self):
        # marshal cannot serialize functions/lambdas.
        q = self.queue()
        self.assertRaises(ValueError, q.push, lambda x: x)
# Repeat the marshal FIFO suite with tiny chunk sizes to exercise rollover
# to new on-disk chunk files.
class ChunkSize1MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 1

class ChunkSize2MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 2

class ChunkSize3MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 3

class ChunkSize4MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 4
class PickleFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    # Pickle handles Items, Loaders and recursive Requests that marshal cannot.
    chunksize = 100000

    def queue(self):
        return PickleFifoDiskQueue(self.qdir, chunksize=self.chunksize)

    def test_serialize_item(self):
        q = self.queue()
        i = TestItem(name='foo')
        q.push(i)
        i2 = q.pop()
        assert isinstance(i2, TestItem)
        self.assertEqual(i, i2)

    def test_serialize_loader(self):
        q = self.queue()
        l = TestLoader()
        q.push(l)
        l2 = q.pop()
        assert isinstance(l2, TestLoader)
        assert l2.default_item_class is TestItem
        self.assertEqual(l2.name_out('x'), 'xx')

    def test_serialize_request_recursive(self):
        # Self-referencing meta must survive a pickle round-trip.
        q = self.queue()
        r = Request('http://www.example.com')
        r.meta['request'] = r
        q.push(r)
        r2 = q.pop()
        assert isinstance(r2, Request)
        self.assertEqual(r.url, r2.url)
        assert r2.meta['request'] is r2
# Repeat the pickle FIFO suite with tiny chunk sizes to exercise rollover
# to new on-disk chunk files.
class ChunkSize1PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 1

class ChunkSize2PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 2

class ChunkSize3PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 3

class ChunkSize4PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 4
class MarshalLifoDiskQueueTest(t.LifoDiskQueueTest):
    # LIFO variant of the marshal suite: pops return in reverse push order.
    def queue(self):
        return MarshalLifoDiskQueue(self.path)

    def test_serialize(self):
        q = self.queue()
        q.push('a')
        q.push(123)
        q.push({'a': 'dict'})
        self.assertEqual(q.pop(), {'a': 'dict'})
        self.assertEqual(q.pop(), 123)
        self.assertEqual(q.pop(), 'a')

    def test_nonserializable_object(self):
        # marshal cannot serialize functions/lambdas.
        q = self.queue()
        self.assertRaises(ValueError, q.push, lambda x: x)
class PickleLifoDiskQueueTest(MarshalLifoDiskQueueTest):
    # Pickle handles Items, Loaders and recursive Requests that marshal cannot.
    def queue(self):
        return PickleLifoDiskQueue(self.path)

    def test_serialize_item(self):
        q = self.queue()
        i = TestItem(name='foo')
        q.push(i)
        i2 = q.pop()
        assert isinstance(i2, TestItem)
        self.assertEqual(i, i2)

    def test_serialize_loader(self):
        q = self.queue()
        l = TestLoader()
        q.push(l)
        l2 = q.pop()
        assert isinstance(l2, TestLoader)
        assert l2.default_item_class is TestItem
        self.assertEqual(l2.name_out('x'), 'xx')

    def test_serialize_request_recursive(self):
        # Self-referencing meta must survive a pickle round-trip.
        q = self.queue()
        r = Request('http://www.example.com')
        r.meta['request'] = r
        q.push(r)
        r2 = q.pop()
        assert isinstance(r2, Request)
        self.assertEqual(r.url, r2.url)
        assert r2.meta['request'] is r2
| bsd-3-clause |
suneeth51/neutron | neutron/tests/functional/test_service.py | 30 | 1108 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_config import cfg
from neutron import service
from neutron.tests import base
class TestService(base.BaseTestCase):
    """Functional checks for service._get_api_workers()."""

    def test_api_workers_default(self):
        # With no config override, the worker count follows the CPU count.
        self.assertEqual(processutils.get_worker_count(),
                         service._get_api_workers())

    def test_api_workers_from_config(self):
        # An explicit api_workers setting takes precedence.
        cfg.CONF.set_override('api_workers', 1234)
        self.assertEqual(1234,
                         service._get_api_workers())
| apache-2.0 |
NetApp/cinder | cinder/scheduler/filters/__init__.py | 9 | 1373 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler host filters
"""
from cinder.scheduler import base_filter
class BaseHostFilter(base_filter.BaseFilter):
    """Base class for host filters."""

    def _filter_one(self, obj, filter_properties):
        """Return True if the object passes the filter, otherwise False."""
        # Delegates to host_passes() so subclasses implement a single hook.
        return self.host_passes(obj, filter_properties)

    def host_passes(self, host_state, filter_properties):
        """Return True if the HostState passes the filter, otherwise False.

        Override this in a subclass.
        """
        raise NotImplementedError()
class HostFilterHandler(base_filter.BaseFilterHandler):
    # Loads BaseHostFilter subclasses from the given entry-point namespace.
    def __init__(self, namespace):
        super(HostFilterHandler, self).__init__(BaseHostFilter, namespace)
| apache-2.0 |
flyfei/python-for-android | python-build/python-libs/gdata/tests/gdata_tests/data_test.py | 87 | 20692 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.data
from gdata import test_data
import gdata.test_config as conf
import atom.core
import atom.data
SIMPLE_V2_FEED_TEST_DATA = """<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:gd='http://schemas.google.com/g/2005'
gd:etag='W/"CUMBRHo_fip7ImA9WxRbGU0."'>
<title>Elizabeth Bennet's Contacts</title>
<link rel='next' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/.../more' />
<entry gd:etag='"Qn04eTVSLyp7ImA9WxRbGEUORAQ."'>
<id>http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9e</id>
<title>Fitzwilliam</title>
<link rel='http://schemas.google.com/contacts/2008/rel#photo'
type='image/*'
href='http://www.google.com/m8/feeds/photos/media/liz%40gmail.com/c9e'
gd:etag='"KTlcZWs1bCp7ImBBPV43VUV4LXEZCXERZAc."' />
<link rel='self' type='application/atom+xml'
href='Changed to ensure we are really getting the edit URL.'/>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/c9e'/>
</entry>
<entry gd:etag='"123456"'>
<link rel='edit' href='http://example.com/1' />
</entry>
</feed>"""
XML_ENTRY_1 = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<id> http://www.google.com/test/id/url </id>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<link rel='license'
href='http://creativecommons.org/licenses/by-nc/2.5/rdf'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
def parse(xml_string, target_class):
  """Convenience wrapper for converting an XML string to an XmlElement.

  Args:
    xml_string: [string] The XML document to deserialize.
    target_class: [class] The atom.core.XmlElement subclass to populate.

  Returns:
    An instance of target_class parsed from xml_string.
  """
  return atom.core.xml_element_from_string(xml_string, target_class)
class StartIndexTest(unittest.TestCase):
  """Round-trip serialization tests for gdata.data.StartIndex."""
  def setUp(self):
    self.start_index = gdata.data.StartIndex()
  def testToAndFromString(self):
    # Serialize then re-parse; the text payload must survive the round trip.
    self.start_index.text = '1'
    self.assert_(self.start_index.text == '1')
    new_start_index = parse(self.start_index.ToString(),
                            gdata.data.StartIndex)
    self.assert_(self.start_index.text == new_start_index.text)
class ItemsPerPageTest(unittest.TestCase):
  """Round-trip serialization tests for gdata.data.ItemsPerPage."""
  def setUp(self):
    self.items_per_page = gdata.data.ItemsPerPage()
  def testToAndFromString(self):
    # Serialize then re-parse; the text payload must survive the round trip.
    self.items_per_page.text = '10'
    self.assert_(self.items_per_page.text == '10')
    new_items_per_page = parse(self.items_per_page.ToString(),
                               gdata.data.ItemsPerPage)
    self.assert_(self.items_per_page.text == new_items_per_page.text)
class GDataEntryTest(unittest.TestCase):
  """Tests that GDEntry cleans whitespace out of parsed values."""
  def testIdShouldBeCleaned(self):
    """The whitespace padding in the raw <id> text must be stripped."""
    entry = parse(XML_ENTRY_1, gdata.data.GDEntry)
    tree = parse(XML_ENTRY_1, atom.core.XmlElement)
    # BUG FIX: the closing parenthesis was misplaced so the '==' comparison
    # was passed as the namespace argument to get_elements(), making the
    # assertion vacuous. Mirror testGeneratorShouldBeCleaned below: the raw
    # tree text (whitespace padded) must differ from the cleaned id.
    self.assertFalse(tree.get_elements('id',
        'http://www.w3.org/2005/Atom')[0].text == entry.get_id())
    self.assertEqual(entry.get_id(), 'http://www.google.com/test/id/url')
  def testGeneratorShouldBeCleaned(self):
    """The generator accessor strips decoration present in the raw XML."""
    feed = parse(test_data.GBASE_FEED, gdata.data.GDFeed)
    tree = parse(test_data.GBASE_FEED, atom.core.XmlElement)
    self.assertFalse(tree.get_elements('generator',
        'http://www.w3.org/2005/Atom')[0].text == feed.get_generator())
    self.assertEqual(feed.get_generator(), 'GoogleBase')
  def testAllowsEmptyId(self):
    """Assigning an empty atom.data.Id must not raise."""
    entry = gdata.data.GDEntry()
    try:
      entry.id = atom.data.Id()
    except AttributeError:
      self.fail('Empty id should not raise an attribute error.')
class LinkFinderTest(unittest.TestCase):
  """Tests FindLicenseLink/FindAlternateLink on a parsed GDEntry."""
  def setUp(self):
    self.entry = parse(XML_ENTRY_1, gdata.data.GDEntry)
  def testLinkFinderGetsLicenseLink(self):
    # The finder returns the href string of the rel='license' link.
    self.assertEquals(isinstance(self.entry.FindLicenseLink(), str),
                      True)
    self.assertEquals(self.entry.FindLicenseLink(),
                      'http://creativecommons.org/licenses/by-nc/2.5/rdf')
  def testLinkFinderGetsAlternateLink(self):
    # The finder returns the href string of the rel='alternate' link.
    self.assertTrue(isinstance(self.entry.FindAlternateLink(), str))
    self.assertEquals(self.entry.FindAlternateLink(),
                      'http://www.provider-host.com/123456789')
class GDataFeedTest(unittest.TestCase):
  """Tests for gdata.data.GDFeed parsing."""
  def testCorrectConversionToElementTree(self):
    # The OpenSearch totalResults element must be exposed both via the
    # total_results attribute and via the generic get_elements API.
    test_feed = parse(test_data.GBASE_FEED, gdata.data.GDFeed)
    self.assert_(test_feed.total_results is not None)
    self.assertTrue(test_feed.get_elements('totalResults',
        'http://a9.com/-/spec/opensearchrss/1.0/') is not None)
    self.assertTrue(len(test_feed.get_elements('totalResults',
        'http://a9.com/-/spec/opensearchrss/1.0/')) > 0)
  def testAllowsEmptyId(self):
    # Assigning an empty atom.data.Id must not raise.
    feed = gdata.data.GDFeed()
    try:
      feed.id = atom.data.Id()
    except AttributeError:
      self.fail('Empty id should not raise an attribute error.')
class BatchEntryTest(unittest.TestCase):
  """Round-trip tests for gdata.data.BatchEntry batch metadata."""
  def testCorrectConversionFromAndToString(self):
    # Parse a canned batch entry and verify the batch:* extension fields.
    batch_entry = parse(test_data.BATCH_ENTRY, gdata.data.BatchEntry)
    self.assertEquals(batch_entry.batch_id.text, 'itemB')
    self.assertEquals(batch_entry.id.text,
                      'http://www.google.com/base/feeds/items/'
                      '2173859253842813008')
    self.assertEquals(batch_entry.batch_operation.type, 'insert')
    self.assertEquals(batch_entry.batch_status.code, '201')
    self.assertEquals(batch_entry.batch_status.reason, 'Created')
    # Re-serialize and re-parse; all batch fields must survive.
    new_entry = parse(str(batch_entry), gdata.data.BatchEntry)
    self.assertEquals(batch_entry.batch_id.text, new_entry.batch_id.text)
    self.assertEquals(batch_entry.id.text, new_entry.id.text)
    self.assertEquals(batch_entry.batch_operation.type,
                      new_entry.batch_operation.type)
    self.assertEquals(batch_entry.batch_status.code,
                      new_entry.batch_status.code)
    self.assertEquals(batch_entry.batch_status.reason,
                      new_entry.batch_status.reason)
class BatchFeedTest(unittest.TestCase):
  """Tests for gdata.data.BatchFeed parsing and the Add* helpers."""
  def setUp(self):
    self.batch_feed = gdata.data.BatchFeed()
    self.example_entry = gdata.data.BatchEntry(
        id=atom.data.Id(text='http://example.com/1'), text='This is a test')
  def testConvertRequestFeed(self):
    """A batch request feed round-trips through serialization."""
    batch_feed = parse(test_data.BATCH_FEED_REQUEST, gdata.data.BatchFeed)
    self.assertEquals(len(batch_feed.entry), 4)
    for entry in batch_feed.entry:
      self.assert_(isinstance(entry, gdata.data.BatchEntry))
    self.assertEquals(batch_feed.title.text, 'My Batch Feed')
    new_feed = parse(batch_feed.to_string(), gdata.data.BatchFeed)
    self.assertEquals(len(new_feed.entry), 4)
    for entry in new_feed.entry:
      self.assert_(isinstance(entry, gdata.data.BatchEntry))
    self.assertEquals(new_feed.title.text, 'My Batch Feed')
  def testConvertResultFeed(self):
    """A batch result feed round-trips, preserving batch status fields."""
    batch_feed = parse(test_data.BATCH_FEED_RESULT, gdata.data.BatchFeed)
    self.assertEquals(len(batch_feed.entry), 4)
    for entry in batch_feed.entry:
      self.assert_(isinstance(entry, gdata.data.BatchEntry))
      if entry.id.text == ('http://www.google.com/base/feeds/items/'
                           '2173859253842813008'):
        self.assertEquals(entry.batch_operation.type, 'insert')
        self.assertEquals(entry.batch_id.text, 'itemB')
        self.assertEquals(entry.batch_status.code, '201')
        self.assertEquals(entry.batch_status.reason, 'Created')
    self.assertEquals(batch_feed.title.text, 'My Batch')
    new_feed = parse(str(batch_feed), gdata.data.BatchFeed)
    self.assertEquals(len(new_feed.entry), 4)
    for entry in new_feed.entry:
      self.assert_(isinstance(entry, gdata.data.BatchEntry))
      if entry.id.text == ('http://www.google.com/base/feeds/items/'
                           '2173859253842813008'):
        self.assertEquals(entry.batch_operation.type, 'insert')
        self.assertEquals(entry.batch_id.text, 'itemB')
        self.assertEquals(entry.batch_status.code, '201')
        self.assertEquals(entry.batch_status.reason, 'Created')
    self.assertEquals(new_feed.title.text, 'My Batch')
  def testAddBatchEntry(self):
    """AddBatchEntry honors entry/id/batch-id precedence rules."""
    try:
      self.batch_feed.AddBatchEntry(batch_id_string='a')
      self.fail('AddBatchEntry with neither entry or URL should raise Error')
    except gdata.data.MissingRequiredParameters:
      pass
    new_entry = self.batch_feed.AddBatchEntry(
        id_url_string='http://example.com/1')
    self.assertEquals(len(self.batch_feed.entry), 1)
    self.assertEquals(self.batch_feed.entry[0].get_id(),
                      'http://example.com/1')
    self.assertEquals(self.batch_feed.entry[0].batch_id.text, '0')
    self.assertEquals(new_entry.id.text, 'http://example.com/1')
    self.assertEquals(new_entry.batch_id.text, '0')
    # An explicit batch_id_string overrides the auto-assigned index.
    to_add = gdata.data.BatchEntry(id=atom.data.Id(text='originalId'))
    new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                              batch_id_string='foo')
    self.assertEquals(new_entry.batch_id.text, 'foo')
    self.assertEquals(new_entry.id.text, 'originalId')
    # An id already on the entry wins over id_url_string.
    to_add = gdata.data.BatchEntry(id=atom.data.Id(text='originalId'),
                                   batch_id=gdata.data.BatchId(text='bar'))
    new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                              id_url_string='newId',
                                              batch_id_string='foo')
    self.assertEquals(new_entry.batch_id.text, 'foo')
    self.assertEquals(new_entry.id.text, 'originalId')
    # Without batch_id_string the entry's existing batch id is kept.
    to_add = gdata.data.BatchEntry(id=atom.data.Id(text='originalId'),
                                   batch_id=gdata.data.BatchId(text='bar'))
    new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                              id_url_string='newId')
    self.assertEquals(new_entry.batch_id.text, 'bar')
    self.assertEquals(new_entry.id.text, 'originalId')
    # operation_string overrides an operation already set on the entry.
    to_add = gdata.data.BatchEntry(id=atom.data.Id(text='originalId'),
                                   batch_id=gdata.data.BatchId(text='bar'),
                                   batch_operation=gdata.data.BatchOperation(
                                       type=gdata.data.BATCH_INSERT))
    self.assertEquals(to_add.batch_operation.type, gdata.data.BATCH_INSERT)
    new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
        id_url_string='newId', batch_id_string='foo',
        operation_string=gdata.data.BATCH_UPDATE)
    self.assertEquals(new_entry.batch_operation.type, gdata.data.BATCH_UPDATE)
  def testAddInsert(self):
    """AddInsert forces the insert operation and auto-numbers batch ids."""
    first_entry = gdata.data.BatchEntry(
        id=atom.data.Id(text='http://example.com/1'), text='This is a test1')
    self.batch_feed.AddInsert(first_entry)
    self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                      gdata.data.BATCH_INSERT)
    self.assertEquals(self.batch_feed.entry[0].batch_id.text, '0')
    second_entry = gdata.data.BatchEntry(
        id=atom.data.Id(text='http://example.com/2'), text='This is a test2')
    self.batch_feed.AddInsert(second_entry, batch_id_string='foo')
    self.assertEquals(self.batch_feed.entry[1].batch_operation.type,
                      gdata.data.BATCH_INSERT)
    self.assertEquals(self.batch_feed.entry[1].batch_id.text, 'foo')
    third_entry = gdata.data.BatchEntry(
        id=atom.data.Id(text='http://example.com/3'), text='This is a test3')
    third_entry.batch_operation = gdata.data.BatchOperation(
        type=gdata.data.BATCH_DELETE)
    # Add an entry with a delete operation already assigned.
    self.batch_feed.AddInsert(third_entry)
    # The batch entry should not have the original operation, it should
    # have been changed to an insert.
    self.assertEquals(self.batch_feed.entry[2].batch_operation.type,
                      gdata.data.BATCH_INSERT)
    self.assertEquals(self.batch_feed.entry[2].batch_id.text, '2')
  def testAddDelete(self):
    """AddDelete accepts either an existing entry or a bare URL."""
    # Try deleting an entry
    delete_entry = gdata.data.BatchEntry(
        id=atom.data.Id(text='http://example.com/1'), text='This is a test')
    self.batch_feed.AddDelete(entry=delete_entry)
    self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                      gdata.data.BATCH_DELETE)
    self.assertEquals(self.batch_feed.entry[0].get_id(),
                      'http://example.com/1')
    self.assertEquals(self.batch_feed.entry[0].text, 'This is a test')
    # Try deleting a URL
    self.batch_feed.AddDelete(url_string='http://example.com/2')
    # BUG FIX: the entry created from the URL is entry[1]; the original
    # assertion re-checked entry[0] (copy-paste error).
    self.assertEquals(self.batch_feed.entry[1].batch_operation.type,
                      gdata.data.BATCH_DELETE)
    self.assertEquals(self.batch_feed.entry[1].id.text,
                      'http://example.com/2')
    self.assert_(self.batch_feed.entry[1].text is None)
  def testAddQuery(self):
    """AddQuery accepts either an existing entry or a bare URL."""
    # Try querying with an existing batch entry
    delete_entry = gdata.data.BatchEntry(
        id=atom.data.Id(text='http://example.com/1'))
    self.batch_feed.AddQuery(entry=delete_entry)
    self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                      gdata.data.BATCH_QUERY)
    self.assertEquals(self.batch_feed.entry[0].get_id(),
                      'http://example.com/1')
    # Try querying a URL
    self.batch_feed.AddQuery(url_string='http://example.com/2')
    # BUG FIX: the entry created from the URL is entry[1]; the original
    # assertion re-checked entry[0] (copy-paste error).
    self.assertEquals(self.batch_feed.entry[1].batch_operation.type,
                      gdata.data.BATCH_QUERY)
    self.assertEquals(self.batch_feed.entry[1].id.text,
                      'http://example.com/2')
  def testAddUpdate(self):
    """AddUpdate marks the given entry with the update operation."""
    # Try updating an entry
    delete_entry = gdata.data.BatchEntry(
        id=atom.data.Id(text='http://example.com/1'), text='This is a test')
    self.batch_feed.AddUpdate(entry=delete_entry)
    self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                      gdata.data.BATCH_UPDATE)
    self.assertEquals(self.batch_feed.entry[0].get_id(),
                      'http://example.com/1')
    self.assertEquals(self.batch_feed.entry[0].text, 'This is a test')
class ExtendedPropertyTest(unittest.TestCase):
  """Tests the opaque XML blob accessors on gdata.data.ExtendedProperty."""
  def testXmlBlobRoundTrip(self):
    # The blob must survive set -> get -> serialize -> parse -> get.
    ep = gdata.data.ExtendedProperty(name='blobby')
    ep.SetXmlBlob('<some_xml attr="test"/>')
    extension = ep.GetXmlBlob()
    self.assertEquals(extension.tag, 'some_xml')
    self.assert_(extension.namespace is None)
    self.assertEquals(extension.attributes['attr'], 'test')
    ep2 = parse(ep.ToString(), gdata.data.ExtendedProperty)
    extension = ep2.GetXmlBlob()
    self.assertEquals(extension.tag, 'some_xml')
    self.assert_(extension.namespace is None)
    self.assertEquals(extension.attributes['attr'], 'test')
  def testGettersShouldReturnNoneWithNoBlob(self):
    ep = gdata.data.ExtendedProperty(name='no blob')
    self.assert_(ep.GetXmlBlob() is None)
  def testGettersReturnCorrectTypes(self):
    # GetXmlBlob returns an XmlElement; its to_string() returns str.
    ep = gdata.data.ExtendedProperty(name='has blob')
    ep.SetXmlBlob('<some_xml attr="test"/>')
    self.assert_(isinstance(ep.GetXmlBlob(),
                            atom.core.XmlElement))
    self.assert_(isinstance(ep.GetXmlBlob().to_string(), str))
class FeedLinkTest(unittest.TestCase):
  """Tests parsing of the gd:feedLink element."""
  def testCorrectFromStringType(self):
    # The countHint attribute maps to the count_hint member.
    link = parse(
        '<feedLink xmlns="http://schemas.google.com/g/2005" countHint="5"/>',
        gdata.data.FeedLink)
    self.assertTrue(isinstance(link, gdata.data.FeedLink))
    self.assertEqual(link.count_hint, '5')
class SimpleV2FeedTest(unittest.TestCase):
  """End-to-end parsing checks against SIMPLE_V2_FEED_TEST_DATA."""
  def test_parsing_etags_and_edit_url(self):
    """Verifies etag extraction plus edit/next link discovery."""
    feed = atom.core.parse(SIMPLE_V2_FEED_TEST_DATA, gdata.data.GDFeed)
    # General parsing assertions.
    self.assertEqual(feed.get_elements('title')[0].text,
                     'Elizabeth Bennet\'s Contacts')
    self.assertEqual(len(feed.entry), 2)
    for entry in feed.entry:
      self.assertTrue(isinstance(entry, gdata.data.GDEntry))
    self.assertEqual(feed.entry[0].GetElements('title')[0].text,
                     'Fitzwilliam')
    self.assertEqual(feed.entry[0].get_elements('id')[0].text,
        'http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9e')
    # ETags checks.
    self.assertEqual(feed.etag, 'W/"CUMBRHo_fip7ImA9WxRbGU0."')
    self.assertEqual(feed.entry[0].etag, '"Qn04eTVSLyp7ImA9WxRbGEUORAQ."')
    self.assertEqual(feed.entry[1].etag, '"123456"')
    # Look for Edit URLs.
    self.assertEqual(feed.entry[0].find_edit_link(),
        'http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/c9e')
    self.assertEqual(feed.entry[1].FindEditLink(), 'http://example.com/1')
    # Look for Next URLs.
    self.assertEqual(feed.find_next_link(),
                     'http://www.google.com/m8/feeds/contacts/.../more')
  def test_constructor_defaults(self):
    """Verifies constructor defaults (renamed from 'defauls' typo)."""
    feed = gdata.data.GDFeed()
    self.assertTrue(feed.etag is None)
    self.assertEqual(feed.link, [])
    self.assertEqual(feed.entry, [])
    entry = gdata.data.GDEntry()
    self.assertTrue(entry.etag is None)
    self.assertEqual(entry.link, [])
    link = atom.data.Link()
    self.assertTrue(link.href is None)
    self.assertTrue(link.rel is None)
    link1 = atom.data.Link(href='http://example.com', rel='test')
    self.assertEqual(link1.href, 'http://example.com')
    self.assertEqual(link1.rel, 'test')
    link2 = atom.data.Link(href='http://example.org/', rel='alternate')
    entry = gdata.data.GDEntry(etag='foo', link=[link1, link2])
    feed = gdata.data.GDFeed(etag='12345', entry=[entry])
    self.assertEqual(feed.etag, '12345')
    self.assertEqual(len(feed.entry), 1)
    self.assertEqual(feed.entry[0].etag, 'foo')
    self.assertEqual(len(feed.entry[0].link), 2)
class DataClassSanityTest(unittest.TestCase):
def test_basic_element_structure(self):
conf.check_data_classes(self, [
gdata.data.TotalResults, gdata.data.StartIndex,
gdata.data.ItemsPerPage, gdata.data.ExtendedProperty,
gdata.data.GDEntry, gdata.data.GDFeed, gdata.data.BatchId,
gdata.data.BatchOperation, gdata.data.BatchStatus,
gdata.data.BatchEntry, gdata.data.BatchInterrupted,
gdata.data.BatchFeed, gdata.data.EntryLink, gdata.data.FeedLink,
gdata.data.AdditionalName, gdata.data.Comments, gdata.data.Country,
gdata.data.Email, gdata.data.FamilyName, gdata.data.Im,
gdata.data.GivenName, gdata.data.NamePrefix, gdata.data.NameSuffix,
gdata.data.FullName, gdata.data.Name, gdata.data.OrgDepartment,
gdata.data.OrgName, gdata.data.OrgSymbol, gdata.data.OrgTitle,
gdata.data.Organization, gdata.data.When, gdata.data.Who,
gdata.data.OriginalEvent, gdata.data.PhoneNumber,
gdata.data.PostalAddress, gdata.data.Rating, gdata.data.Recurrence,
gdata.data.RecurrenceException, gdata.data.Reminder,
gdata.data.Agent, gdata.data.HouseName, gdata.data.Street,
gdata.data.PoBox, gdata.data.Neighborhood, gdata.data.City,
gdata.data.Subregion, gdata.data.Region, gdata.data.Postcode,
gdata.data.Country, gdata.data.FormattedAddress,
gdata.data.StructuredPostalAddress, gdata.data.Where,
gdata.data.AttendeeType, gdata.data.AttendeeStatus])
def test_member_values(self):
self.assertEqual(
gdata.data.TotalResults._qname,
'{http://a9.com/-/spec/opensearch/1.1/}totalResults')
self.assertEqual(
gdata.data.RecurrenceException._qname,
'{http://schemas.google.com/g/2005}recurrenceException')
self.assertEqual(gdata.data.RecurrenceException.specialized,
'specialized')
def suite():
  """Builds the suite of all test cases defined in this module.

  BUG FIX: the original listed StartIndexTest twice and omitted
  ItemsPerPageTest and DataClassSanityTest, so those cases never ran
  through this entry point.
  """
  return conf.build_suite([StartIndexTest, ItemsPerPageTest, GDataEntryTest,
      LinkFinderTest, GDataFeedTest, BatchEntryTest, BatchFeedTest,
      ExtendedPropertyTest, FeedLinkTest, SimpleV2FeedTest,
      DataClassSanityTest])
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
mesosphere/dcos-kafka-service | testing/sdk_recovery.py | 2 | 2518 | import json
import logging
from typing import List, Optional
import sdk_cmd
import sdk_plan
import sdk_tasks
LOG = logging.getLogger(__name__)
def check_permanent_recovery(
    package_name: str,
    service_name: str,
    pod_name: str,
    recovery_timeout_s: int,
    pods_with_updated_tasks: Optional[List[str]] = None,
) -> None:
    """
    Perform a replace (permanent recovery) operation on the specified pod.
    The specified pod AND any additional pods in `pods_with_updated_tasks` are
    checked to ensure that their tasks have been restarted.
    Any remaining pods are checked to ensure that their tasks are not changed.
    For example, performing a pod replace kafka-0 on a Kafka framework should
    result in ONLY the kafka-0-broker task being restarted. In this case,
    pods_with_updated_tasks is specified as None.
    When performing a pod replace operation on a Cassandra seed node (node-0),
    a rolling restart of other nodes is triggered, and
    pods_with_updated_tasks = ["node-0", "node-1", "node-2"]
    (assuming a three node Cassandra ring)
    """
    LOG.info("Testing pod replace operation for %s:%s", service_name, pod_name)
    sdk_plan.wait_for_completed_deployment(service_name)
    sdk_plan.wait_for_completed_recovery(service_name)
    rc, stdout, _ = sdk_cmd.svc_cli(package_name, service_name, "pod list")
    assert rc == 0, "Pod list failed"
    pod_list = set(json.loads(stdout))
    pods_with_updated_tasks = pods_with_updated_tasks if pods_with_updated_tasks else []
    pods_to_update = set(pods_with_updated_tasks + [pod_name])
    tasks_to_replace = {}
    for pod in pods_to_update:
        # BUG FIX: previously queried get_task_ids(service_name, pod_name),
        # which recorded the replaced pod's tasks for EVERY pod in the set
        # instead of each pod's own tasks.
        tasks_to_replace[pod] = set(sdk_tasks.get_task_ids(service_name, pod))
    LOG.info("The following tasks will be replaced: %s", tasks_to_replace)
    tasks_in_other_pods = {}
    for pod in pod_list - pods_to_update:
        tasks_in_other_pods[pod] = set(sdk_tasks.get_task_ids(service_name, pod))
    LOG.info("Tasks in other pods should not be replaced: %s", tasks_in_other_pods)
    sdk_cmd.svc_cli(package_name, service_name, "pod replace {}".format(pod_name))
    sdk_plan.wait_for_kicked_off_recovery(service_name, recovery_timeout_s)
    sdk_plan.wait_for_completed_recovery(service_name, recovery_timeout_s)
    for pod, tasks in tasks_to_replace.items():
        sdk_tasks.check_tasks_updated(service_name, pod, tasks)
    for pod, tasks in tasks_in_other_pods.items():
        sdk_tasks.check_tasks_not_updated(service_name, pod, tasks)
| apache-2.0 |
imosquera/spinnaker | testing/citest/tests/google_front50_test.py | 1 | 15297 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
import json
import logging
import sys
import urllib
from citest.base import ExecutionContext
import citest.base
import citest.gcp_testing as gcp
import citest.json_contract as jc
import citest.service_testing as st
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.front50 as front50
class GoogleFront50TestScenario(sk.SpinnakerTestScenario):
  """Defines CRUD operations on Front50 applications and pipelines.

  Each operation returns an OperationContract pairing the HTTP call made
  against Front50 with contract clauses that verify both the persisted
  state in Google Cloud Storage and Front50's own query endpoints.
  """
  @classmethod
  def new_agent(cls, bindings):
    """Implements citest.service_testing.AgentTestScenario.new_agent."""
    return front50.new_agent(bindings)
  @classmethod
  def initArgumentParser(cls, parser, defaults=None):
    """Initialize command line argument parser.
    Args:
      parser: argparse.ArgumentParser
    """
    parser.add_argument(
        '--google_json_path', default='',
        help='The path to the google service credentials JSON file.')
    super(GoogleFront50TestScenario, cls).initArgumentParser(
        parser, defaults=defaults)
  def _do_init_bindings(self):
    """Hook to initialize custom test bindings so journaling is scoped."""
    context = ExecutionContext()
    config = self.agent.deployed_config
    # This scenario is only meaningful when Front50 persists to GCS.
    enabled = config.get('spinnaker.gcs.enabled', False)
    if not enabled:
      raise ValueError('spinnaker.gcs.enabled is not True')
    self.BUCKET = config['spinnaker.gcs.bucket']
    self.BASE_PATH = config['spinnaker.gcs.rootFolder']
    self.TEST_APP = self.bindings['TEST_APP']
    self.TEST_PIPELINE_NAME = 'My {app} Pipeline'.format(app=self.TEST_APP)
    self.TEST_PIPELINE_ID = '{app}-pipeline-id'.format(app=self.TEST_APP)
    # Independent GCS observer used by contracts to inspect what Front50
    # actually wrote to the bucket.
    self.gcs_observer = gcp.GcpStorageAgent.make_agent(
        credentials_path=self.bindings['GCE_CREDENTIALS_PATH'],
        scopes=gcp.gcp_storage_agent.STORAGE_FULL_SCOPE)
    metadata = self.gcs_observer.inspect_bucket(context, self.BUCKET)
    # Object versioning affects how many versions update_app expects to see.
    self.versioning_enabled = (metadata.get('versioning', {})
                               .get('enabled', False))
    if not self.versioning_enabled:
      self.logger.info('bucket=%s versioning enabled=%s',
                       self.BUCKET, self.versioning_enabled)
  def __init__(self, bindings, agent=None):
    """Constructor.
    Args:
      bindings: [dict] The data bindings to use to configure the scenario.
      agent: [Front50Agent] The agent for invoking the test operations on
          Front50
    """
    self.logger = logging.getLogger(__name__)
    super(GoogleFront50TestScenario, self).__init__(bindings, agent)
    # Expected history entries, most recent first; maintained by the
    # create/update operations and consulted by later contracts.
    self.app_history = []
    self.pipeline_history = []
    self.initial_app_spec = {
        "name" : self.TEST_APP,
        "description" : "My Application Description.",
        "email" : "test@google.com",
        "accounts" : "my-aws-account,my-google-account",
        "updateTs" : "1463667655844",
        "createTs" : "1463666817476",
        "platformHealthOnly" : False,
        "cloudProviders" : "gce,aws"
    }
    self.initial_pipeline_spec = {
        "keepWaitingPipelines": False,
        "limitConcurrent": True,
        "application": self.TEST_APP,
        "parallel": True,
        "lastModifiedBy": "anonymous",
        "name": self.TEST_PIPELINE_NAME,
        "stages": [],
        "index": 0,
        "id": self.TEST_PIPELINE_ID,
        "triggers": []
    }
  def create_app(self):
    """Returns OperationContract creating TEST_APP via Front50."""
    payload = self.agent.make_json_payload_from_object(self.initial_app_spec)
    expect = dict(self.initial_app_spec)
    # Front50 upper-cases the name and stamps the modifying user.
    expect['name'] = self.initial_app_spec['name'].upper()
    expect['lastModifiedBy'] = 'anonymous'
    contract = jc.Contract()
    # Note that curiously the updated timestamp is not adjusted in the storage
    # file.
    gcs_builder = gcp.GcpStorageContractBuilder(self.gcs_observer)
    (gcs_builder.new_clause_builder('Created Google Cloud Storage File',
                                    retryable_for_secs=5)
     .list_bucket(self.BUCKET, '/'.join([self.BASE_PATH, 'applications']))
     .contains_path_value('name', self.TEST_APP))
    (gcs_builder.new_clause_builder('Wrote File Content')
     .retrieve_content(self.BUCKET,
                       '/'.join([self.BASE_PATH, 'applications', self.TEST_APP,
                                 'specification.json']),
                       transform=json.JSONDecoder().decode)
     .contains_path_eq('', expect))
    for clause in gcs_builder.build().clauses:
      contract.add_clause(clause)
    # The update timestamp is determined by the server,
    # and we don't know what that is, so let's ignore it
    # and assume the unit tests verify it is properly updated.
    expect = dict(expect)
    del expect['updateTs']
    self.app_history.insert(0, expect)
    f50_builder = st.http_observer.HttpContractBuilder(self.agent)
    # These clauses are querying the Front50 http server directly
    # to verify that it returns the application we added.
    # We already verified the data was stored on GCS, but while we
    # are here we will verify that it is also being returned when queried.
    (f50_builder.new_clause_builder('Lists Application')
     .get_url_path('/default/applications')
     .contains_path_value('name', self.TEST_APP.upper()))
    (f50_builder.new_clause_builder('Returns Application')
     .get_url_path('/'.join(['/default/applications/name', self.TEST_APP]))
     .contains_path_value('', self.app_history[0]))
    for clause in f50_builder.build().clauses:
      contract.add_clause(clause)
    path = '/'.join(['/default/applications/name', self.TEST_APP])
    return st.OperationContract(
        self.new_post_operation(
            title='create_app', data=payload, path=path),
        contract=contract)
  def update_app(self):
    """Returns OperationContract mutating TEST_APP's attributes."""
    contract = jc.Contract()
    # Derive a modified spec: new provider, bumped timestamps, and every
    # other string attribute prefixed so the change is observable.
    spec = {}
    for name, value in self.initial_app_spec.items():
      if name == 'name':
        spec[name] = value
      elif name == 'cloudProviders':
        spec[name] = value + ',kubernetes'
      elif name in ['updateTs', 'createTs']:
        spec[name] = str(int(value) + 1)
      elif isinstance(value, basestring):
        spec[name] = 'NEW_' + value
    payload = self.agent.make_json_payload_from_object(spec)
    expectUpdate = dict(spec)
    # The actual update is determined by front50.
    # The createTs we gave is ignored.
    # As before, the name is upper-cased.
    del expectUpdate['updateTs']
    expectUpdate['createTs'] = self.initial_app_spec['createTs']
    expectUpdate['name'] = self.initial_app_spec['name'].upper()
    self.app_history.insert(0, expectUpdate)
    # TODO(ewiseblatt) 20160524:
    # Add clauses that observe Front50 to verify the history method works
    # and that the get method is the current version.
    # With bucket versioning, the update leaves two object versions.
    num_versions = 2 if self.versioning_enabled else 1
    gcs_builder = gcp.GcpStorageContractBuilder(self.gcs_observer)
    (gcs_builder.new_clause_builder('Google Cloud Storage Contains File',
                                    retryable_for_secs=5)
     .list_bucket(self.BUCKET,
                  '/'.join([self.BASE_PATH, 'applications', self.TEST_APP]),
                  with_versions=True)
     .contains_path_value('name', self.TEST_APP,
                          min=num_versions, max=num_versions))
    (gcs_builder.new_clause_builder('Updated File Content',
                                    retryable_for_secs=5)
     .retrieve_content(self.BUCKET,
                       '/'.join([self.BASE_PATH, 'applications', self.TEST_APP,
                                 'specification.json']),
                       transform=json.JSONDecoder().decode)
     .contains_path_value('', expectUpdate))
    for clause in gcs_builder.build().clauses:
      contract.add_clause(clause)
    f50_builder = st.http_observer.HttpContractBuilder(self.agent)
    (f50_builder.new_clause_builder('History Records Changes')
     .get_url_path('/default/applications/{app}/history'
                   .format(app=self.TEST_APP))
     .contains_path_value('[0]', self.app_history[0])
     .contains_path_value('[1]', self.app_history[1]))
    for clause in f50_builder.build().clauses:
      contract.add_clause(clause)
    # TODO(ewiseblatt): 20160524
    # Add a mechanism here to check the previous version
    # so that we can verify version recovery as well.
    path = '/default/applications'
    return st.OperationContract(
        self.new_put_operation(
            title='update_app', data=payload, path=path),
        contract=contract)
  def delete_app(self):
    """Returns OperationContract deleting TEST_APP (history retained)."""
    contract = jc.Contract()
    app_url_path = '/'.join(['/default/applications/name', self.TEST_APP])
    f50_builder = st.http_observer.HttpContractBuilder(self.agent)
    (f50_builder.new_clause_builder('Unlists Application')
     .get_url_path('/default/applications')
     .excludes_path_value('name', self.TEST_APP.upper()))
    (f50_builder.new_clause_builder('Deletes Application')
     .get_url_path(app_url_path, allow_http_error_status=404))
    (f50_builder.new_clause_builder('History Retains Application',
                                    retryable_for_secs=5)
     .get_url_path('/default/applications/{app}/history'
                   .format(app=self.TEST_APP))
     .contains_path_value('[0]', self.app_history[0])
     .contains_path_value('[1]', self.app_history[1]))
    for clause in f50_builder.build().clauses:
      contract.add_clause(clause)
    gcs_builder = gcp.GcpStorageContractBuilder(self.gcs_observer)
    (gcs_builder.new_clause_builder('Deleted File', retryable_for_secs=5)
     .list_bucket(self.BUCKET, '/'.join([self.BASE_PATH, 'applications']))
     .excludes_path_value('name', self.TEST_APP.upper()))
    for clause in gcs_builder.build().clauses:
      contract.add_clause(clause)
    return st.OperationContract(
        self.new_delete_operation(
            title='delete_app', data=None, path=app_url_path),
        contract=contract)
  def create_pipeline(self):
    """Returns OperationContract creating the test pipeline."""
    payload = self.agent.make_json_payload_from_object(
        self.initial_pipeline_spec)
    expect = dict(self.initial_pipeline_spec)
    expect['lastModifiedBy'] = 'anonymous'
    self.pipeline_history.insert(0, expect)
    contract = jc.Contract()
    gcs_builder = gcp.GcpStorageContractBuilder(self.gcs_observer)
    (gcs_builder.new_clause_builder('Created Google Cloud Storage File',
                                    retryable_for_secs=5)
     .list_bucket(self.BUCKET, '/'.join([self.BASE_PATH, 'pipelines']))
     .contains_path_value('name',
                          'pipelines/{id}/specification.json'
                          .format(id=self.TEST_PIPELINE_ID)))
    (gcs_builder.new_clause_builder('Wrote File Content')
     .retrieve_content(self.BUCKET,
                       '/'.join([self.BASE_PATH, 'pipelines',
                                 self.TEST_PIPELINE_ID, 'specification.json']),
                       transform=json.JSONDecoder().decode)
     .contains_path_eq('', expect))
    for clause in gcs_builder.build().clauses:
      contract.add_clause(clause)
    f50_builder = st.http_observer.HttpContractBuilder(self.agent)
    # These clauses are querying the Front50 http server directly
    # to verify that it returns the pipeline we added.
    # We already verified the data was stored on GCS, but while we
    # are here we will verify that it is also being returned when queried.
    (f50_builder.new_clause_builder('Global Lists Pipeline')
     .get_url_path('/pipelines')
     .contains_path_value('name', self.TEST_PIPELINE_NAME))
    (f50_builder.new_clause_builder('Application Lists Pipeline')
     .get_url_path('/pipelines/{app}'.format(app=self.TEST_APP))
     .contains_path_value('name', self.TEST_PIPELINE_NAME))
    (f50_builder.new_clause_builder('Returns Pipeline')
     .get_url_path('/pipelines/{id}/history'.format(id=self.TEST_PIPELINE_ID))
     .contains_path_value('[0]', self.pipeline_history[0]))
    for clause in f50_builder.build().clauses:
      contract.add_clause(clause)
    path = '/pipelines'
    return st.OperationContract(
        self.new_post_operation(
            title='create_pipeline', data=payload, path=path),
        contract=contract)
  def delete_pipeline(self):
    """Returns OperationContract deleting the test pipeline."""
    contract = jc.Contract()
    # Pipeline name may contain spaces, so it must be URL-quoted.
    app_url_path = 'pipelines/{app}/{pipeline}'.format(
        app=self.TEST_APP,
        pipeline=urllib.quote(self.TEST_PIPELINE_NAME))
    f50_builder = st.http_observer.HttpContractBuilder(self.agent)
    (f50_builder.new_clause_builder('Global Unlists Pipeline',
                                    retryable_for_secs=5)
     .get_url_path('/pipelines')
     .excludes_path_value('name', self.TEST_PIPELINE_NAME))
    (f50_builder.new_clause_builder('Application Unlists Pipeline',
                                    retryable_for_secs=5)
     .get_url_path('/pipelines/{app}'.format(app=self.TEST_APP))
     .excludes_path_value('id', self.TEST_PIPELINE_ID))
    (f50_builder.new_clause_builder('History Retains Pipeline',
                                    retryable_for_secs=5)
     .get_url_path('/pipelines/{id}/history'.format(id=self.TEST_PIPELINE_ID))
     .contains_path_value('[0]', self.pipeline_history[0]))
    for clause in f50_builder.build().clauses:
      contract.add_clause(clause)
    gcs_builder = gcp.GcpStorageContractBuilder(self.gcs_observer)
    (gcs_builder.new_clause_builder('Deleted File', retryable_for_secs=5)
     .list_bucket(self.BUCKET, '/'.join([self.BASE_PATH, 'pipelines']))
     .excludes_path_value('name', self.TEST_PIPELINE_ID))
    for clause in gcs_builder.build().clauses:
      contract.add_clause(clause)
    return st.OperationContract(
        self.new_delete_operation(
            title='delete_pipeline', data=None, path=app_url_path),
        contract=contract)
class GoogleFront50Test(st.AgentTestCase):
    """Executes the Front50 scenario operations as ordered test cases.

    Method names are alphabetical on purpose: the test runner executes them
    in sorted order, so the create steps (a/b/c) run before delete (y/z).
    """

    @property
    def scenario(self):
        # Shared scenario instance bound once by the global test runner.
        return citest.base.TestRunner.global_runner().get_shared_data(
            GoogleFront50TestScenario)

    def test_a_create_app(self):
        self.run_test_case(self.scenario.create_app())

    def test_b_update_app(self):
        self.run_test_case(self.scenario.update_app())

    def test_c_create_pipeline(self):
        self.run_test_case(self.scenario.create_pipeline())

    def test_y_delete_pipeline(self):
        self.run_test_case(self.scenario.delete_pipeline())

    def test_z_delete_app(self):
        self.run_test_case(self.scenario.delete_app())
def main():
    """Run this smoke test as a standalone program."""
    test_app = 'gcpfront50test' + GoogleFront50TestScenario.DEFAULT_TEST_ID
    return citest.base.TestRunner.main(
        parser_inits=[GoogleFront50TestScenario.initArgumentParser],
        default_binding_overrides={'TEST_APP': test_app},
        test_case_list=[GoogleFront50Test])


if __name__ == '__main__':
    sys.exit(main())
| apache-2.0 |
mfussenegger/cr8 | cr8/insert_json.py | 1 | 3145 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argh
import sys
from functools import partial
from argparse import FileType
from .cli import dicts_from_lines, to_int
from .misc import as_bulk_queries
from cr8 import aio, clients
from .metrics import Stats
from .log import format_stats
def to_insert(table, d):
    """Build a parameterized insert statement for ``table`` from ``d``.

    Args:
        table (str): table name
        d (dict): dictionary with column names as keys and values as values.

    Returns:
        tuple of statement and arguments

    >>> to_insert('doc.foobar', {'name': 'Marvin'})
    ('insert into doc.foobar ("name") values ($1)', ['Marvin'])
    """
    keys = list(d.keys())
    values = list(d.values())
    # Quote every column name and number the placeholders from $1 upward.
    column_sql = ', '.join('"{}"'.format(k) for k in keys)
    param_sql = ', '.join('${}'.format(i) for i in range(1, len(keys) + 1))
    statement = 'insert into {table} ({columns}) values ({params})'.format(
        table=table, columns=column_sql, params=param_sql)
    return (statement, values)
def print_only(lines, table):
    """Print the generated insert statements instead of executing them."""
    for record in dicts_from_lines(lines):
        print(to_insert(table, record))
    print('')
    print('No hosts provided. Nothing inserted')
@argh.arg('--table', help='Target table', required=True)
@argh.arg('-b', '--bulk-size', type=to_int)
@argh.arg('--hosts', help='crate hosts which will be used \
to execute the insert statement', type=str)
@argh.arg('-c', '--concurrency', type=to_int)
@argh.arg('-i', '--infile', type=FileType('r', encoding='utf-8'), default=sys.stdin)
@argh.arg('-of', '--output-fmt', choices=['json', 'text'], default='text')
@argh.wrap_errors([KeyboardInterrupt, BrokenPipeError] + clients.client_errors)
def insert_json(table=None,
                bulk_size=1000,
                concurrency=25,
                hosts=None,
                infile=None,
                output_fmt=None):
    """Insert JSON lines from a file or stdin into a CrateDB cluster.

    If no hosts are specified the statements will be printed.

    Args:
        table: Target table name.
        bulk_size: Bulk size of the insert statements.
        concurrency: Number of operations to run concurrently.
        hosts: hostname:port pairs of the Crate nodes
    """
    # Dry-run mode: without hosts, just echo the statements to stdout.
    if not hosts:
        return print_only(infile, table)

    # Lazily build (statement, args) pairs and group them into bulks.
    queries = (to_insert(table, d) for d in dicts_from_lines(infile))
    bulk_queries = as_bulk_queries(queries, bulk_size)
    # Status goes to stderr so stdout stays clean for the stats output.
    print('Executing inserts: bulk_size={} concurrency={}'.format(
        bulk_size, concurrency), file=sys.stderr)

    stats = Stats()
    with clients.client(hosts, concurrency=concurrency) as client:
        # Wrap execute_many so every bulk execution is timed into `stats`.
        f = partial(aio.measure, stats, client.execute_many)
        try:
            aio.run_many(f, bulk_queries, concurrency)
        except clients.SqlException as e:
            raise SystemExit(str(e))
    try:
        print(format_stats(stats.get(), output_fmt))
    except KeyError:
        # stats.get() raises KeyError when no samples were recorded,
        # i.e. the input stream produced no rows at all.
        if not stats.sampler.values:
            raise SystemExit('No data received via stdin')
        raise
def main():
    """Console entry point: dispatch the insert_json command via argh."""
    argh.dispatch_command(insert_json)


if __name__ == '__main__':
    main()
| mit |
wscullin/spack | var/spack/repos/builtin/packages/xkill/package.py | 3 | 1898 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xkill(AutotoolsPackage):
    """xkill is a utility for forcing the X server to close connections to
    clients.  This program is very dangerous, but is useful for aborting
    programs that have displayed undesired windows on a user's screen."""

    homepage = "http://cgit.freedesktop.org/xorg/app/xkill"
    url      = "https://www.x.org/archive/individual/app/xkill-1.0.4.tar.gz"

    # Release tarball identified by its md5 checksum.
    version('1.0.4', 'b04c15bfd0b619f1e4ff3e44607e738d')

    # X11 client libraries linked at build time.
    depends_on('libx11')
    depends_on('libxmu')

    # Build-only requirements (headers, pkg-config metadata, autoconf macros).
    depends_on('xproto@7.0.22:', type='build')
    depends_on('pkg-config@0.9.0:', type='build')
    depends_on('util-macros', type='build')
| lgpl-2.1 |
adrienbrault/home-assistant | homeassistant/components/goalzero/config_flow.py | 3 | 2629 | """Config flow for Goal Zero Yeti integration."""
import logging
from goalzero import Yeti, exceptions
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DEFAULT_NAME, DOMAIN
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)

# NOTE(review): DATA_SCHEMA does not appear to be referenced below — the user
# step builds its own schema with defaults; confirm before removing.
DATA_SCHEMA = vol.Schema({"host": str, "name": str})
class GoalZeroFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Goal Zero Yeti."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        errors = {}
        if user_input is not None:
            host = user_input[CONF_HOST]
            name = user_input[CONF_NAME]
            # Abort early if an entry for this host is already configured.
            if await self._async_endpoint_existed(host):
                return self.async_abort(reason="already_configured")
            error = None
            try:
                await self._async_try_connect(host)
            except exceptions.ConnectError:
                error = "cannot_connect"
                _LOGGER.error("Error connecting to device at %s", host)
            except exceptions.InvalidHost:
                error = "invalid_host"
                _LOGGER.error("Invalid host at %s", host)
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                error = "unknown"
            if error is None:
                return self.async_create_entry(
                    title=name,
                    data={CONF_HOST: host, CONF_NAME: name},
                )
            errors["base"] = error

        # Re-show the form, pre-filled with whatever the user already typed.
        prior = user_input or {}
        data_schema = vol.Schema(
            {
                vol.Required(
                    CONF_HOST, default=prior.get(CONF_HOST) or ""
                ): str,
                vol.Optional(
                    CONF_NAME, default=prior.get(CONF_NAME) or DEFAULT_NAME
                ): str,
            }
        )
        return self.async_show_form(
            step_id="user",
            data_schema=data_schema,
            errors=errors,
        )

    async def _async_endpoint_existed(self, endpoint):
        # True when some existing entry is already bound to this host.
        return any(
            endpoint == entry.data.get(CONF_HOST)
            for entry in self._async_current_entries()
        )

    async def _async_try_connect(self, host):
        # Probe the device once; connection problems surface as exceptions.
        session = async_get_clientsession(self.hass)
        await Yeti(host, self.hass.loop, session).get_state()
| mit |
crawfordsm/saltdes | saltdes/snextract.py | 1 | 8840 |
"""
SNEXTRACT
extract supernova spectra from image or images and combine them
"""
import os, sys, glob, shutil, string
import numpy as np
import pyfits
from scipy import ndimage as nd
from pyraf import iraf
from iraf import pysalt
from specsky import skysubtract
from specextract import extract, write_extract
from specslitnormalize import specslitnormalize
from specsens import specsens
from speccal import speccal
from PySpectrograph.Spectra import findobj
def snextract(img, calfile=None):
    """Extract supernova spectra from *img*, optionally flux-calibrating
    with *calfile*, and print the resulting file list."""
    #set up some files that will be needed
    logfile='specext.log'  # NOTE(review): never used below — confirm before removing
    #create the spectra text files for all of our objects
    spec_list=[]
    # NOTE(review): extract_spectra returns [ofile, airmass, exptime, propcode];
    # extend() flattens those four fields into spec_list rather than appending
    # one entry per spectrum — confirm downstream code expects this shape.
    spec_list.extend(extract_spectra(img, calfile=calfile, smooth=False, clobber=True))
    print spec_list
def speccombine(spec_list, obsdate):
    """Average N one-dimensional spectra onto the grid of the first spectrum.

    Each file in *spec_list* must contain wavelength, flux and error columns.
    Every subsequent spectrum is interpolated onto the first spectrum's
    wavelength grid and scaled by the median flux ratio to match the first
    spectrum before co-adding.  Errors are propagated in quadrature.

    Args:
        spec_list: list of ascii spectrum file names (wavelength flux error).
        obsdate: observation date string (currently unused; kept for callers).

    Returns:
        (w, f, e): wavelength grid, mean flux and propagated error arrays.
    """
    w1, f1, e1 = np.loadtxt(spec_list[0], usecols=(0, 1, 2), unpack=True)
    w = w1
    f = 1.0 * f1
    e = e1 ** 2
    for sfile in spec_list[1:]:
        w2, f2, e2 = np.loadtxt(sfile, usecols=(0, 1, 2), unpack=True)
        if2 = np.interp(w1, w2, f2)
        ie2 = np.interp(w1, w2, e2)
        # Scale the interpolated spectrum onto the first one before co-adding.
        # (Bug fix: the original scaled a throwaway copy of f2 and then added
        # the *unscaled* interpolated flux, so the scaling never took effect.)
        scale = np.median(f1 / if2)
        f += scale * if2
        e += (scale * ie2) ** 2
    f = f / len(spec_list)
    e = e ** 0.5 / len(spec_list)
    return w, f, e
def cleanspectra(w, f, e=None, dw=2, grow=6, neg=False):
    """Drop unreliable pixels from a spectrum.

    Pixels at non-positive wavelengths, within *dw* of the bright night-sky
    lines (and, if *neg*, with non-positive flux) are rejected; the rejected
    regions are then widened by *grow* pixels via a minimum filter.
    """
    if e is None:
        e = f * 0.0 + f.std()
    good = (w > 0)
    # reject the strong night-sky emission lines
    for skyline in (5577, 6300, 6364):
        good = good & (abs(w - skyline) >= dw)
    if neg:
        good = good & (f > 0)
    # widen each rejected region by the filter footprint
    good = nd.minimum_filter(good, size=grow)
    return w[good], f[good], e[good]
def write_spectra(sfile, w, f, e):
    """Write wavelength/flux/error triplets to *sfile*, one row per pixel,
    skipping rows whose flux is NaN.

    Bug fix: the original test was ``f[i]!=np.nan`` which is always True
    (NaN compares unequal to everything, including itself), so NaN rows
    were never filtered out; use np.isnan instead.  The file handle is now
    also closed via a context manager.
    """
    with open(sfile, 'w') as fout:
        for i in range(len(w)):
            if not np.isnan(f[i]):
                fout.write('%f %e %e\n' % (w[i], f[i], e[i]))
def normalizespectra(sfile, compfile):
    """Normalize spectra by the comparison object.

    Rescales the spectrum in *sfile* (in place) by the median flux ratio
    between the base star in *compfile* and the matching comparison
    spectrum observed alongside the target.
    """
    #read in the target spectrum (wavelength, flux, error columns)
    w,f,e=np.loadtxt(sfile, usecols=(0,1,2), unpack=True)
    #read in the comparison spectrum taken with the target
    # NOTE(review): the target name 'MCG-6-30-15' is hard-coded here, so this
    # function only works for that object's file naming scheme — confirm.
    cfile=sfile.replace('MCG-6-30-15', 'COMP')
    print cfile
    wc,fc,ec=np.loadtxt(cfile, usecols=(0,1,2), unpack=True)
    #read in the base star
    ws,fs,es=np.loadtxt(compfile, usecols=(0,1,2), unpack=True)
    #calculate the normalization: median ratio of base star to comparison,
    #with the comparison interpolated onto the base star's wavelength grid
    ifc=np.interp(ws, wc, fc)
    norm=np.median(fs/ifc)
    print norm
    f=norm*f
    e=norm*e
    #write out the result, overwriting the input file in place
    fout=open(sfile, 'w')
    for i in range(len(w)):
        fout.write('%f %e %e\n' % (w[i], f[i], e[i]))
    fout.close()
    #copy
def extract_spectra(img, yc=None, oy=10, dy=50, minsize=5, thresh=3, calfile=None, smooth=False, maskzeros=False, clobber=True):
    """Create a list of spectra for each of the objects in the images.

    Interactive: when *yc* (or the extraction sections) are not given, the
    image is displayed in ds9 and the user is prompted for the rows.
    Returns [ofile, airmass, exptime, propcode] for the extracted spectrum.
    """
    #okay we need to identify the objects for extraction and identify the regions for sky extraction
    #first find the objects in the image
    #skynormalize the data
    #specslitnormalize(img, 'n'+img, '', response=None, response_output=None, order=3, conv=1e-2, niter=20,
    #   startext=0, clobber=False,logfile='salt.log',verbose=True)
    print img
    hdu=pyfits.open(img)
    target=hdu[0].header['OBJECT']
    propcode=hdu[0].header['PROPID']
    airmass=hdu[0].header['AIRMASS']
    exptime=hdu[0].header['EXPTIME']
    if smooth:
        data=smooth_data(hdu[1].data)
    else:
        data=hdu[1].data
    #replace the zeros with the average from the frame
    if maskzeros:
        mean,std=iterstat(data[data>0])
        #rdata=mean np.random.normal(mean, std, size=data.shape)
        print mean, std
        data[data<=0]=mean #rdata[data<=0]
    # NOTE(review): `data` is not passed to extract() below, which works on
    # `hdu` directly — the smoothing/masking above may have no effect on the
    # basic-extraction path; confirm.
    #use manual intervention to get section
    #y1=517
    #y2=526
    #sy1=420
    #sy2=500
    if yc is None:
        # open the frame in ds9 so the user can pick extraction rows
        os.system('ds9 %s &' % img)
    print len(hdu)
    if len(hdu)==2:
        # single-extension image: simple aperture + sky-section extraction
        print 'Using basic extraction'
        if yc is None:
            y1=int(raw_input('y1:'))
            y2=int(raw_input('y2:'))
            sy1=int(raw_input('sky y1:'))
            sy2=int(raw_input('sky y2:'))
        ap_list=extract(hdu, method='normal', section=[(y1,y2)], minsize=minsize, thresh=thresh, convert=True)
        sk_list=extract(hdu, method='normal', section=[(sy1,sy2)], minsize=minsize, thresh=thresh, convert=True)
        # subtract the sky, scaled to the relative aperture sizes
        ap_list[0].ldata=ap_list[0].ldata-float(y2-y1)/(sy2-sy1)*sk_list[0].ldata
        ofile='%s.%s_%i.txt' % (target, extract_date(img), extract_number(img))
        write_extract(ofile, [ap_list[0]], outformat='ascii', clobber=clobber)
        w, f, e = np.loadtxt(ofile, usecols=(0,1,2), unpack=True)
        w, f, e=cleanspectra(w, f, e, neg=True)
        # keep only the well-calibrated wavelength range
        m = (w>3900)*(w<8100)
        write_spectra(ofile, w[m], f[m], e[m])
    else:
        # multi-extension image (data/variance/mask): fit-and-subtract sky,
        # then variance-weighted extraction around row yc
        print 'Using advanced extraction'
        if yc is None: yc=int(raw_input('yc:'))
        w0=hdu[1].header['CRVAL1']
        dw=hdu[1].header['CD1_1']
        xarr = np.arange(len(hdu[1].data[0]))
        warr=w0+dw*xarr
        print hdu[1].data[yc, 1462], hdu[2].data[yc,1462]
        warr, madata, var = skysub_region(warr, hdu[1].data, hdu[2].data, hdu[3].data, yc, oy, dy)
        # NOTE(review): madata/var returned by skysub_region are already
        # column-trimmed to [x1:x2]; the row slice below assumes yc-oy >= 0.
        w, f, e = masked_extract(warr, madata[yc-oy:yc+oy, :], var[yc-oy:yc+oy, :])
        print yc
        ofile='%s.%s_%i_%i.txt' % (target, extract_date(img), extract_number(img), yc)
        write_spectra(ofile, w, f, e)
    if calfile is not None:
        # flux-calibrate using the site extinction curve
        extfile=iraf.osfn("pysalt$data/site/suth_extinct.dat")
        speccal(ofile, ofile.replace("txt", "spec"), calfile, extfile, airmass, exptime, clobber=True, logfile='salt.log', verbose=True)
    spec_list=[ofile, airmass, exptime, propcode]
    return spec_list
def masked_extract(w, madata, var=None, grow=10):
    """Extraction of spectra from an array.  The extraction
    returns a weighted average of an array where the weighting
    is based on the distance from the center line and the
    variance of the frame.
    """
    print w.min(), w.max()
    # NOTE(review): hard-coded debug indices (10, 1062) assume a minimum
    # array size — confirm or remove.
    print madata[10, 1062], var[10,1062]
    ylen = len(madata)
    # weight falls off as 1/(distance from the central row + 1)
    ywei = abs((np.arange(ylen) - 0.5*ylen)) + 1
    ywei = 1.0 / ywei
    ywei.shape = (ylen, 1)
    if var is None:
        # NOTE(review): `f` is not defined yet at this point, so the
        # var=None path raises NameError — confirm var is always supplied.
        ewei = abs(f)
    else:
        # signal-to-variance weighting
        ewei = abs(madata)/var
    weights = ewei * ywei
    f = np.ma.average(madata, axis=0, weights=weights)
    # propagate the relative variance through the same weighted average
    e = np.ma.average(var/abs(madata), axis=0, weights=weights)
    e = e * abs(f)
    e = e**0.5 / ywei.sum()**0.5
    w, f, e = cleanspectra(w, f, e, grow=grow)
    return w,f,e
def skysub_region(warr, data, var, mask, yc, oy, dy, order=1, x1=670, x2=2100, grow=10):
    """Fit and subtract the sky background around the object row.

    For every column in [x1, x2), a first-order polynomial is fit to the
    rows within yc +/- dy, excluding masked pixels and the object region
    (within oy rows of the window centre), and subtracted from that window.

    Returns the wavelength array, sky-subtracted masked data and variance,
    all trimmed to the [x1:x2] column range.
    """
    madata = np.ma.array(data, mask=mask)
    lo, hi = yc - dy, yc + dy
    rowpix = np.arange(hi - lo)
    # rows far enough from the window centre to be treated as sky
    off_object = abs(rowpix - 0.5 * len(rowpix)) > oy
    for col in range(x1, x2):
        sky = data[lo:hi, col]
        keep = (mask[lo:hi, col] == 0) * off_object
        if sky[keep].any():
            coef = np.polyfit(rowpix[keep], sky[keep], 1)
            madata[lo:hi, col] = madata[lo:hi, col] - np.polyval(coef, rowpix)
    return warr[x1:x2], madata[:, x1:x2], var[:, x1:x2]
def smooth_data(data, mbox=25):
    """Return *data* with its median-filtered background (box size *mbox*)
    subtracted."""
    background = nd.median_filter(data, size=(mbox, mbox))
    return data - background
def find_section(section, y):
    """Return the index of the (y1, y2) section whose nearest edge is
    closest to *y*, or -1 if *section* is empty.  Ties keep the earlier
    section."""
    best_index = -1
    best_dist = 1e5
    for idx, sec in enumerate(section):
        dist = min(abs(sec[0] - y), abs(sec[1] - y))
        if dist < best_dist:
            best_index = idx
            best_dist = dist
    return best_index
def extract_number(img):
    """Return the frame number: the last four characters before '.fits',
    parsed as an integer."""
    stem = img.split('.fits')[0]
    return int(stem[-4:])
def extract_date(img):
    """Return the observation date as a zero-padded four-character string.

    The date is taken from characters -8:-4 of the file name stem (the four
    digits immediately preceding the frame number).

    Fix: the original used ``string.zfill``, which only exists in the
    Python 2 ``string`` module; ``str.zfill`` behaves identically and works
    on both Python 2 and 3.
    """
    stem = img.split('.fits')[0]
    obsdate = int(stem[-8:-4])
    return str(obsdate).zfill(4)
def iterstat(data, thresh=3, niter=5):
    """Sigma-clipped mean and standard deviation of *data*.

    Repeats *niter* times: keep only the points within *thresh* standard
    deviations of the current mean, then recompute mean and std from the
    kept points.
    """
    mean = data.mean()
    std = data.std()
    for _ in range(niter):
        keep = (abs(data - mean) < thresh * std)
        mean = data[keep].mean()
        std = data[keep].std()
    return mean, std
def findskysection(section, skysection=None, skylimit=100):
    """Look through the section lists and determine a section to measure the
    sky in.  It should be as close as possible to the center and about 200
    pixels wide.

    Fix: the default sky section used to be a mutable default argument
    ([800, 900]) that was modified in place, so adjustments leaked across
    calls; a fresh list is now created per call.  Callers passing their own
    list still see it adjusted in place, as before.
    """
    if skysection is None:
        skysection = [800, 900]
    #check to make sure it doesn't overlap any existing spectra
    #and adjust if necessary
    for y1, y2 in section:
        if -30 < (skysection[1] - y1) < 0:
            skysection[1] = y1 - 30
        if 0 < (skysection[0] - y2) < 30:
            skysection[0] = y2 + 30
    if skysection[1] - skysection[0] < skylimit:
        print("WARNING SMALL SKY SECTION")
    return skysection
# Command-line entry point: snextract <image.fits> <calfile>
if __name__=='__main__':
    snextract(sys.argv[1], calfile=sys.argv[2])
| bsd-3-clause |
muchu1983/104_cameo | cameo/spiderForINDIEGOGO.py | 1 | 3060 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
from subprocess import call
"""
以 shell script 執行 sikuli
從 parsed_resul 取得資訊
透過 sikuli 將 HTML 抓取到 source_html 下
"""
class SpiderForINDIEGOGO:
    """Drives sikuli scripts (via a shell command) that download INDIEGOGO
    HTML pages into the source_html folder, using information taken from
    parsed_result."""

    # Constructor: build the sub-command dispatch table and choose the
    # sikuli script (chrome variant; tor variant kept for reference).
    def __init__(self):
        self.dicSubCommandHandler = {
            "explore":self.handleExplorePage,
            "category":self.handleCategoryPage,
            "project":self.handleProjectPage,
            "individuals":self.handleIndividualsPage,
        }
        self.strSikuliFolderPath = r"cameo_sikuli\spiderForINDIEGOGO_chrome.sikuli"
        #self.strSikuliFolderPath = r"cameo_sikuli\spiderForINDIEGOGO_tor.sikuli"

    # Run the sikuli script with the given script arguments.
    # (Extracted helper: the four public handlers previously duplicated
    # this subprocess invocation verbatim.)
    def _runSikuli(self, *scriptArgs):
        call(
            [
                r"cameo_sikuli\runsikulix.cmd", "-c",
                r"-r", self.strSikuliFolderPath,
                r"--args"
            ] + list(scriptArgs)
        )

    # Download and parse the explore page (arg1 is accepted for dispatch
    # uniformity but unused).
    def handleExplorePage(self, arg1=None):
        self._runSikuli(r"explore")

    # Download and parse a category page (all categories when arg1 is None).
    def handleCategoryPage(self, arg1=None):
        if arg1:
            self._runSikuli(r"category", arg1)
        else:
            self._runSikuli(r"category")

    # Download and parse the project pages of the given category.
    def handleProjectPage(self, arg1=None):
        self._runSikuli(r"project", arg1)

    # Download and parse the individuals pages of the given category.
    def handleIndividualsPage(self, arg1=None):
        self._runSikuli(r"individuals", arg1)

    # Return the spider usage message.
    def getUseageMessage(self):
        return (
            "- INDIEGOGO -\n"
            "useage:\n"
            "explore - download explore.html\n"
            "category [category]- download category.html [of given category]\n"
            "project category - download project's pages of given category, if category==automode means all category\n"
            "individuals category - download individuals's pages of given category, if category==automode means all category\n"
        )

    # Run the spider: dispatch lstSubcommand[0] with optional argument.
    def runSpider(self, lstSubcommand=None):
        strSubcommand = lstSubcommand[0]
        strArg1 = None
        if len(lstSubcommand) == 2:
            strArg1 = lstSubcommand[1]
        self.dicSubCommandHandler[strSubcommand](strArg1)
self.dicSubCommandHandler[strSubcommand](strArg1) | bsd-3-clause |
jaja14/project4 | lib/werkzeug/testsuite/routing.py | 97 | 28826 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.routing
~~~~~~~~~~~~~~~~~~~~~~~~~~
Routing tests.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import routing as r
from werkzeug.wrappers import Response
from werkzeug.datastructures import ImmutableDict
from werkzeug.test import create_environ
class RoutingTestCase(WerkzeugTestCase):
def test_basic_routing(self):
map = r.Map([
r.Rule('/', endpoint='index'),
r.Rule('/foo', endpoint='foo'),
r.Rule('/bar/', endpoint='bar')
])
adapter = map.bind('example.org', '/')
assert adapter.match('/') == ('index', {})
assert adapter.match('/foo') == ('foo', {})
assert adapter.match('/bar/') == ('bar', {})
self.assert_raises(r.RequestRedirect, lambda: adapter.match('/bar'))
self.assert_raises(r.NotFound, lambda: adapter.match('/blub'))
adapter = map.bind('example.org', '/test')
try:
adapter.match('/bar')
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/test/bar/'
else:
self.fail('Expected request redirect')
adapter = map.bind('example.org', '/')
try:
adapter.match('/bar')
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/bar/'
else:
self.fail('Expected request redirect')
adapter = map.bind('example.org', '/')
try:
adapter.match('/bar', query_args={'aha': 'muhaha'})
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/bar/?aha=muhaha'
else:
self.fail('Expected request redirect')
adapter = map.bind('example.org', '/')
try:
adapter.match('/bar', query_args='aha=muhaha')
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/bar/?aha=muhaha'
else:
self.fail('Expected request redirect')
adapter = map.bind_to_environ(create_environ('/bar?foo=bar',
'http://example.org/'))
try:
adapter.match()
except r.RequestRedirect as e:
assert e.new_url == 'http://example.org/bar/?foo=bar'
else:
self.fail('Expected request redirect')
def test_environ_defaults(self):
environ = create_environ("/foo")
self.assert_strict_equal(environ["PATH_INFO"], '/foo')
m = r.Map([r.Rule("/foo", endpoint="foo"), r.Rule("/bar", endpoint="bar")])
a = m.bind_to_environ(environ)
self.assert_strict_equal(a.match("/foo"), ('foo', {}))
self.assert_strict_equal(a.match(), ('foo', {}))
self.assert_strict_equal(a.match("/bar"), ('bar', {}))
self.assert_raises(r.NotFound, a.match, "/bars")
def test_environ_nonascii_pathinfo(self):
environ = create_environ(u'/лошадь')
m = r.Map([
r.Rule(u'/', endpoint='index'),
r.Rule(u'/лошадь', endpoint='horse')
])
a = m.bind_to_environ(environ)
self.assert_strict_equal(a.match(u'/'), ('index', {}))
self.assert_strict_equal(a.match(u'/лошадь'), ('horse', {}))
self.assert_raises(r.NotFound, a.match, u'/барсук')
def test_basic_building(self):
map = r.Map([
r.Rule('/', endpoint='index'),
r.Rule('/foo', endpoint='foo'),
r.Rule('/bar/<baz>', endpoint='bar'),
r.Rule('/bar/<int:bazi>', endpoint='bari'),
r.Rule('/bar/<float:bazf>', endpoint='barf'),
r.Rule('/bar/<path:bazp>', endpoint='barp'),
r.Rule('/hehe', endpoint='blah', subdomain='blah')
])
adapter = map.bind('example.org', '/', subdomain='blah')
assert adapter.build('index', {}) == 'http://example.org/'
assert adapter.build('foo', {}) == 'http://example.org/foo'
assert adapter.build('bar', {'baz': 'blub'}) == 'http://example.org/bar/blub'
assert adapter.build('bari', {'bazi': 50}) == 'http://example.org/bar/50'
assert adapter.build('barf', {'bazf': 0.815}) == 'http://example.org/bar/0.815'
assert adapter.build('barp', {'bazp': 'la/di'}) == 'http://example.org/bar/la/di'
assert adapter.build('blah', {}) == '/hehe'
self.assert_raises(r.BuildError, lambda: adapter.build('urks'))
adapter = map.bind('example.org', '/test', subdomain='blah')
assert adapter.build('index', {}) == 'http://example.org/test/'
assert adapter.build('foo', {}) == 'http://example.org/test/foo'
assert adapter.build('bar', {'baz': 'blub'}) == 'http://example.org/test/bar/blub'
assert adapter.build('bari', {'bazi': 50}) == 'http://example.org/test/bar/50'
assert adapter.build('barf', {'bazf': 0.815}) == 'http://example.org/test/bar/0.815'
assert adapter.build('barp', {'bazp': 'la/di'}) == 'http://example.org/test/bar/la/di'
assert adapter.build('blah', {}) == '/test/hehe'
def test_defaults(self):
map = r.Map([
r.Rule('/foo/', defaults={'page': 1}, endpoint='foo'),
r.Rule('/foo/<int:page>', endpoint='foo')
])
adapter = map.bind('example.org', '/')
assert adapter.match('/foo/') == ('foo', {'page': 1})
self.assert_raises(r.RequestRedirect, lambda: adapter.match('/foo/1'))
assert adapter.match('/foo/2') == ('foo', {'page': 2})
assert adapter.build('foo', {}) == '/foo/'
assert adapter.build('foo', {'page': 1}) == '/foo/'
assert adapter.build('foo', {'page': 2}) == '/foo/2'
def test_greedy(self):
map = r.Map([
r.Rule('/foo', endpoint='foo'),
r.Rule('/<path:bar>', endpoint='bar'),
r.Rule('/<path:bar>/<path:blub>', endpoint='bar')
])
adapter = map.bind('example.org', '/')
assert adapter.match('/foo') == ('foo', {})
assert adapter.match('/blub') == ('bar', {'bar': 'blub'})
assert adapter.match('/he/he') == ('bar', {'bar': 'he', 'blub': 'he'})
assert adapter.build('foo', {}) == '/foo'
assert adapter.build('bar', {'bar': 'blub'}) == '/blub'
assert adapter.build('bar', {'bar': 'blub', 'blub': 'bar'}) == '/blub/bar'
def test_path(self):
map = r.Map([
r.Rule('/', defaults={'name': 'FrontPage'}, endpoint='page'),
r.Rule('/Special', endpoint='special'),
r.Rule('/<int:year>', endpoint='year'),
r.Rule('/<path:name>', endpoint='page'),
r.Rule('/<path:name>/edit', endpoint='editpage'),
r.Rule('/<path:name>/silly/<path:name2>', endpoint='sillypage'),
r.Rule('/<path:name>/silly/<path:name2>/edit', endpoint='editsillypage'),
r.Rule('/Talk:<path:name>', endpoint='talk'),
r.Rule('/User:<username>', endpoint='user'),
r.Rule('/User:<username>/<path:name>', endpoint='userpage'),
r.Rule('/Files/<path:file>', endpoint='files'),
])
adapter = map.bind('example.org', '/')
assert adapter.match('/') == ('page', {'name':'FrontPage'})
self.assert_raises(r.RequestRedirect, lambda: adapter.match('/FrontPage'))
assert adapter.match('/Special') == ('special', {})
assert adapter.match('/2007') == ('year', {'year':2007})
assert adapter.match('/Some/Page') == ('page', {'name':'Some/Page'})
assert adapter.match('/Some/Page/edit') == ('editpage', {'name':'Some/Page'})
assert adapter.match('/Foo/silly/bar') == ('sillypage', {'name':'Foo', 'name2':'bar'})
assert adapter.match('/Foo/silly/bar/edit') == ('editsillypage', {'name':'Foo', 'name2':'bar'})
assert adapter.match('/Talk:Foo/Bar') == ('talk', {'name':'Foo/Bar'})
assert adapter.match('/User:thomas') == ('user', {'username':'thomas'})
assert adapter.match('/User:thomas/projects/werkzeug') == \
('userpage', {'username':'thomas', 'name':'projects/werkzeug'})
assert adapter.match('/Files/downloads/werkzeug/0.2.zip') == \
('files', {'file':'downloads/werkzeug/0.2.zip'})
def test_dispatch(self):
env = create_environ('/')
map = r.Map([
r.Rule('/', endpoint='root'),
r.Rule('/foo/', endpoint='foo')
])
adapter = map.bind_to_environ(env)
raise_this = None
def view_func(endpoint, values):
if raise_this is not None:
raise raise_this
return Response(repr((endpoint, values)))
dispatch = lambda p, q=False: Response.force_type(adapter.dispatch(view_func, p,
catch_http_exceptions=q), env)
assert dispatch('/').data == b"('root', {})"
assert dispatch('/foo').status_code == 301
raise_this = r.NotFound()
self.assert_raises(r.NotFound, lambda: dispatch('/bar'))
assert dispatch('/bar', True).status_code == 404
def test_http_host_before_server_name(self):
env = {
'HTTP_HOST': 'wiki.example.com',
'SERVER_NAME': 'web0.example.com',
'SERVER_PORT': '80',
'SCRIPT_NAME': '',
'PATH_INFO': '',
'REQUEST_METHOD': 'GET',
'wsgi.url_scheme': 'http'
}
map = r.Map([r.Rule('/', endpoint='index', subdomain='wiki')])
adapter = map.bind_to_environ(env, server_name='example.com')
assert adapter.match('/') == ('index', {})
assert adapter.build('index', force_external=True) == 'http://wiki.example.com/'
assert adapter.build('index') == '/'
env['HTTP_HOST'] = 'admin.example.com'
adapter = map.bind_to_environ(env, server_name='example.com')
assert adapter.build('index') == 'http://wiki.example.com/'
def test_adapter_url_parameter_sorting(self):
map = r.Map([r.Rule('/', endpoint='index')], sort_parameters=True,
sort_key=lambda x: x[1])
adapter = map.bind('localhost', '/')
assert adapter.build('index', {'x': 20, 'y': 10, 'z': 30},
force_external=True) == 'http://localhost/?y=10&x=20&z=30'
def test_request_direct_charset_bug(self):
map = r.Map([r.Rule(u'/öäü/')])
adapter = map.bind('localhost', '/')
try:
adapter.match(u'/öäü')
except r.RequestRedirect as e:
assert e.new_url == 'http://localhost/%C3%B6%C3%A4%C3%BC/'
else:
self.fail('expected request redirect exception')
def test_request_redirect_default(self):
map = r.Map([r.Rule(u'/foo', defaults={'bar': 42}),
r.Rule(u'/foo/<int:bar>')])
adapter = map.bind('localhost', '/')
try:
adapter.match(u'/foo/42')
except r.RequestRedirect as e:
assert e.new_url == 'http://localhost/foo'
else:
self.fail('expected request redirect exception')
def test_request_redirect_default_subdomain(self):
map = r.Map([r.Rule(u'/foo', defaults={'bar': 42}, subdomain='test'),
r.Rule(u'/foo/<int:bar>', subdomain='other')])
adapter = map.bind('localhost', '/', subdomain='other')
try:
adapter.match(u'/foo/42')
except r.RequestRedirect as e:
assert e.new_url == 'http://test.localhost/foo'
else:
self.fail('expected request redirect exception')
def test_adapter_match_return_rule(self):
rule = r.Rule('/foo/', endpoint='foo')
map = r.Map([rule])
adapter = map.bind('localhost', '/')
assert adapter.match('/foo/', return_rule=True) == (rule, {})
def test_server_name_interpolation(self):
server_name = 'example.invalid'
map = r.Map([r.Rule('/', endpoint='index'),
r.Rule('/', endpoint='alt', subdomain='alt')])
env = create_environ('/', 'http://%s/' % server_name)
adapter = map.bind_to_environ(env, server_name=server_name)
assert adapter.match() == ('index', {})
env = create_environ('/', 'http://alt.%s/' % server_name)
adapter = map.bind_to_environ(env, server_name=server_name)
assert adapter.match() == ('alt', {})
env = create_environ('/', 'http://%s/' % server_name)
adapter = map.bind_to_environ(env, server_name='foo')
assert adapter.subdomain == '<invalid>'
def test_rule_emptying(self):
rule = r.Rule('/foo', {'meh': 'muh'}, 'x', ['POST'],
False, 'x', True, None)
rule2 = rule.empty()
assert rule.__dict__ == rule2.__dict__
rule.methods.add('GET')
assert rule.__dict__ != rule2.__dict__
rule.methods.discard('GET')
rule.defaults['meh'] = 'aha'
assert rule.__dict__ != rule2.__dict__
def test_rule_templates(self):
testcase = r.RuleTemplate(
[ r.Submount('/test/$app',
[ r.Rule('/foo/', endpoint='handle_foo')
, r.Rule('/bar/', endpoint='handle_bar')
, r.Rule('/baz/', endpoint='handle_baz')
]),
r.EndpointPrefix('${app}',
[ r.Rule('/${app}-blah', endpoint='bar')
, r.Rule('/${app}-meh', endpoint='baz')
]),
r.Subdomain('$app',
[ r.Rule('/blah', endpoint='x_bar')
, r.Rule('/meh', endpoint='x_baz')
])
])
url_map = r.Map(
[ testcase(app='test1')
, testcase(app='test2')
, testcase(app='test3')
, testcase(app='test4')
])
out = sorted([(x.rule, x.subdomain, x.endpoint)
for x in url_map.iter_rules()])
assert out == ([
('/blah', 'test1', 'x_bar'),
('/blah', 'test2', 'x_bar'),
('/blah', 'test3', 'x_bar'),
('/blah', 'test4', 'x_bar'),
('/meh', 'test1', 'x_baz'),
('/meh', 'test2', 'x_baz'),
('/meh', 'test3', 'x_baz'),
('/meh', 'test4', 'x_baz'),
('/test/test1/bar/', '', 'handle_bar'),
('/test/test1/baz/', '', 'handle_baz'),
('/test/test1/foo/', '', 'handle_foo'),
('/test/test2/bar/', '', 'handle_bar'),
('/test/test2/baz/', '', 'handle_baz'),
('/test/test2/foo/', '', 'handle_foo'),
('/test/test3/bar/', '', 'handle_bar'),
('/test/test3/baz/', '', 'handle_baz'),
('/test/test3/foo/', '', 'handle_foo'),
('/test/test4/bar/', '', 'handle_bar'),
('/test/test4/baz/', '', 'handle_baz'),
('/test/test4/foo/', '', 'handle_foo'),
('/test1-blah', '', 'test1bar'),
('/test1-meh', '', 'test1baz'),
('/test2-blah', '', 'test2bar'),
('/test2-meh', '', 'test2baz'),
('/test3-blah', '', 'test3bar'),
('/test3-meh', '', 'test3baz'),
('/test4-blah', '', 'test4bar'),
('/test4-meh', '', 'test4baz')
])
def test_non_string_parts(self):
m = r.Map([
r.Rule('/<foo>', endpoint='foo')
])
a = m.bind('example.com')
self.assert_equal(a.build('foo', {'foo': 42}), '/42')
def test_complex_routing_rules(self):
m = r.Map([
r.Rule('/', endpoint='index'),
r.Rule('/<int:blub>', endpoint='an_int'),
r.Rule('/<blub>', endpoint='a_string'),
r.Rule('/foo/', endpoint='nested'),
r.Rule('/foobar/', endpoint='nestedbar'),
r.Rule('/foo/<path:testing>/', endpoint='nested_show'),
r.Rule('/foo/<path:testing>/edit', endpoint='nested_edit'),
r.Rule('/users/', endpoint='users', defaults={'page': 1}),
r.Rule('/users/page/<int:page>', endpoint='users'),
r.Rule('/foox', endpoint='foox'),
r.Rule('/<path:bar>/<path:blub>', endpoint='barx_path_path')
])
a = m.bind('example.com')
assert a.match('/') == ('index', {})
assert a.match('/42') == ('an_int', {'blub': 42})
assert a.match('/blub') == ('a_string', {'blub': 'blub'})
assert a.match('/foo/') == ('nested', {})
assert a.match('/foobar/') == ('nestedbar', {})
assert a.match('/foo/1/2/3/') == ('nested_show', {'testing': '1/2/3'})
assert a.match('/foo/1/2/3/edit') == ('nested_edit', {'testing': '1/2/3'})
assert a.match('/users/') == ('users', {'page': 1})
assert a.match('/users/page/2') == ('users', {'page': 2})
assert a.match('/foox') == ('foox', {})
assert a.match('/1/2/3') == ('barx_path_path', {'bar': '1', 'blub': '2/3'})
assert a.build('index') == '/'
assert a.build('an_int', {'blub': 42}) == '/42'
assert a.build('a_string', {'blub': 'test'}) == '/test'
assert a.build('nested') == '/foo/'
assert a.build('nestedbar') == '/foobar/'
assert a.build('nested_show', {'testing': '1/2/3'}) == '/foo/1/2/3/'
assert a.build('nested_edit', {'testing': '1/2/3'}) == '/foo/1/2/3/edit'
assert a.build('users', {'page': 1}) == '/users/'
assert a.build('users', {'page': 2}) == '/users/page/2'
assert a.build('foox') == '/foox'
assert a.build('barx_path_path', {'bar': '1', 'blub': '2/3'}) == '/1/2/3'
def test_default_converters(self):
    """Subclasses may extend default_converters without mutating Map's own
    (immutable) registry."""
    class MyMap(r.Map):
        default_converters = r.Map.default_converters.copy()
        default_converters['foo'] = r.UnicodeConverter
    assert isinstance(r.Map.default_converters, ImmutableDict)
    m = MyMap([
        r.Rule('/a/<foo:a>', endpoint='a'),
        r.Rule('/b/<foo:b>', endpoint='b'),
        r.Rule('/c/<c>', endpoint='c')
    ], converters={'bar': r.UnicodeConverter})
    a = m.bind('example.org', '/')
    assert a.match('/a/1') == ('a', {'a': '1'})
    assert a.match('/b/2') == ('b', {'b': '2'})
    assert a.match('/c/3') == ('c', {'c': '3'})
    # The subclass addition must not leak back into the base class.
    assert 'foo' not in r.Map.default_converters
def test_build_append_unknown(self):
    """Unknown build arguments become query parameters unless
    append_unknown=False."""
    map = r.Map([
        r.Rule('/bar/<float:bazf>', endpoint='barf')
    ])
    adapter = map.bind('example.org', '/', subdomain='blah')
    assert adapter.build('barf', {'bazf': 0.815, 'bif' : 1.0}) == \
        'http://example.org/bar/0.815?bif=1.0'
    assert adapter.build('barf', {'bazf': 0.815, 'bif' : 1.0},
        append_unknown=False) == 'http://example.org/bar/0.815'
def test_method_fallback(self):
    """Building with or without an explicit method picks the matching rule
    when one endpoint is registered for several methods."""
    map = r.Map([
        r.Rule('/', endpoint='index', methods=['GET']),
        r.Rule('/<name>', endpoint='hello_name', methods=['GET']),
        r.Rule('/select', endpoint='hello_select', methods=['POST']),
        r.Rule('/search_get', endpoint='search', methods=['GET']),
        r.Rule('/search_post', endpoint='search', methods=['POST'])
    ])
    adapter = map.bind('example.com')
    assert adapter.build('index') == '/'
    assert adapter.build('index', method='GET') == '/'
    assert adapter.build('hello_name', {'name': 'foo'}) == '/foo'
    assert adapter.build('hello_select') == '/select'
    assert adapter.build('hello_select', method='POST') == '/select'
    assert adapter.build('search') == '/search_get'
    assert adapter.build('search', method='GET') == '/search_get'
    assert adapter.build('search', method='POST') == '/search_post'
def test_implicit_head(self):
    """GET rules implicitly accept HEAD; POST-only rules do not."""
    url_map = r.Map([
        r.Rule('/get', methods=['GET'], endpoint='a'),
        r.Rule('/post', methods=['POST'], endpoint='b')
    ])
    adapter = url_map.bind('example.org')
    assert adapter.match('/get', method='HEAD') == ('a', {})
    self.assert_raises(r.MethodNotAllowed, adapter.match,
                       '/post', method='HEAD')
def test_protocol_joining_bug(self):
    """Regression test: a colon in a path value must not be mistaken for a
    URL scheme separator when joining."""
    m = r.Map([r.Rule('/<foo>', endpoint='x')])
    a = m.bind('example.org')
    assert a.build('x', {'foo': 'x:y'}) == '/x:y'
    assert a.build('x', {'foo': 'x:y'}, force_external=True) == \
        'http://example.org/x:y'
def test_allowed_methods_querying(self):
    """allowed_methods() unions the methods of every rule matching a path."""
    m = r.Map([r.Rule('/<foo>', methods=['GET', 'HEAD']),
               r.Rule('/foo', methods=['POST'])])
    a = m.bind('example.org')
    assert sorted(a.allowed_methods('/foo')) == ['GET', 'HEAD', 'POST']
def test_external_building_with_port(self):
    """A port in the bound server name survives external URL building."""
    map = r.Map([
        r.Rule('/', endpoint='index'),
    ])
    adapter = map.bind('example.org:5000', '/')
    built_url = adapter.build('index', {}, force_external=True)
    assert built_url == 'http://example.org:5000/', built_url
def test_external_building_with_port_bind_to_environ(self):
    """Same as above, but binding from a WSGI environ with matching
    server_name."""
    map = r.Map([
        r.Rule('/', endpoint='index'),
    ])
    adapter = map.bind_to_environ(
        create_environ('/', 'http://example.org:5000/'),
        server_name="example.org:5000"
    )
    built_url = adapter.build('index', {}, force_external=True)
    assert built_url == 'http://example.org:5000/', built_url
def test_external_building_with_port_bind_to_environ_wrong_servername(self):
    """A server_name that disagrees with the environ host yields the
    '<invalid>' subdomain sentinel."""
    map = r.Map([
        r.Rule('/', endpoint='index'),
    ])
    environ = create_environ('/', 'http://example.org:5000/')
    adapter = map.bind_to_environ(environ, server_name="example.org")
    assert adapter.subdomain == '<invalid>'
def test_converter_parser(self):
    """parse_converter_args handles positional/keyword args, trailing commas,
    Python literals (bool/None) and quoted strings."""
    args, kwargs = r.parse_converter_args(u'test, a=1, b=3.0')
    assert args == ('test',)
    assert kwargs == {'a': 1, 'b': 3.0 }
    args, kwargs = r.parse_converter_args('')
    assert not args and not kwargs
    args, kwargs = r.parse_converter_args('a, b, c,')
    assert args == ('a', 'b', 'c')
    assert not kwargs
    args, kwargs = r.parse_converter_args('True, False, None')
    assert args == (True, False, None)
    args, kwargs = r.parse_converter_args('"foo", u"bar"')
    assert args == ('foo', 'bar')
def test_alias_redirects(self):
    """alias=True rules redirect to the canonical rule, preserving query
    args; building always produces the canonical URL."""
    m = r.Map([
        r.Rule('/', endpoint='index'),
        r.Rule('/index.html', endpoint='index', alias=True),
        r.Rule('/users/', defaults={'page': 1}, endpoint='users'),
        r.Rule('/users/index.html', defaults={'page': 1}, alias=True,
               endpoint='users'),
        r.Rule('/users/page/<int:page>', endpoint='users'),
        r.Rule('/users/page-<int:page>.html', alias=True, endpoint='users'),
    ])
    a = m.bind('example.com')
    def ensure_redirect(path, new_url, args=None):
        # Matching an alias must raise RequestRedirect to the canonical URL.
        try:
            a.match(path, query_args=args)
        except r.RequestRedirect as e:
            assert e.new_url == 'http://example.com' + new_url
        else:
            assert False, 'expected redirect'
    ensure_redirect('/index.html', '/')
    ensure_redirect('/users/index.html', '/users/')
    ensure_redirect('/users/page-2.html', '/users/page/2')
    ensure_redirect('/users/page-1.html', '/users/')
    ensure_redirect('/users/page-1.html', '/users/?foo=bar', {'foo': 'bar'})
    assert a.build('index') == '/'
    assert a.build('users', {'page': 1}) == '/users/'
    assert a.build('users', {'page': 2}) == '/users/page/2'
def test_double_defaults(self):
    """Two default values on overlapping rules resolve consistently for
    matching and building, with and without a static prefix."""
    for prefix in '', '/aaa':
        m = r.Map([
            r.Rule(prefix + '/', defaults={'foo': 1, 'bar': False}, endpoint='x'),
            r.Rule(prefix + '/<int:foo>', defaults={'bar': False}, endpoint='x'),
            r.Rule(prefix + '/bar/', defaults={'foo': 1, 'bar': True}, endpoint='x'),
            r.Rule(prefix + '/bar/<int:foo>', defaults={'bar': True}, endpoint='x')
        ])
        a = m.bind('example.com')
        assert a.match(prefix + '/') == ('x', {'foo': 1, 'bar': False})
        assert a.match(prefix + '/2') == ('x', {'foo': 2, 'bar': False})
        assert a.match(prefix + '/bar/') == ('x', {'foo': 1, 'bar': True})
        assert a.match(prefix + '/bar/2') == ('x', {'foo': 2, 'bar': True})
        assert a.build('x', {'foo': 1, 'bar': False}) == prefix + '/'
        assert a.build('x', {'foo': 2, 'bar': False}) == prefix + '/2'
        assert a.build('x', {'bar': False}) == prefix + '/'
        assert a.build('x', {'foo': 1, 'bar': True}) == prefix + '/bar/'
        assert a.build('x', {'foo': 2, 'bar': True}) == prefix + '/bar/2'
        assert a.build('x', {'bar': True}) == prefix + '/bar/'
def test_host_matching(self):
    """host_matching=True dispatches on the full host with <domain>
    placeholders; trailing-slash redirects still fire."""
    m = r.Map([
        r.Rule('/', endpoint='index', host='www.<domain>'),
        r.Rule('/', endpoint='files', host='files.<domain>'),
        r.Rule('/foo/', defaults={'page': 1}, host='www.<domain>', endpoint='x'),
        r.Rule('/<int:page>', host='files.<domain>', endpoint='x')
    ], host_matching=True)
    a = m.bind('www.example.com')
    assert a.match('/') == ('index', {'domain': 'example.com'})
    assert a.match('/foo/') == ('x', {'domain': 'example.com', 'page': 1})
    try:
        a.match('/foo')
    except r.RequestRedirect as e:
        assert e.new_url == 'http://www.example.com/foo/'
    else:
        assert False, 'expected redirect'
    a = m.bind('files.example.com')
    assert a.match('/') == ('files', {'domain': 'example.com'})
    assert a.match('/2') == ('x', {'domain': 'example.com', 'page': 2})
    try:
        # '/1' on files.* collides with the page-1 default, which lives on
        # www.*, so the redirect crosses hosts.
        a.match('/1')
    except r.RequestRedirect as e:
        assert e.new_url == 'http://www.example.com/foo/'
    else:
        assert False, 'expected redirect'
def test_server_name_casing(self):
    """Host matching is case-insensitive; a non-matching numeric host simply
    fails to match rather than raising at bind time."""
    m = r.Map([
        r.Rule('/', endpoint='index', subdomain='foo')
    ])
    env = create_environ()
    env['SERVER_NAME'] = env['HTTP_HOST'] = 'FOO.EXAMPLE.COM'
    a = m.bind_to_environ(env, server_name='example.com')
    assert a.match('/') == ('index', {})
    env = create_environ()
    env['SERVER_NAME'] = '127.0.0.1'
    env['SERVER_PORT'] = '5000'
    del env['HTTP_HOST']
    a = m.bind_to_environ(env, server_name='example.com')
    try:
        a.match()
    except r.NotFound:
        pass
    else:
        assert False, 'Expected not found exception'
def test_redirect_request_exception_code(self):
    """An overridden RequestRedirect.code is reflected in the response."""
    exc = r.RequestRedirect('http://www.google.com/')
    exc.code = 307
    env = create_environ()
    self.assert_strict_equal(exc.get_response(env).status_code, exc.code)
def test_unicode_rules(self):
    """Non-ASCII rules and hosts: paths are percent-encoded, hosts are
    IDNA-encoded, both when matching redirects and when building."""
    m = r.Map([
        r.Rule(u'/войти/', endpoint='enter'),
        r.Rule(u'/foo+bar/', endpoint='foobar')
    ])
    a = m.bind(u'☃.example.com')
    # NOTE(review): if no RequestRedirect is raised these try blocks pass
    # silently (no else-clause failure) — presumably intentional; confirm.
    try:
        a.match(u'/войти')
    except r.RequestRedirect as e:
        self.assert_strict_equal(e.new_url, 'http://xn--n3h.example.com/'
                                 '%D0%B2%D0%BE%D0%B9%D1%82%D0%B8/')
    endpoint, values = a.match(u'/войти/')
    self.assert_strict_equal(endpoint, 'enter')
    self.assert_strict_equal(values, {})
    try:
        a.match(u'/foo+bar')
    except r.RequestRedirect as e:
        self.assert_strict_equal(e.new_url, 'http://xn--n3h.example.com/'
                                 'foo+bar/')
    endpoint, values = a.match(u'/foo+bar/')
    self.assert_strict_equal(endpoint, 'foobar')
    self.assert_strict_equal(values, {})
    url = a.build('enter', {}, force_external=True)
    self.assert_strict_equal(url, 'http://xn--n3h.example.com/%D0%B2%D0%BE%D0%B9%D1%82%D0%B8/')
    url = a.build('foobar', {}, force_external=True)
    self.assert_strict_equal(url, 'http://xn--n3h.example.com/foo+bar/')
def test_map_repr(self):
    """repr(Map) lists the rules in match order."""
    m = r.Map([
        r.Rule(u'/wat', endpoint='enter'),
        r.Rule(u'/woop', endpoint='foobar')
    ])
    rv = repr(m)
    self.assert_strict_equal(rv,
        "Map([<Rule '/woop' -> foobar>, <Rule '/wat' -> enter>])")
def suite():
    """Collect the routing tests into a unittest.TestSuite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(RoutingTestCase))
    return suite
| apache-2.0 |
Aristocles/CouchPotatoServer | libs/pyutil/humanreadable.py | 106 | 4483 | # Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import exceptions, os
from repr import Repr
class BetterRepr(Repr):
    """A ``repr.Repr`` subclass producing more generous, human-readable
    output, tolerant of concurrently mutated lists/dicts (Python 2 code:
    ``func_code``/``im_func`` attributes, ``exceptions`` module).
    """
    def __init__(self):
        Repr.__init__(self)
        # Note: These levels can get adjusted dynamically! My goal is to get
        # more info when printing important debug stuff like exceptions and
        # stack traces and less info when logging normal events.
        # --Zooko 2000-10-14
        self.maxlevel = 6
        self.maxdict = 6
        self.maxlist = 6
        self.maxtuple = 6
        self.maxstring = 300
        self.maxother = 300

    def repr_function(self, obj, level):
        """Render a function as ``<name() at file:line>``."""
        if hasattr(obj, 'func_code'):
            return '<' + obj.func_name + '() at ' + os.path.basename(obj.func_code.co_filename) + ':' + str(obj.func_code.co_firstlineno) + '>'
        else:
            # Builtins have no func_code.  Bug fix: the original string
            # opened '<' but never closed it with '>'.
            return '<' + obj.func_name + '() at (builtin)>'

    def repr_instance_method(self, obj, level):
        """Render a bound method as ``<Class.name() at file:line>``."""
        if hasattr(obj, 'func_code'):
            return '<' + obj.im_class.__name__ + '.' + obj.im_func.__name__ + '() at ' + os.path.basename(obj.im_func.func_code.co_filename) + ':' + str(obj.im_func.func_code.co_firstlineno) + '>'
        else:
            # Bug fix: close the angle bracket (was missing '>').
            return '<' + obj.im_class.__name__ + '.' + obj.im_func.__name__ + '() at (builtin)>'

    def repr_long(self, obj, level):
        """Render a long, eliding the middle and stripping a trailing 'L'."""
        s = repr(obj)  # was `obj`; repr() is the equivalent, readable form
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong-3)/2)
            j = max(0, self.maxlong-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        if s[-1] == 'L':
            return s[:-1]
        return s

    def repr_instance(self, obj, level):
        """
        If it is an instance of Exception, format it nicely (trying to emulate
        the format that you see when an exception is actually raised, plus
        bracketing '<''s). If it is an instance of dict call self.repr_dict()
        on it. If it is an instance of list call self.repr_list() on it. Else
        call Repr.repr_instance().
        """
        if isinstance(obj, exceptions.Exception):
            # Don't cut down exception strings so much.
            tms = self.maxstring
            self.maxstring = max(512, tms * 4)
            tml = self.maxlist
            self.maxlist = max(12, tml * 4)
            try:
                if hasattr(obj, 'args'):
                    if len(obj.args) == 1:
                        return '<' + obj.__class__.__name__ + ': ' + self.repr1(obj.args[0], level-1) + '>'
                    else:
                        return '<' + obj.__class__.__name__ + ': ' + self.repr1(obj.args, level-1) + '>'
                else:
                    return '<' + obj.__class__.__name__ + '>'
            finally:
                # Restore the limits even though we return from the try.
                self.maxstring = tms
                self.maxlist = tml
        if isinstance(obj, dict):
            return self.repr_dict(obj, level)
        if isinstance(obj, list):
            return self.repr_list(obj, level)
        return Repr.repr_instance(self, obj, level)

    def repr_list(self, obj, level):
        """
        copied from standard repr.py and fixed to work on multithreadedly
        mutating lists (take a snapshot slice before iterating).
        """
        if level <= 0: return '[...]'
        n = len(obj)
        myl = obj[:min(n, self.maxlist)]
        s = ''
        for item in myl:
            entry = self.repr1(item, level-1)
            if s: s = s + ', '
            s = s + entry
        if n > self.maxlist: s = s + ', ...'
        return '[' + s + ']'

    def repr_dict(self, obj, level):
        """
        copied from standard repr.py and fixed to work on multithreadedly
        mutating dicts (snapshot items() before sorting/iterating).
        """
        if level <= 0: return '{...}'
        s = ''
        n = len(obj)
        items = obj.items()[:min(n, self.maxdict)]
        items.sort()
        for key, val in items:
            entry = self.repr1(key, level-1) + ':' + self.repr1(val, level-1)
            if s: s = s + ', '
            s = s + entry
        if n > self.maxdict: s = s + ', ...'
        return '{' + s + '}'
# This object can be changed by other code updating this module's "brepr"
# variables. This is so that (a) code can use humanreadable with
# "from humanreadable import hr; hr(mything)", and (b) code can override
# humanreadable to provide application-specific human readable output
# (e.g. libbase32's base32id.AbbrevRepr).
brepr = BetterRepr()

def hr(x):
    """Return a human-readable repr of *x* using the module-level `brepr`."""
    return brepr.repr(x)
| gpl-3.0 |
peak6/st2 | st2reactor/st2reactor/container/utils.py | 6 | 3896 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from st2common import log as logging
from st2common.constants.triggers import TRIGGER_INSTANCE_PENDING
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.db.trigger import TriggerInstanceDB
from st2common.persistence.trigger import TriggerInstance
from st2common.services import triggers as TriggerService
LOG = logging.getLogger('st2reactor.sensor.container_utils')
def create_trigger_instance(trigger, payload, occurrence_time, raise_on_no_trigger=False):
    """
    This creates a trigger instance object given trigger and payload.
    Trigger can be just a string reference (pack.name) or a ``dict`` containing 'id' or
    'uid' or type' and 'parameters' keys.

    :param trigger: Trigger reference or dictionary with trigger query filters.
    :type trigger: ``str`` or ``dict``

    :param payload: Trigger payload.
    :type payload: ``dict``

    :param occurrence_time: Time the trigger occurred.
    :param raise_on_no_trigger: If True, raise instead of returning None when
                                no matching TriggerDB is found.
    :rtype: TriggerInstanceDB or None
    """
    # TODO: This is nasty, this should take a unique reference and not a dict
    if isinstance(trigger, six.string_types):
        trigger_db = TriggerService.get_trigger_db_by_ref(trigger)
    else:
        # If id / uid is available we try to look up Trigger by id. This way we can avoid bug in
        # pymongo / mongoengine related to "parameters" dictionary lookups
        trigger_id = trigger.get('id', None)
        trigger_uid = trigger.get('uid', None)

        # TODO: Remove parameters dictionary look up when we can confirm each trigger dictionary
        # passed to this method always contains id or uid
        if trigger_id:
            LOG.debug('Looking up TriggerDB by id: %s', trigger_id)
            trigger_db = TriggerService.get_trigger_db_by_id(id=trigger_id)
        elif trigger_uid:
            LOG.debug('Looking up TriggerDB by uid: %s', trigger_uid)
            trigger_db = TriggerService.get_trigger_db_by_uid(uid=trigger_uid)
        else:
            # Last resort - look it up by parameters
            trigger_type = trigger.get('type', None)
            parameters = trigger.get('parameters', {})

            LOG.debug('Looking up TriggerDB by type and parameters: type=%s, parameters=%s',
                      trigger_type, parameters)
            trigger_db = TriggerService.get_trigger_db_given_type_and_params(type=trigger_type,
                                                                             parameters=parameters)

    if trigger_db is None:
        LOG.debug('No trigger in db for %s', trigger)
        if raise_on_no_trigger:
            # Bug fix: the message was passed as ('Trigger not found for %s', trigger)
            # which never interpolates; format it explicitly.
            raise StackStormDBObjectNotFoundError('Trigger not found for %s' % (trigger,))
        return None

    trigger_ref = trigger_db.get_reference().ref

    trigger_instance = TriggerInstanceDB()
    trigger_instance.trigger = trigger_ref
    trigger_instance.payload = payload
    trigger_instance.occurrence_time = occurrence_time
    # New instances start in the pending state until processed.
    trigger_instance.status = TRIGGER_INSTANCE_PENDING
    return TriggerInstance.add_or_update(trigger_instance)
def update_trigger_instance_status(trigger_instance, status):
    """Set *status* on the trigger instance and persist it to the DB."""
    trigger_instance.status = status
    return TriggerInstance.add_or_update(trigger_instance)
| apache-2.0 |
Workday/OpenFrame | third_party/WebKit/LayoutTests/http/tests/websocket/echo_wsh.py | 53 | 2032 | # Copyright 2009, 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
_GOODBYE_MESSAGE = u'Goodbye'
def web_socket_do_extra_handshake(request):
    """pywebsocket handshake hook: accept every connection unconditionally."""
    pass  # Always accept.
def web_socket_transfer_data(request):
    """Echo every received message back to the client.

    Text frames (py2 ``unicode``) are echoed as text; the connection closes
    after echoing the goodbye message. Binary frames are echoed as binary.
    Returns when the peer closes (receive_message() yields None).
    """
    while True:
        line = request.ws_stream.receive_message()
        if line is None:
            return
        if isinstance(line, unicode):  # Python-2-only builtin
            request.ws_stream.send_message(line, binary=False)
            if line == _GOODBYE_MESSAGE:
                return
        else:
            request.ws_stream.send_message(line, binary=True)
| bsd-3-clause |
SriHarshaGajavalli/SE2017 | home/forms.py | 1 | 1154 | from django import forms
from .models import *
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
import json
class PersonnelForm(forms.ModelForm):
    """ModelForm for creating/editing a Personnel record, with an extra
    password field (not part of the model fields below)."""
    password = forms.CharField(widget=forms.PasswordInput)

    class Meta:
        model = Personnel
        fields = ('Person_ID', 'Role', 'Dept')
#Form for taking input from user for email frequency
class ProfileForm(forms.ModelForm):
    """Form for taking input from a user for email notification frequency.

    Requires a ``user`` kwarg (the logged-in Django user); shows the
    username as a disabled field and upserts the user's NotificationTime.
    """
    # Placeholder definition; replaced in __init__ with a disabled field
    # pre-filled with the actual username.
    username = forms.CharField(label='dd')

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user')
        self.name = self.user.username
        super(ProfileForm, self).__init__(*args, **kwargs)
        self.fields['username'] = forms.CharField(label='Username', initial=self.name, disabled='disabled')

    class Meta:
        model = NotificationTime
        fields = ('Notification_time',)

    def save(self, commit=True):
        # NOTE: `commit` is accepted for ModelForm compatibility but this
        # override always persists via update_or_create.
        # Single lookup: the original fetched the same Personnel row twice
        # (once by LDAP_id, then again by its own Person_ID).
        person = Personnel.objects.get(LDAP_id=self.user.id)
        obj_, created = NotificationTime.objects.update_or_create(
            Personnel_ID=person,
            defaults={'Notification_time': self.cleaned_data.get('Notification_time')})
        return obj_
| mit |
Pikecillo/genna | external/PyXML-0.8.4/xml/dom/html/HTMLFrameElement.py | 10 | 3564 | ########################################################################
#
# File Name: HTMLFrameElement.py
#
#
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom.html.HTMLElement import HTMLElement
class HTMLFrameElement(HTMLElement):
    """DOM HTML FRAME element (4DOM, Python 2 code using the ``string``
    module). Attribute getters/setters map DOM properties onto the
    underlying HTML attributes.
    """

    def __init__(self, ownerDocument, nodeName="FRAME"):
        HTMLElement.__init__(self, ownerDocument, nodeName)
        # Lazily-parsed document referenced by SRC (see _get_contentDocument).
        self.__content = None

    ### Attribute Methods ###

    def _get_contentDocument(self):
        """Fetch and parse the document referenced by SRC, caching it."""
        if not self.__content:
            source = self._get_src()
            import os.path
            # Bug fix: os.path.splitext returns a (root, ext) tuple; the
            # original passed the whole tuple to string.find/string.lower.
            ext = os.path.splitext(source)[1]
            if string.find(ext, 'htm') > 0:
                from xml.dom.ext.reader import HtmlLib
                self.__content = HtmlLib.FromHtmlUrl(source)
            elif string.lower(ext) == '.xml':
                from xml.dom.ext.reader import Sax2
                self.__content = Sax2.FromXmlUrl(source)
        return self.__content

    def _get_frameBorder(self):
        return string.capitalize(self.getAttribute("FRAMEBORDER"))

    def _set_frameBorder(self, value):
        self.setAttribute("FRAMEBORDER", value)

    def _get_longDesc(self):
        return self.getAttribute("LONGDESC")

    def _set_longDesc(self, value):
        self.setAttribute("LONGDESC", value)

    def _get_marginHeight(self):
        return self.getAttribute("MARGINHEIGHT")

    def _set_marginHeight(self, value):
        self.setAttribute("MARGINHEIGHT", value)

    def _get_marginWidth(self):
        return self.getAttribute("MARGINWIDTH")

    def _set_marginWidth(self, value):
        self.setAttribute("MARGINWIDTH", value)

    def _get_name(self):
        return self.getAttribute("NAME")

    def _set_name(self, value):
        self.setAttribute("NAME", value)

    def _get_noResize(self):
        # Boolean attribute: presence means true.
        return self.hasAttribute("NORESIZE")

    def _set_noResize(self, value):
        if value:
            self.setAttribute("NORESIZE", "NORESIZE")
        else:
            self.removeAttribute("NORESIZE")

    def _get_scrolling(self):
        return string.capitalize(self.getAttribute("SCROLLING"))

    def _set_scrolling(self, value):
        self.setAttribute("SCROLLING", value)

    def _get_src(self):
        return self.getAttribute("SRC")

    def _set_src(self, value):
        self.setAttribute("SRC", value)

    ### Attribute Access Mappings ###

    _readComputedAttrs = HTMLElement._readComputedAttrs.copy()
    _readComputedAttrs.update({
        "contentDocument" : _get_contentDocument,
        "frameBorder" : _get_frameBorder,
        "longDesc" : _get_longDesc,
        "marginHeight" : _get_marginHeight,
        "marginWidth" : _get_marginWidth,
        "name" : _get_name,
        "noResize" : _get_noResize,
        "scrolling" : _get_scrolling,
        "src" : _get_src
        })

    _writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
    _writeComputedAttrs.update({
        "frameBorder" : _set_frameBorder,
        "longDesc" : _set_longDesc,
        "marginHeight" : _set_marginHeight,
        "marginWidth" : _set_marginWidth,
        "name" : _set_name,
        "noResize" : _set_noResize,
        "scrolling" : _set_scrolling,
        "src" : _set_src
        })

    # Read-only = readable but not writable (Python 2 idioms: filter/has_key).
    _readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
                            HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| gpl-2.0 |
kylerbrown/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
thomasalrin/Ghost | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/__init__.py | 95 | 2978 | # -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '1.6'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
    """
    Lex ``code`` with ``lexer`` and return an iterable of tokens.
    """
    try:
        tokens = lexer.get_tokens(code)
    except TypeError as exc:
        detail = exc.args[0]
        # A class (not an instance) was passed: old-style "unbound method"
        # TypeError message. Re-raise with a friendlier explanation.
        if isinstance(detail, str) and 'unbound method get_tokens' in detail:
            raise TypeError('lex() argument must be a lexer instance, not a class')
        raise
    return tokens
def format(tokens, formatter, outfile=None):
    """
    Format a tokenlist ``tokens`` with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.
    """
    try:
        if outfile:
            formatter.format(tokens, outfile)
            return None
        # No outfile: buffer in memory; byte formatters need BytesIO.
        buf = BytesIO() if formatter.encoding else StringIO()
        formatter.format(tokens, buf)
        return buf.getvalue()
    except TypeError as exc:
        detail = exc.args[0]
        if isinstance(detail, str) and 'unbound method format' in detail:
            raise TypeError('format() argument must be a formatter instance, not a class')
        raise
def highlight(code, lexer, formatter, outfile=None):
    """
    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object
    with a ``write`` method), the result will be written to it, otherwise
    it is returned as a string.

    Convenience wrapper: equivalent to ``format(lex(code, lexer), ...)``.
    """
    return format(lex(code, lexer), formatter, outfile)
if __name__ == '__main__':
    # Allow running the package as a command-line highlighter.
    from pygments.cmdline import main
    sys.exit(main(sys.argv))
| mit |
pwmarcz/django | tests/introspection/models.py | 30 | 1039 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Reporter(models.Model):
    """Reporter model used by the introspection tests; exercises a variety
    of field types plus a unique_together constraint."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    email = models.EmailField()
    facebook_user_id = models.BigIntegerField(null=True)
    raw_data = models.BinaryField(null=True)
    small_int = models.SmallIntegerField()

    class Meta:
        unique_together = ('first_name', 'last_name')

    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Article(models.Model):
    """Article model; exercises foreign keys (including self-referential),
    default ordering, and index_together."""
    headline = models.CharField(max_length=100)
    pub_date = models.DateField()
    reporter = models.ForeignKey(Reporter)
    # Self-referential FK for threaded responses.
    response_to = models.ForeignKey('self', null=True)

    def __str__(self):
        return self.headline

    class Meta:
        ordering = ('headline',)
        index_together = [
            ["headline", "pub_date"],
        ]
| bsd-3-clause |
Jeongseob/xen-coboost-sched | tools/xm-test/tests/block-create/01_block_attach_device_pos.py | 42 | 1059 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Murillo F. Bernardes <mfb@br.ibm.com>
import re
from XmTestLib import *
from XmTestLib.block_utils import *
# Positive test: attach a block device to a running domU and verify it
# appears in /proc/partitions inside the guest. (Python 2: old except syntax.)
if ENABLE_HVM_SUPPORT:
    SKIP("Block-attach not supported for HVM domains")

# Create a domain (default XmTestDomain, with our ramdisk)
domain = XmTestDomain()

try:
    console = domain.start()
except DomainError, e:
    if verbose:
        print "Failed to create test domain because:"
        print e.extra
    FAIL(str(e))

# Attach a console to it
try:
    console.setHistorySaveCmds(value=True)
    # Run 'ls'
    run = console.runCmd("ls")
except ConsoleError, e:
    saveLog(console.getHistory())
    FAIL(str(e))

block_attach(domain, "phy:ram1", "xvda1")

try:
    run = console.runCmd("cat /proc/partitions")
except ConsoleError, e:
    FAIL(str(e))

# Close the console
domain.closeConsole()

# Stop the domain (nice shutdown)
domain.stop()

# The attached device must be visible inside the guest.
if not re.search("xvda1", run["output"]):
    FAIL("Device is not actually connected to the domU")
| gpl-2.0 |
mahak/ansible | lib/ansible/plugins/cache/__init__.py | 13 | 12262 | # (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2018, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import os
import time
import errno
from abc import ABCMeta, abstractmethod
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import with_metaclass
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.plugins import AnsiblePlugin
from ansible.plugins.loader import cache_loader
from ansible.utils.collection_loader import resource_from_fqcr
from ansible.utils.display import Display
display = Display()
class BaseCacheModule(AnsiblePlugin):
    """Abstract base class for Ansible cache plugins.

    Subclasses implement a simple key/value store interface. Note: the
    abstractmethod decorators are advisory here — no ABCMeta metaclass is
    applied on this class itself.
    """

    # Backwards compat only. Just import the global display instead
    _display = display

    def __init__(self, *args, **kwargs):
        # Third party code is not using cache_loader to load plugin - fall back to previous behavior
        if not hasattr(self, '_load_name'):
            display.deprecated('Rather than importing custom CacheModules directly, use ansible.plugins.loader.cache_loader',
                               version='2.14', collection_name='ansible.builtin')
            # NOTE(review): the first assignment below is immediately
            # overwritten by the second — presumably dead code left from a
            # refactor; confirm before removing.
            self._load_name = self.__module__.split('.')[-1]
            self._load_name = resource_from_fqcr(self.__module__)
        super(BaseCacheModule, self).__init__()
        self.set_options(var_options=args, direct=kwargs)

    @abstractmethod
    def get(self, key):
        pass

    @abstractmethod
    def set(self, key, value):
        pass

    @abstractmethod
    def keys(self):
        pass

    @abstractmethod
    def contains(self, key):
        pass

    @abstractmethod
    def delete(self, key):
        pass

    @abstractmethod
    def flush(self):
        pass

    @abstractmethod
    def copy(self):
        pass
class BaseFileCacheModule(BaseCacheModule):
    """
    A caching module backed by file based storage.

    Keeps an in-memory dict (``self._cache``) in front of one file per key
    under ``self._cache_dir``; subclasses supply the serialization via
    ``_load``/``_dump``.
    """
    def __init__(self, *args, **kwargs):
        # Prefer per-plugin options; fall back to the global cache constants
        # when the options are not defined (get_option raises KeyError).
        try:
            super(BaseFileCacheModule, self).__init__(*args, **kwargs)
            self._cache_dir = self._get_cache_connection(self.get_option('_uri'))
            self._timeout = float(self.get_option('_timeout'))
        except KeyError:
            self._cache_dir = self._get_cache_connection(C.CACHE_PLUGIN_CONNECTION)
            self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
        self.plugin_name = resource_from_fqcr(self.__module__)
        self._cache = {}  # in-memory layer in front of the files on disk
        self.validate_cache_connection()

    def _get_cache_connection(self, source):
        """Expand user and environment variables in *source*.

        Returns None when *source* is falsy or not expandable (TypeError).
        """
        if source:
            try:
                return os.path.expanduser(os.path.expandvars(source))
            except TypeError:
                pass

    def validate_cache_connection(self):
        """Ensure the cache directory is configured, exists and is usable.

        :raises AnsibleError: when no directory is configured, it cannot be
            created, or it lacks read/write/search (rwx) permissions.
        """
        if not self._cache_dir:
            raise AnsibleError("error, '%s' cache plugin requires the 'fact_caching_connection' config option "
                               "to be set (to a writeable directory path)" % self.plugin_name)
        if not os.path.exists(self._cache_dir):
            try:
                os.makedirs(self._cache_dir)
            except (OSError, IOError) as e:
                raise AnsibleError("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e)))
        else:
            for x in (os.R_OK, os.W_OK, os.X_OK):
                if not os.access(self._cache_dir, x):
                    raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (
                        self.plugin_name, self._cache_dir))

    def _get_cache_file_name(self, key):
        """Return the on-disk path for *key*, honouring the optional prefix."""
        prefix = self.get_option('_prefix')
        if prefix:
            cachefile = "%s/%s%s" % (self._cache_dir, prefix, key)
        else:
            cachefile = "%s/%s" % (self._cache_dir, key)
        return cachefile

    def get(self, key):
        """ This checks the in memory cache first as the fact was not expired at 'gather time'
        and it would be problematic if the key did expire after some long running tasks and
        user gets 'undefined' error in the same play """
        if key not in self._cache:
            # Expired (or empty) keys are reported as missing.
            if self.has_expired(key) or key == "":
                raise KeyError
            cachefile = self._get_cache_file_name(key)
            try:
                value = self._load(cachefile)
                self._cache[key] = value
            except ValueError as e:
                # A parse failure means a corrupt file: remove it and fail
                # loudly so the user can regenerate the data.
                display.warning("error in '%s' cache plugin while trying to read %s : %s. "
                                "Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e)))
                self.delete(key)
                raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
                                   "It has been removed, so you can re-run your command now." % cachefile)
            except (OSError, IOError) as e:
                # I/O problems are reported as a plain cache miss.
                display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
                raise KeyError
            except Exception as e:
                raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e)))
        return self._cache.get(key)

    def set(self, key, value):
        """Store *value* in memory and best-effort persist it to disk."""
        # Update memory first so readers in the same run see the new value
        # even if the disk write fails (which only emits a warning).
        self._cache[key] = value
        cachefile = self._get_cache_file_name(key)
        try:
            self._dump(value, cachefile)
        except (OSError, IOError) as e:
            display.warning("error in '%s' cache plugin while trying to write to %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))

    def has_expired(self, key):
        """Return True when the file backing *key* is older than the timeout.

        A timeout of 0 disables expiry; a missing or unstat-able file is
        treated as not expired (the read path reports those cases itself).
        """
        if self._timeout == 0:
            return False
        cachefile = self._get_cache_file_name(key)
        try:
            st = os.stat(cachefile)
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                return False
            else:
                display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
                return False
        if time.time() - st.st_mtime <= self._timeout:
            return False
        # Expired on disk: drop the stale in-memory copy as well.
        if key in self._cache:
            del self._cache[key]
        return True

    def keys(self):
        # When using a prefix we must remove it from the key name before
        # checking the expiry and returning it to the caller. Keys that do not
        # share the same prefix cannot be fetched from the cache.
        prefix = self.get_option('_prefix')
        prefix_length = len(prefix)
        keys = []
        for k in os.listdir(self._cache_dir):
            # Skip hidden files and entries from other prefixes.
            if k.startswith('.') or not k.startswith(prefix):
                continue
            k = k[prefix_length:]
            if not self.has_expired(k):
                keys.append(k)
        return keys

    def contains(self, key):
        """Return True when *key* has a fresh entry in memory or on disk."""
        cachefile = self._get_cache_file_name(key)
        if key in self._cache:
            return True
        if self.has_expired(key):
            return False
        try:
            os.stat(cachefile)
            return True
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                return False
            else:
                display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
                # NOTE(review): on unexpected stat errors this falls through
                # and implicitly returns None (falsy).

    def delete(self, key):
        """Remove *key* from memory and best-effort remove its file."""
        try:
            del self._cache[key]
        except KeyError:
            pass
        try:
            os.remove(self._get_cache_file_name(key))
        except (OSError, IOError):
            pass  # TODO: only pass on non existing?

    def flush(self):
        """Clear the in-memory dict and delete every non-expired file."""
        self._cache = {}
        for key in self.keys():
            self.delete(key)

    def copy(self):
        """Return a plain dict snapshot of all non-expired entries."""
        ret = dict()
        for key in self.keys():
            ret[key] = self.get(key)
        return ret

    @abstractmethod
    def _load(self, filepath):
        """
        Read data from a filepath and return it as a value

        :arg filepath: The filepath to read from.
        :returns: The value stored in the filepath

        This method reads from the file on disk and takes care of any parsing
        and transformation of the data before returning it.  The value
        returned should be what Ansible would expect if it were uncached data.

        .. note:: Filehandles have advantages but calling code doesn't know
            whether this file is text or binary, should be decoded, or accessed
            via a library function.  Therefore the API uses a filepath and
            opens the file inside of the method.
        """
        pass

    @abstractmethod
    def _dump(self, value, filepath):
        """
        Write data to a filepath

        :arg value: The value to store
        :arg filepath: The filepath to store it at
        """
        pass
class CachePluginAdjudicator(MutableMapping):
    """
    Intermediary between a cache dictionary and a CacheModule.

    Keeps an in-memory dict (``self._cache``) in front of the backing cache
    plugin and tracks what was last read from/written to the plugin
    (``self._retrieved``) so writes can be skipped when nothing changed.
    Keys are lazily fetched from the plugin on first access.
    """
    def __init__(self, plugin_name='memory', **kwargs):
        self._cache = {}
        self._retrieved = {}
        self._plugin = cache_loader.get(plugin_name, **kwargs)
        if not self._plugin:
            raise AnsibleError('Unable to load the cache plugin (%s).' % plugin_name)
        self._plugin_name = plugin_name

    def update_cache_if_changed(self):
        """Push the in-memory cache to the plugin only if it changed."""
        if self._retrieved != self._cache:
            self.set_cache()

    def set_cache(self):
        """Write every top-level key to the backing plugin."""
        for top_level_cache_key in self._cache.keys():
            self._plugin.set(top_level_cache_key, self._cache[top_level_cache_key])
        # Deep-copy so later in-place mutations of cached values are detected
        # by update_cache_if_changed().
        self._retrieved = copy.deepcopy(self._cache)

    def load_whole_cache(self):
        """Populate the in-memory dict with everything the plugin has."""
        for key in self._plugin.keys():
            self._cache[key] = self._plugin.get(key)

    def __repr__(self):
        return to_text(self._cache)

    def __iter__(self):
        return iter(self.keys())

    def __len__(self):
        return len(self.keys())

    def _do_load_key(self, key):
        """Return True when *key* should be lazily fetched from the plugin."""
        return all([
            key not in self._cache,
            key not in self._retrieved,
            self._plugin_name != 'memory',
            self._plugin.contains(key),
        ])

    def __getitem__(self, key):
        if self._do_load_key(key):
            try:
                self._cache[key] = self._plugin.get(key)
            except KeyError:
                # The plugin could not deliver the key after all (e.g. it
                # expired in between); fall through to the in-memory lookup,
                # which raises KeyError if the key is absent.
                pass
            else:
                self._retrieved[key] = self._cache[key]
        return self._cache[key]

    def get(self, key, default=None):
        """Like __getitem__ but return *default* instead of raising."""
        # Delegate to __getitem__ so the lazy-load logic lives in one place.
        try:
            return self[key]
        except KeyError:
            return default

    def items(self):
        # NOTE: reflects only the in-memory dict, not unloaded plugin keys.
        return self._cache.items()

    def values(self):
        return self._cache.values()

    def keys(self):
        return self._cache.keys()

    def pop(self, key, *args):
        """Remove *key* from the in-memory dict, with optional default."""
        if args:
            return self._cache.pop(key, args[0])
        return self._cache.pop(key)

    def __delitem__(self, key):
        del self._cache[key]

    def __setitem__(self, key, value):
        self._cache[key] = value

    def flush(self):
        """Flush the backing plugin and clear the in-memory dict."""
        self._plugin.flush()
        self._cache = {}

    def update(self, value):
        self._cache.update(value)
| gpl-3.0 |
nrhine1/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standard estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager

from sklearn.covariance import EmpiricalCovariance, MinCovDet

# example settings
n_samples = 80
n_features = 5
repeat = 10

# Outlier counts must be integers: they are used below as an array size and
# as a slice bound (float indices are a TypeError on Python 3 / modern NumPy).
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)

# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))

# computation
for i, n_outliers in enumerate(range_n_outliers):
    for j in range(repeat):
        rng = np.random.RandomState(i * j)

        # generate data
        X = rng.randn(n_samples, n_features)
        # add some outliers; use the seeded ``rng`` (not the global
        # ``np.random``) so each (i, j) run is reproducible
        outliers_index = rng.permutation(n_samples)[:n_outliers]
        outliers_offset = 10. * \
            (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
        X[outliers_index] += outliers_offset
        inliers_mask = np.ones(n_samples).astype(bool)
        inliers_mask[outliers_index] = False

        # fit a Minimum Covariance Determinant (MCD) robust estimator to data
        mcd = MinCovDet().fit(X)
        # compare raw robust estimates with the true location and covariance
        err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
        err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
        # compare estimators learned from the full data set with true
        # parameters
        err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
        err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
            np.eye(n_features))
        # compare with an empirical covariance learned from a pure data set
        # (i.e. "perfect" mcd)
        pure_X = X[inliers_mask]
        pure_location = pure_X.mean(0)
        pure_emp_cov = EmpiricalCovariance().fit(pure_X)
        err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
        err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))

# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
             yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
             label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
             yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
             label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
             yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
             label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)

plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
             yerr=err_cov_mcd.std(1),
             label="Robust covariance (mcd)", color='m')
# Integer division (//) keeps the slice bounds ints on Python 3.
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
             yerr=err_cov_emp_pure.std(1),
             label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)

plt.show()
| bsd-3-clause |
netgroup/Dreamer-Mininet-Extensions | test.py | 1 | 7341 | #!/usr/bin/python
from mininet_extensions import MininetOSHI
from utility import unmountAll, PropertiesGenerator
import subprocess
def test_add_nodes_and_add_link():
verbose=True
net = MininetOSHI(verbose)
generator = PropertiesGenerator(False)
cr_os = ["cro1","cro2","cro3"]
pe_os = ["peo1","peo2","peo3"]
ctrls = ["ctr1","ctr2","ctr3"]
sws = ["swi1","swi2","swi3"]
ce_os = ["cer1","cer2","cer3"]
net1 = [("cro1","swi1"),("swi1","cro2"),("cro3","swi1")]
net2 = [("peo1","cro1")]
net3 = [("cro2","peo2")]
net4 = [("peo3","cro3")]
net5 = [("cer1","peo1")]
net6 = [("cer2","peo2")]
net7 = [("cer3","peo3")]
net8 = [("cro1","ctr1")]
net9 = [("ctr2","cro2")]
net10 = [("cro3","ctr3")]
vlls = [("cer1","cer2"), ("cer2","cer3"), ("cer3","cer1")]
cr_prop = generator.getVerticesProperties(cr_os)
pe_prop = generator.getVerticesProperties(pe_os)
ct_prop = generator.getVerticesProperties(ctrls)
sw_prop = generator.getVerticesProperties(sws)
ce_prop = generator.getVerticesProperties(ce_os)
net1_properties = generator.getLinksProperties(net1)
net2_properties = generator.getLinksProperties(net2)
net3_properties = generator.getLinksProperties(net3)
net4_properties = generator.getLinksProperties(net4)
net5_properties = generator.getLinksProperties(net5)
net6_properties = generator.getLinksProperties(net6)
net7_properties = generator.getLinksProperties(net7)
net8_properties = generator.getLinksProperties(net8)
net9_properties = generator.getLinksProperties(net9)
net10_properties = generator.getLinksProperties(net10)
vlls_properties = []
for vll in vlls:
vll_properties = generator.getVLLsProperties(vll)
vlls_properties.append(vll_properties)
print "*** Create Core OSHI"
i = 0
for i in range(0, len(cr_os)):
cr_oshi = net.addCrOSHI(name = cr_os[i], params = cr_prop[i])
i = i + 1
print "*** Create Provider Edge OSHI"
i = 0
for i in range(0, len(pe_os)):
pe_oshi = net.addPeOSHI(name = pe_os[i], params = pe_prop[i])
i = i + 1
print "*** Create LegacyL2Switch"
i = 0
for i in range(0, len(sws)):
switch = net.addSwitch(name = sws[i])
i = i + 1
print "*** Create Controllers"
i = 0
for i in range(0, len(cr_os)):
ctrl = net.addController(name = ctrls[i])
i = i + 1
print "*** Create Customer Edge Router"
i = 0
for i in range(0, len(ce_os)):
ce_router = net.addCeRouter(name = ce_os[i])
i = i + 1
i = 0
for link in net1:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net1_properties[i])
i = i + 1
i = 0
for link in net2:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net2_properties[i])
i = i + 1
i = 0
for link in net3:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net3_properties[i])
i = i + 1
i = 0
for link in net4:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net4_properties[i])
i = i + 1
i = 0
for link in net5:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net5_properties[i])
i = i + 1
i = 0
for link in net6:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net6_properties[i])
i = i + 1
i = 0
for link in net7:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net7_properties[i])
i = i + 1
i = 0
for link in net8:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net8_properties[i])
i = i + 1
i = 0
for link in net9:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net9_properties[i])
i = i + 1
i = 0
for link in net10:
lhs = net.getNodeByName(link[0])
rhs = net.getNodeByName(link[1])
net.addLink(lhs, rhs, net10_properties[i])
i = i + 1
net.start()
print "*** Press any key to continue"
a=raw_input()
net.stop()
subprocess.call(["sudo", "mn", "-c"], stdout=None, stderr=None)
unmountAll()
def test_properties_generator():
verbose=True
net = MininetOSHI(verbose)
generator = PropertiesGenerator(False)
cr_os = ["cro1","cro2","cro3"]
pe_os = ["peo1","peo2","peo3"]
ctrls = ["ctr1","ctr2","ctr3"]
sws = ["swi1","swi2","swi3"]
ce_os = ["cer1","cer2","cer3"]
mgm_os = ["mgm1"]
net1 = [("cro1","swi1"),("swi1","cro2"),("cro3","swi1")]
net2 = [("peo1","cro1")]
net3 = [("cro2","peo2")]
net4 = [("peo3","cro3")]
net5 = [("cer1","peo1")]
net6 = [("cer2","peo2")]
net7 = [("cer3","peo3")]
net8 = [("cro1","mgm1")]
print "Vertices Properties"
cr_prop = generator.getVerticesProperties(cr_os)
i = 0
for cr_property in cr_prop:
print "%s -> %s" %(cr_os[i], cr_property)
i = i + 1
i = 0
pe_prop = generator.getVerticesProperties(pe_os)
i = 0
for pe_property in pe_prop:
print "%s -> %s" %(pe_os[i], pe_property)
i = i + 1
i = 0
ct_prop = generator.getVerticesProperties(ctrls)
i = 0
for ct_property in ct_prop:
print "%s -> %s" %(ctrls[i], ct_property)
i = i + 1
i = 0
sw_prop = generator.getVerticesProperties(sws)
i = 0
for sw_property in sw_prop:
print "%s -> %s" %(sws[i], sw_property)
i = i + 1
i = 0
ce_prop = generator.getVerticesProperties(ce_os)
i = 0
for ce_property in ce_prop:
print "%s -> %s" %(ce_os[i], ce_property)
i = i + 1
i = 0
mgm_prop = generator.getVerticesProperties(mgm_os)
for mgm_property in mgm_prop:
print "%s -> %s" %(mgm_os[i], mgm_property)
i = i + 1
print "###################################################"
properties = generator.getLinksProperties(net1)
print "Net1 Properties"
i = 0
for l_property in properties:
print "%s -> %s" %(net1[i],l_property)
i = i + 1
properties = generator.getLinksProperties(net2)
print "Net2 Properties"
i = 0
for l_property in properties:
print "%s -> %s" %(net2[i],l_property)
i = i + 1
properties = generator.getLinksProperties(net3)
print "Net3 Properties"
i = 0
for l_property in properties:
print "%s -> %s" %(net3[i],l_property)
i = i + 1
properties = generator.getLinksProperties(net4)
print "Net4 Properties"
i = 0
for l_property in properties:
print "%s -> %s" %(net4[i],l_property)
i = i + 1
properties = generator.getLinksProperties(net5)
print "Net5 Properties"
i = 0
for l_property in properties:
print "%s -> %s" %(net5[i],l_property)
i = i + 1
properties = generator.getLinksProperties(net6)
print "Net6 Properties"
i = 0
for l_property in properties:
print "%s -> %s" %(net6[i],l_property)
i = i + 1
properties = generator.getLinksProperties(net7)
print "Net7 Properties"
i = 0
for l_property in properties:
print "%s -> %s" %(net7[i],l_property)
i = i + 1
properties = generator.getLinksProperties(net8)
print "Net8 Properties"
i = 0
for l_property in properties:
print "%s -> %s" %(net8[i],l_property)
i = i + 1
print "###################################################"
print "VLLs Properties"
vlls = [("cer1","cer2"), ("cer2","cer3"), ("cer3","cer1")]
for vll in vlls:
print "%s -> %s" %(vll, generator.getVLLsProperties(vll))
print "###################################################"
subprocess.call(["sudo", "mn", "-c"], stdout=None, stderr=None)
unmountAll()
# Script entry point: only the generator smoke test runs by default.
# test_add_nodes_and_add_link builds and starts an emulated network and
# blocks on keyboard input, so it must be invoked explicitly.
if __name__ == '__main__':
    test_properties_generator()
| apache-2.0 |
pnorman/mapnik | scons/scons-local-2.4.1/SCons/Warnings.py | 6 | 6921 | #
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""SCons.Warnings
This file implements the warnings framework for SCons.
"""
__revision__ = "src/engine/SCons/Warnings.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import sys
import SCons.Errors
class Warning(SCons.Errors.UserError):
pass
class WarningOnByDefault(Warning):
pass
# NOTE: If you add a new warning class, add it to the man page, too!
class TargetNotBuiltWarning(Warning): # Should go to OnByDefault
pass
class CacheWriteErrorWarning(Warning):
pass
class CorruptSConsignWarning(WarningOnByDefault):
pass
class DependencyWarning(Warning):
pass
class DevelopmentVersionWarning(WarningOnByDefault):
pass
class DuplicateEnvironmentWarning(WarningOnByDefault):
pass
class FutureReservedVariableWarning(WarningOnByDefault):
pass
class LinkWarning(WarningOnByDefault):
pass
class MisleadingKeywordsWarning(WarningOnByDefault):
pass
class MissingSConscriptWarning(WarningOnByDefault):
pass
class NoMD5ModuleWarning(WarningOnByDefault):
pass
class NoMetaclassSupportWarning(WarningOnByDefault):
pass
class NoObjectCountWarning(WarningOnByDefault):
pass
class NoParallelSupportWarning(WarningOnByDefault):
pass
class ReservedVariableWarning(WarningOnByDefault):
pass
class StackSizeWarning(WarningOnByDefault):
pass
class VisualCMissingWarning(WarningOnByDefault):
pass
# Used when MSVC_VERSION and MSVS_VERSION do not point to the
# same version (MSVS_VERSION is deprecated)
class VisualVersionMismatch(WarningOnByDefault):
pass
class VisualStudioMissingWarning(Warning):
pass
class FortranCxxMixWarning(LinkWarning):
pass
# Deprecation warnings
class FutureDeprecatedWarning(Warning):
pass
class DeprecatedWarning(Warning):
pass
class MandatoryDeprecatedWarning(DeprecatedWarning):
pass
# Special case; base always stays DeprecatedWarning
class PythonVersionWarning(DeprecatedWarning):
pass
class DeprecatedSourceCodeWarning(FutureDeprecatedWarning):
pass
class DeprecatedBuildDirWarning(DeprecatedWarning):
pass
class TaskmasterNeedsExecuteWarning(DeprecatedWarning):
pass
class DeprecatedCopyWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedOptionsWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedSourceSignaturesWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedTargetSignaturesWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedDebugOptionsWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedSigModuleWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedBuilderKeywordsWarning(MandatoryDeprecatedWarning):
pass
# Registry of (warning_class, enabled_flag) 2-tuples.  warn() scans this
# list in order and the first matching class wins; enable/suppress calls
# insert at the front, so the most recent call takes precedence.
_enabled = []

# If set, warn() raises the warning as an exception instead of emitting it.
_warningAsException = 0

# If not None, a function warn() calls with the warning
_warningOut = None
def suppressWarningClass(clazz):
    """Disable all warnings of type *clazz* or any subclass of it."""
    # Prepend so this suppression takes precedence over earlier entries.
    _enabled[:0] = [(clazz, 0)]
def enableWarningClass(clazz):
    """Enable all warnings of type *clazz* or any subclass of it."""
    # Prepend so this enabling takes precedence over earlier entries.
    _enabled[:0] = [(clazz, 1)]
def warningAsException(flag=1):
    """Turn warnings into exceptions.  Returns the old value of the flag."""
    global _warningAsException
    previous, _warningAsException = _warningAsException, flag
    return previous
def warn(clazz, *args):
    """Emit a warning of type *clazz* built from *args*.

    The registry is scanned front-to-back; the first entry whose class
    matches decides the outcome (raise, call the output hook, or drop).
    """
    global _enabled, _warningAsException, _warningOut

    warning = clazz(args)
    for enabled_class, is_enabled in _enabled:
        if not isinstance(warning, enabled_class):
            continue
        if is_enabled:
            if _warningAsException:
                raise warning
            if _warningOut:
                _warningOut(warning)
        break
def process_warn_strings(arguments):
    """Process string specifications of enabling/disabling warnings,
    as passed to the --warn option or the SetOption('warn') function.

    Each argument has the form <warning-class> or no-<warning-class>.
    The name is split on hyphens, each piece is capitalized and the pieces
    are joined with "Warning" appended to obtain one of the warning class
    names defined above (e.g. 'deprecated' -> DeprecatedWarning,
    'no-dependency' disables DependencyWarning).  As a special case,
    'all' / 'no-all' enable or disable the base Warning class.
    """
    def _capitalize(piece):
        # Keep the 'SCons' brand capitalization intact.
        return "SCons" + piece[5:] if piece[:5] == "scons" else piece.capitalize()

    for arg in arguments:
        parts = arg.lower().split('-')
        enable = 1
        if parts[0] == 'no':
            enable = 0
            parts = parts[1:]

        if parts == ['all']:
            class_name = "Warning"
        else:
            class_name = ''.join(_capitalize(p) for p in parts) + "Warning"

        clazz = globals().get(class_name)
        if clazz is None:
            sys.stderr.write("No warning type: '%s'\n" % arg)
            continue

        if enable:
            enableWarningClass(clazz)
        elif issubclass(clazz, MandatoryDeprecatedWarning):
            fmt = "Can not disable mandataory warning: '%s'\n"
            sys.stderr.write(fmt % arg)
        else:
            suppressWarningClass(clazz)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
roadmapper/ansible | lib/ansible/modules/cloud/vmware/vmware_dvswitch.py | 23 | 33516 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
# Copyright: (c) 2018, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_dvswitch
short_description: Create or remove a Distributed Switch
description:
- This module can be used to create, remove a Distributed Switch.
version_added: 2.0
author:
- Joseph Callen (@jcpowermac)
- Abhijeet Kasurde (@Akasurde)
- Christian Kotte (@ckotte)
notes:
- Tested on vSphere 6.5 and 6.7
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter that will contain the Distributed Switch.
- This parameter is optional, if C(folder) is provided.
- Mutually exclusive with C(folder) parameter.
required: False
aliases: ['datacenter']
type: str
switch_name:
description:
- The name of the distribute vSwitch to create or remove.
required: True
aliases: ['switch', 'dvswitch']
type: str
switch_version:
description:
- The version of the Distributed Switch to create.
- Can be 6.0.0, 5.5.0, 5.1.0, 5.0.0 with a vCenter running vSphere 6.0 and 6.5.
- Can be 6.6.0, 6.5.0, 6.0.0 with a vCenter running vSphere 6.7.
- The version must match the version of the ESXi hosts you want to connect.
- The version of the vCenter server is used if not specified.
- Required only if C(state) is set to C(present).
version_added: 2.5
choices: ['5.0.0', '5.1.0', '5.5.0', '6.0.0', '6.5.0', '6.6.0']
aliases: ['version']
type: str
mtu:
description:
- The switch maximum transmission unit.
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
- Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
- Accepts value between 1280 to 9000 (both inclusive).
type: int
default: 1500
multicast_filtering_mode:
description:
- The multicast filtering mode.
- 'C(basic) mode: multicast traffic for virtual machines is forwarded according to the destination MAC address of the multicast group.'
- 'C(snooping) mode: the Distributed Switch provides IGMP and MLD snooping according to RFC 4541.'
type: str
choices: ['basic', 'snooping']
default: 'basic'
version_added: 2.8
uplink_quantity:
description:
- Quantity of uplink per ESXi host added to the Distributed Switch.
- The uplink quantity can be increased or decreased, but a decrease will only be successful if the uplink isn't used by a portgroup.
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
- Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
type: int
uplink_prefix:
description:
- The prefix used for the naming of the uplinks.
- Only valid if the Distributed Switch will be created. Not used if the Distributed Switch is already present.
- Uplinks are created as Uplink 1, Uplink 2, etc. pp. by default.
default: 'Uplink '
version_added: 2.8
type: str
discovery_proto:
description:
- Link discovery protocol between Cisco and Link Layer discovery.
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
- Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
- 'C(cdp): Use Cisco Discovery Protocol (CDP).'
- 'C(lldp): Use Link Layer Discovery Protocol (LLDP).'
- 'C(disabled): Do not use a discovery protocol.'
choices: ['cdp', 'lldp', 'disabled']
default: 'cdp'
aliases: [ 'discovery_protocol' ]
type: str
discovery_operation:
description:
- Select the discovery operation.
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
- Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
choices: ['both', 'advertise', 'listen']
default: 'listen'
type: str
contact:
description:
- Dictionary which configures administrator contact name and description for the Distributed Switch.
- 'Valid attributes are:'
- '- C(name) (str): Administrator name.'
- '- C(description) (str): Description or other details.'
type: dict
version_added: 2.8
description:
description:
- Description of the Distributed Switch.
type: str
version_added: 2.8
health_check:
description:
- Dictionary which configures Health Check for the Distributed Switch.
- 'Valid attributes are:'
- '- C(vlan_mtu) (bool): VLAN and MTU health check. (default: False)'
- '- C(teaming_failover) (bool): Teaming and failover health check. (default: False)'
- '- C(vlan_mtu_interval) (int): VLAN and MTU health check interval (minutes). (default: 0)'
- '- The default for C(vlan_mtu_interval) is 1 in the vSphere Client if the VLAN and MTU health check is enabled.'
- '- C(teaming_failover_interval) (int): Teaming and failover health check interval (minutes). (default: 0)'
- '- The default for C(teaming_failover_interval) is 1 in the vSphere Client if the Teaming and failover health check is enabled.'
type: dict
default: {
vlan_mtu: False,
teaming_failover: False,
vlan_mtu_interval: 0,
teaming_failover_interval: 0,
}
version_added: 2.8
state:
description:
- If set to C(present) and the Distributed Switch doesn't exists then the Distributed Switch will be created.
- If set to C(absent) and the Distributed Switch exists then the Distributed Switch will be deleted.
default: 'present'
choices: ['present', 'absent']
type: str
folder:
description:
- Destination folder, absolute path to place dvswitch in.
- The folder should include the datacenter.
- This parameter is case sensitive.
- This parameter is optional, if C(datacenter) is provided.
- 'Examples:'
- ' folder: /datacenter1/network'
- ' folder: datacenter1/network'
- ' folder: /datacenter1/network/folder1'
- ' folder: datacenter1/network/folder1'
- ' folder: /folder1/datacenter1/network'
- ' folder: folder1/datacenter1/network'
- ' folder: /folder1/datacenter1/network/folder2'
required: False
type: str
version_added: 2.9
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create dvSwitch
vmware_dvswitch:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter: '{{ datacenter }}'
switch: dvSwitch
version: 6.0.0
mtu: 9000
uplink_quantity: 2
discovery_protocol: lldp
discovery_operation: both
state: present
delegate_to: localhost
- name: Create dvSwitch with all options
vmware_dvswitch:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter: '{{ datacenter }}'
switch: dvSwitch
version: 6.5.0
mtu: 9000
uplink_quantity: 2
uplink_prefix: 'Uplink_'
discovery_protocol: cdp
discovery_operation: both
multicast_filtering_mode: snooping
health_check:
vlan_mtu: true
vlan_mtu_interval: 1
teaming_failover: true
teaming_failover_interval: 1
state: present
delegate_to: localhost
- name: Delete dvSwitch
vmware_dvswitch:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter: '{{ datacenter }}'
switch: dvSwitch
state: absent
delegate_to: localhost
'''
RETURN = """
result:
description: information about performed operation
returned: always
type: str
sample: {
"changed": false,
"contact": null,
"contact_details": null,
"description": null,
"discovery_operation": "both",
"discovery_protocol": "cdp",
"dvswitch": "test",
"health_check_teaming": false,
"health_check_teaming_interval": 0,
"health_check_vlan": false,
"health_check_vlan_interval": 0,
"mtu": 9000,
"multicast_filtering_mode": "basic",
"result": "DVS already configured properly",
"uplink_quantity": 2,
"uplinks": [
"Uplink_1",
"Uplink_2"
],
"version": "6.6.0"
}
"""
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import (
PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
)
class VMwareDvSwitch(PyVmomi):
    """Manage a Distributed Virtual Switch (DVS) in vCenter.

    Creates, reconfigures, or deletes a DVS according to the module
    parameters (MTU, uplinks, discovery protocol, multicast filtering,
    health check, contact and description), reporting previous and new
    values for every changed setting.
    """

    def __init__(self, module):
        """Read and validate module parameters; locate the target folder.

        :param module: AnsibleModule instance holding the parameters.
        """
        super(VMwareDvSwitch, self).__init__(module)
        self.dvs = None
        self.switch_name = self.module.params['switch_name']
        self.switch_version = self.module.params['switch_version']
        # vCenter 6.7.0 reports/creates distributed switches as version 6.6.0
        if self.content.about.version == '6.7.0':
            self.vcenter_switch_version = '6.6.0'
        else:
            self.vcenter_switch_version = self.content.about.version
        folder = self.params['folder']
        if folder:
            self.folder_obj = self.content.searchIndex.FindByInventoryPath(folder)
            if not self.folder_obj:
                self.module.fail_json(msg="Failed to find the folder specified by %(folder)s" % self.params)
        else:
            # No explicit folder given: fall back to the datacenter's
            # default network folder (folder/datacenter are mutually exclusive).
            datacenter_name = self.params.get('datacenter_name')
            datacenter_obj = self.find_datacenter_by_name(datacenter_name)
            if not datacenter_obj:
                self.module.fail_json(msg="Failed to find datacenter '%s' required"
                                          " for managing distributed vSwitch." % datacenter_name)
            self.folder_obj = datacenter_obj.networkFolder
        self.mtu = self.module.params['mtu']
        # MTU sanity check
        if not 1280 <= self.mtu <= 9000:
            self.module.fail_json(
                msg="MTU value should be between 1280 and 9000 (both inclusive), provided %d." % self.mtu
            )
        self.multicast_filtering_mode = self.module.params['multicast_filtering_mode']
        self.uplink_quantity = self.module.params['uplink_quantity']
        self.uplink_prefix = self.module.params['uplink_prefix']
        self.discovery_protocol = self.module.params['discovery_proto']
        self.discovery_operation = self.module.params['discovery_operation']
        # TODO: add port mirroring
        self.health_check_vlan = self.params['health_check'].get('vlan_mtu')
        self.health_check_vlan_interval = self.params['health_check'].get('vlan_mtu_interval')
        self.health_check_teaming = self.params['health_check'].get('teaming_failover')
        self.health_check_teaming_interval = self.params['health_check'].get('teaming_failover_interval')
        if self.params['contact']:
            self.contact_name = self.params['contact'].get('name')
            # Bugfix: the argument spec declares this suboption as
            # 'description'; the old code read 'details', which always
            # returned None so contact details were never applied.
            self.contact_details = self.params['contact'].get('description')
        else:
            self.contact_name = None
            self.contact_details = None
        self.description = self.module.params['description']
        self.state = self.module.params['state']

    def process_state(self):
        """Dispatch on (desired state, current state) and run the action."""
        dvs_states = {
            'absent': {
                'present': self.destroy_dvswitch,
                'absent': self.exit_unchanged,
            },
            'present': {
                'present': self.update_dvswitch,
                'absent': self.create_dvswitch,
            }
        }
        try:
            dvs_states[self.state][self.check_dvs()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=to_native(method_fault.msg))
        except Exception as e:
            self.module.fail_json(msg=to_native(e))

    def check_dvs(self):
        """Return 'present' if the DVS exists in the target folder, else 'absent'."""
        self.dvs = find_dvs_by_name(self.content, self.switch_name, folder=self.folder_obj)
        if self.dvs is None:
            return 'absent'
        return 'present'

    def create_dvswitch(self):
        """Create a DVS and apply post-creation settings (multicast, health check)."""
        changed = True
        results = dict(changed=changed)

        spec = vim.DistributedVirtualSwitch.CreateSpec()
        spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # Name
        results['dvswitch'] = self.switch_name
        spec.configSpec.name = self.switch_name
        # MTU
        results['mtu'] = self.mtu
        spec.configSpec.maxMtu = self.mtu
        # Discovery Protocol type and operation
        results['discovery_protocol'] = self.discovery_protocol
        results['discovery_operation'] = self.discovery_operation
        spec.configSpec.linkDiscoveryProtocolConfig = self.create_ldp_spec()
        # Administrator contact
        results['contact'] = self.contact_name
        results['contact_details'] = self.contact_details
        if self.contact_name or self.contact_details:
            spec.contact = self.create_contact_spec()
        # Description
        results['description'] = self.description
        if self.description:
            spec.description = self.description
        # Uplinks
        results['uplink_quantity'] = self.uplink_quantity
        spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
        for count in range(1, self.uplink_quantity + 1):
            spec.configSpec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count))
        results['uplinks'] = spec.configSpec.uplinkPortPolicy.uplinkPortName
        # Version
        results['version'] = self.switch_version
        if self.switch_version:
            spec.productInfo = self.create_product_spec(self.switch_version)

        if self.module.check_mode:
            result = "DVS would be created"
        else:
            # Create DVS
            network_folder = self.folder_obj
            task = network_folder.CreateDVS_Task(spec)
            try:
                wait_for_task(task)
            except TaskError as invalid_argument:
                self.module.fail_json(
                    msg="Failed to create DVS : %s" % to_native(invalid_argument)
                )
            # Find new DVS
            self.dvs = find_dvs_by_name(self.content, self.switch_name)
            changed_multicast = False
            spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
            # Use the same version in the new spec; The version will be increased by one by the API automatically
            spec.configVersion = self.dvs.config.configVersion
            # Set multicast filtering mode. This cannot be part of the create
            # spec, so it needs a reconfigure after creation.
            results['multicast_filtering_mode'] = self.multicast_filtering_mode
            multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode)
            # Bugfix: the mode was previously assigned a second time
            # unconditionally, defeating the change detection above.
            if self.dvs.config.multicastFilteringMode != multicast_filtering_mode:
                changed_multicast = True
                spec.multicastFilteringMode = multicast_filtering_mode
            if changed_multicast:
                self.update_dvs_config(self.dvs, spec)
            # Set Health Check config
            results['health_check_vlan'] = self.health_check_vlan
            results['health_check_teaming'] = self.health_check_teaming
            result = self.check_health_check_config(self.dvs.config.healthCheckConfig)
            changed_health_check = result[1]
            if changed_health_check:
                self.update_health_check_config(self.dvs, result[0])
            result = "DVS created"
        self.module.exit_json(changed=changed, result=to_native(result))

    def create_ldp_spec(self):
        """Create a Link Discovery Protocol config spec.

        'disabled' is modeled in the API as protocol 'cdp' with
        operation 'none'.
        """
        ldp_config_spec = vim.host.LinkDiscoveryProtocolConfig()
        if self.discovery_protocol == 'disabled':
            ldp_config_spec.protocol = 'cdp'
            ldp_config_spec.operation = 'none'
        else:
            ldp_config_spec.protocol = self.discovery_protocol
            ldp_config_spec.operation = self.discovery_operation
        return ldp_config_spec

    def create_product_spec(self, switch_version):
        """Create a product info spec for the given switch version."""
        product_info_spec = vim.dvs.ProductSpec()
        product_info_spec.version = switch_version
        return product_info_spec

    @staticmethod
    def get_api_mc_filtering_mode(mode):
        """Map the module's multicast filtering mode to the API value."""
        if mode == 'basic':
            return 'legacyFiltering'
        return 'snooping'

    def create_contact_spec(self):
        """Create an administrator contact info spec."""
        contact_info_spec = vim.DistributedVirtualSwitch.ContactInfo()
        contact_info_spec.name = self.contact_name
        contact_info_spec.contact = self.contact_details
        return contact_info_spec

    def update_dvs_config(self, switch_object, spec):
        """Reconfigure the DVS with the given config spec and wait for it."""
        try:
            task = switch_object.ReconfigureDvs_Task(spec)
            wait_for_task(task)
        except TaskError as invalid_argument:
            self.module.fail_json(
                msg="Failed to update DVS : %s" % to_native(invalid_argument)
            )

    def check_health_check_config(self, health_check_config):
        """Compare the desired health check settings with the current config.

        Mutates ``health_check_config`` in place to the desired values and
        returns a tuple: (config, changed, changed_vlan, vlan_previous,
        changed_vlan_interval, vlan_interval_previous, changed_teaming,
        teaming_previous, changed_teaming_interval, teaming_interval_previous).
        """
        changed = changed_vlan = changed_vlan_interval = changed_teaming = changed_teaming_interval = False
        vlan_previous = teaming_previous = None
        vlan_interval_previous = teaming_interval_previous = 0
        for config in health_check_config:
            if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.VlanMtuHealthCheckConfig):
                if config.enable != self.health_check_vlan:
                    changed = changed_vlan = True
                    vlan_previous = config.enable
                    config.enable = self.health_check_vlan
                # interval is only meaningful while the check is enabled
                if config.enable and config.interval != self.health_check_vlan_interval:
                    changed = changed_vlan_interval = True
                    vlan_interval_previous = config.interval
                    config.interval = self.health_check_vlan_interval
            if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.TeamingHealthCheckConfig):
                if config.enable != self.health_check_teaming:
                    changed = changed_teaming = True
                    teaming_previous = config.enable
                    config.enable = self.health_check_teaming
                if config.enable and config.interval != self.health_check_teaming_interval:
                    changed = changed_teaming_interval = True
                    teaming_interval_previous = config.interval
                    config.interval = self.health_check_teaming_interval
        return (health_check_config, changed, changed_vlan, vlan_previous, changed_vlan_interval, vlan_interval_previous,
                changed_teaming, teaming_previous, changed_teaming_interval, teaming_interval_previous)

    def update_health_check_config(self, switch_object, health_check_config):
        """Apply a new health check config to the switch and wait for it."""
        try:
            task = switch_object.UpdateDVSHealthCheckConfig_Task(healthCheckConfig=health_check_config)
        except vim.fault.DvsFault as dvs_fault:
            self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
        except vmodl.fault.NotSupported as not_supported:
            self.module.fail_json(msg="Health check not supported on the switch : %s" % to_native(not_supported))
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to configure health check : %s" % to_native(invalid_argument))
        try:
            wait_for_task(task)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to update health check config : %s" % to_native(invalid_argument))

    def exit_unchanged(self):
        """Exit reporting that the DVS is already absent."""
        changed = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        results['result'] = "DVS not present"
        self.module.exit_json(**results)

    def destroy_dvswitch(self):
        """Delete the DVS."""
        changed = True
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name

        if self.module.check_mode:
            results['result'] = "DVS would be deleted"
        else:
            try:
                task = self.dvs.Destroy_Task()
            except vim.fault.VimFault as vim_fault:
                # Bugfix: grammar of the error message ("Failed to deleted").
                self.module.fail_json(msg="Failed to delete DVS : %s" % to_native(vim_fault))
            # Robustness: a failing destroy task raises TaskError; report it
            # through fail_json instead of an unhandled traceback.
            try:
                wait_for_task(task)
            except TaskError as task_error:
                self.module.fail_json(msg="Failed to delete DVS : %s" % to_native(task_error))
            results['result'] = "DVS deleted"
        self.module.exit_json(**results)

    def update_dvswitch(self):
        """Check the existing DVS against the parameters and update if needed."""
        changed = changed_settings = changed_ldp = changed_version = changed_health_check = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        changed_list = []

        config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # Use the same version in the new spec; The version will be increased by one by the API automatically
        config_spec.configVersion = self.dvs.config.configVersion

        # Check MTU
        results['mtu'] = self.mtu
        if self.dvs.config.maxMtu != self.mtu:
            changed = changed_settings = True
            changed_list.append("mtu")
            # Bugfix: report the switch's current MTU as the previous value;
            # the old code read it from the freshly created (unset) spec.
            results['mtu_previous'] = self.dvs.config.maxMtu
            config_spec.maxMtu = self.mtu

        # Check Discovery Protocol type and operation
        ldp_protocol = self.dvs.config.linkDiscoveryProtocolConfig.protocol
        ldp_operation = self.dvs.config.linkDiscoveryProtocolConfig.operation
        if self.discovery_protocol == 'disabled':
            results['discovery_protocol'] = self.discovery_protocol
            results['discovery_operation'] = 'n/a'
            if ldp_protocol != 'cdp' or ldp_operation != 'none':
                changed_ldp = True
                results['discovery_protocol_previous'] = ldp_protocol
                results['discovery_operation_previous'] = ldp_operation
        else:
            results['discovery_protocol'] = self.discovery_protocol
            results['discovery_operation'] = self.discovery_operation
            if ldp_protocol != self.discovery_protocol or ldp_operation != self.discovery_operation:
                changed_ldp = True
                if ldp_protocol != self.discovery_protocol:
                    results['discovery_protocol_previous'] = ldp_protocol
                if ldp_operation != self.discovery_operation:
                    results['discovery_operation_previous'] = ldp_operation
        if changed_ldp:
            changed = changed_settings = True
            changed_list.append("discovery protocol")
            config_spec.linkDiscoveryProtocolConfig = self.create_ldp_spec()

        # Check Multicast filtering mode
        results['multicast_filtering_mode'] = self.multicast_filtering_mode
        multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode)
        if self.dvs.config.multicastFilteringMode != multicast_filtering_mode:
            changed = changed_settings = True
            changed_list.append("multicast filtering")
            results['multicast_filtering_mode_previous'] = self.dvs.config.multicastFilteringMode
            config_spec.multicastFilteringMode = multicast_filtering_mode

        # Check administrator contact
        results['contact'] = self.contact_name
        results['contact_details'] = self.contact_details
        if self.dvs.config.contact.name != self.contact_name or self.dvs.config.contact.contact != self.contact_details:
            changed = changed_settings = True
            changed_list.append("contact")
            results['contact_previous'] = self.dvs.config.contact.name
            results['contact_details_previous'] = self.dvs.config.contact.contact
            config_spec.contact = self.create_contact_spec()

        # Check description
        results['description'] = self.description
        if self.dvs.config.description != self.description:
            changed = changed_settings = True
            changed_list.append("description")
            results['description_previous'] = self.dvs.config.description
            if self.description is None:
                # need to use empty string; will be set to None by API
                config_spec.description = ''
            else:
                config_spec.description = self.description

        # Check uplinks
        results['uplink_quantity'] = self.uplink_quantity
        if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) != self.uplink_quantity:
            changed = changed_settings = True
            changed_list.append("uplink quantity")
            results['uplink_quantity_previous'] = len(self.dvs.config.uplinkPortPolicy.uplinkPortName)
            config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
            # Replace the uplink name array wholesale; the same code path
            # works whether uplinks are being added or removed (the original
            # had two byte-identical branches for those cases).
            for count in range(1, self.uplink_quantity + 1):
                config_spec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count))
            results['uplinks'] = config_spec.uplinkPortPolicy.uplinkPortName
            results['uplinks_previous'] = self.dvs.config.uplinkPortPolicy.uplinkPortName
        else:
            # No uplink name check; uplink names can't be changed easily if they are used by a portgroup
            results['uplinks'] = self.dvs.config.uplinkPortPolicy.uplinkPortName

        # Check Health Check
        results['health_check_vlan'] = self.health_check_vlan
        results['health_check_teaming'] = self.health_check_teaming
        results['health_check_vlan_interval'] = self.health_check_vlan_interval
        results['health_check_teaming_interval'] = self.health_check_teaming_interval
        (health_check_config, changed_health_check, changed_vlan, vlan_previous,
         changed_vlan_interval, vlan_interval_previous, changed_teaming, teaming_previous,
         changed_teaming_interval, teaming_interval_previous) = \
            self.check_health_check_config(self.dvs.config.healthCheckConfig)
        if changed_health_check:
            changed = True
            changed_list.append("health check")
            if changed_vlan:
                results['health_check_vlan_previous'] = vlan_previous
            if changed_vlan_interval:
                results['health_check_vlan_interval_previous'] = vlan_interval_previous
            if changed_teaming:
                results['health_check_teaming_previous'] = teaming_previous
            if changed_teaming_interval:
                results['health_check_teaming_interval_previous'] = teaming_interval_previous

        # Check switch version
        if self.switch_version:
            results['version'] = self.switch_version
            if self.dvs.config.productInfo.version != self.switch_version:
                changed_version = True
                spec_product = self.create_product_spec(self.switch_version)
        else:
            # No version requested: target the highest version the vCenter supports.
            results['version'] = self.vcenter_switch_version
            if self.dvs.config.productInfo.version != self.vcenter_switch_version:
                changed_version = True
                spec_product = self.create_product_spec(self.vcenter_switch_version)
        if changed_version:
            changed = True
            changed_list.append("switch version")
            results['version_previous'] = self.dvs.config.productInfo.version

        if changed:
            if self.module.check_mode:
                changed_suffix = ' would be changed'
            else:
                changed_suffix = ' changed'
            # Build a human-readable summary ("a, b, and c changed")
            if len(changed_list) > 2:
                message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
            elif len(changed_list) == 2:
                message = ' and '.join(changed_list)
            elif len(changed_list) == 1:
                message = changed_list[0]
            message += changed_suffix
            if not self.module.check_mode:
                if changed_settings:
                    self.update_dvs_config(self.dvs, config_spec)
                if changed_health_check:
                    self.update_health_check_config(self.dvs, health_check_config)
                if changed_version:
                    task = self.dvs.PerformDvsProductSpecOperation_Task("upgrade", spec_product)
                    try:
                        wait_for_task(task)
                    except TaskError as invalid_argument:
                        self.module.fail_json(msg="Failed to update DVS version : %s" % to_native(invalid_argument))
        else:
            message = "DVS already configured properly"
        results['changed'] = changed
        results['result'] = message
        self.module.exit_json(**results)
def main():
    """Module entry point: build the argument spec and run the DVS manager."""
    # Start from the shared vCenter connection options (hostname, username,
    # password, validate_certs, ...) and add module-specific parameters.
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            datacenter_name=dict(aliases=['datacenter']),
            folder=dict(),
            switch_name=dict(required=True, aliases=['switch', 'dvswitch']),
            mtu=dict(type='int', default=1500),
            multicast_filtering_mode=dict(type='str', default='basic', choices=['basic', 'snooping']),
            switch_version=dict(
                choices=['5.0.0', '5.1.0', '5.5.0', '6.0.0', '6.5.0', '6.6.0'],
                aliases=['version'],
                default=None
            ),
            uplink_quantity=dict(type='int'),
            uplink_prefix=dict(type='str', default='Uplink '),
            discovery_proto=dict(
                type='str', choices=['cdp', 'lldp', 'disabled'], default='cdp', aliases=['discovery_protocol']
            ),
            discovery_operation=dict(type='str', choices=['both', 'advertise', 'listen'], default='listen'),
            # NOTE(review): health_check carries an explicit default dict so the
            # consuming code can always call .get() on it without a None check.
            health_check=dict(
                type='dict',
                options=dict(
                    vlan_mtu=dict(type='bool', default=False),
                    teaming_failover=dict(type='bool', default=False),
                    vlan_mtu_interval=dict(type='int', default=0),
                    teaming_failover_interval=dict(type='int', default=0),
                ),
                default=dict(
                    vlan_mtu=False,
                    teaming_failover=False,
                    vlan_mtu_interval=0,
                    teaming_failover_interval=0,
                ),
            ),
            contact=dict(
                type='dict',
                options=dict(
                    name=dict(type='str'),
                    description=dict(type='str'),
                ),
            ),
            description=dict(type='str'),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        # uplink_quantity is only needed when creating/updating a switch
        required_if=[
            ('state', 'present',
             ['uplink_quantity']),
        ],
        # The target location is given either by folder or by datacenter,
        # and exactly one of the two must be supplied.
        required_one_of=[
            ['folder', 'datacenter_name'],
        ],
        mutually_exclusive=[
            ['folder', 'datacenter_name'],
        ],
        supports_check_mode=True,
    )

    vmware_dvswitch = VMwareDvSwitch(module)
    vmware_dvswitch.process_state()


if __name__ == '__main__':
    main()
from threading import Thread, Timer
import websocket
import logging
import time
try:
import simplejson as json
except:
import json
class Connection(Thread):
    """Background thread that owns the websocket connection to Pusher.

    Dispatches internal ``pusher:*`` protocol events to handlers registered
    via :meth:`bind`, and forwards channel events to ``event_handler``.
    Implements the Pusher ping/pong heartbeat, a connection watchdog timer,
    and automatic reconnection with a configurable interval.
    """

    def __init__(self, event_handler, url, log_level=logging.INFO):
        # Callback invoked for every channel event as
        # event_handler(event_name, data, channel_name).
        self.event_handler = event_handler
        self.url = url

        self.socket = None
        # Socket id assigned by Pusher in pusher:connection_established.
        self.socket_id = ""

        # Maps event name -> list of callbacks registered via bind().
        self.event_callbacks = {}

        self.needs_reconnect = False
        self.reconnect_interval = 10

        self.pong_timer = None
        self.pong_received = False
        self.pong_timeout = 30

        # Wire up the internal Pusher protocol events.
        self.bind("pusher:connection_established", self._connect_handler)
        self.bind("pusher:connection_failed", self._failed_handler)
        self.bind("pusher:pong", self._pong_handler)
        self.bind("pusher:ping", self._ping_handler)
        self.bind("pusher:error", self._pusher_error_handler)

        # Lifecycle: initialized -> connecting -> connected, or
        # unavailable / failed / disconnected on errors and shutdown.
        self.state = "initialized"

        self.logger = logging.getLogger(self.__module__) # create a new logger

        self.logger.addHandler(logging.StreamHandler())

        if log_level == logging.DEBUG:
            websocket.enableTrace(True)

        self.logger.setLevel(log_level)

        # From Martyn's comment at:
        # https://pusher.tenderapp.com/discussions/problems/36-no-messages-received-after-1-idle-minute-heartbeat
        #  "We send a ping every 5 minutes in an attempt to keep connections
        #  alive..."
        # This is why we set the connection timeout to 5 minutes, since we can
        # expect a pusher heartbeat message every 5 minutes.  Adding 5 sec to
        # account for small timing delays which may cause messages to not be
        # received in exact 5 minute intervals.

        self.connection_timeout = 305
        self.connection_timer = None

        self.ping_interval = 120
        self.ping_timer = None

        Thread.__init__(self)

    def bind(self, event_name, callback):
        """Bind an event to a callback

        :param event_name: The name of the event to bind to.
        :type event_name: str

        :param callback: The callback to notify of this event.
        """
        if event_name not in self.event_callbacks.keys():
            self.event_callbacks[event_name] = []

        self.event_callbacks[event_name].append(callback)

    def disconnect(self):
        """Close the connection permanently (no reconnection attempt)."""
        self.needs_reconnect = False

        if self.socket:
            self.socket.close()

    def reconnect(self, reconnect_interval=10):
        """Close the socket and schedule a reconnect after the given delay."""
        self.logger.info("Connection: Reconnect in %s" % reconnect_interval)
        self.reconnect_interval = reconnect_interval

        self.needs_reconnect = True
        if self.socket:
            self.socket.close()

    def run(self):
        # Thread entry point.
        self._connect()

    def _connect(self):
        """Open the websocket and keep re-opening it while reconnects are needed."""
        self.state = "connecting"

        self.socket = websocket.WebSocketApp(
            self.url,
            on_open=self._on_open,
            on_message=self._on_message,
            on_error=self._on_error,
            on_close=self._on_close
        )

        # run_forever() blocks until the socket closes.
        self.socket.run_forever()

        while self.needs_reconnect:
            self.logger.info("Attempting to connect again in %s seconds." % self.reconnect_interval)
            self.state = "unavailable"
            time.sleep(self.reconnect_interval)

            # We need to set this flag since closing the socket will set it to
            # false
            self.socket.keep_running = True
            self.socket.run_forever()

    def _on_open(self, ws):
        self.logger.info("Connection: Connection opened")
        # Send a ping right away so we can detect a dead connection quickly.
        self._start_timers()

    def _on_error(self, ws, error):
        self.logger.info("Connection: Error - %s" % error)
        self.state = "failed"
        self.needs_reconnect = True

    def _on_message(self, ws, message):
        """Handle an incoming websocket frame and dispatch the event."""
        self.logger.info("Connection: Message - %s" % message)

        # Stop our timeout timer, since we got some data
        self._stop_timers()

        params = self._parse(message)

        if 'event' in params.keys():
            if 'channel' not in params.keys():
                # We've got a connection event.  Lets handle it.
                if params['event'] in self.event_callbacks.keys():
                    for callback in self.event_callbacks[params['event']]:
                        callback(params['data'])
                else:
                    self.logger.info("Connection: Unhandled event")
            else:
                # We've got a channel event.  Lets pass it up to the pusher
                # so it can be handled by the appropriate channel.
                self.event_handler(
                    params['event'],
                    params['data'],
                    params['channel']
                )

        # We've handled our data, so restart our connection timeout handler
        self._start_timers()

    def _on_close(self, ws, *args):
        self.logger.info("Connection: Connection closed")
        self.state = "disconnected"
        self._stop_timers()

    @staticmethod
    def _parse(message):
        # Raw frames are JSON; returns the decoded dict.
        return json.loads(message)

    def _stop_timers(self):
        """Cancel the heartbeat and watchdog timers, if they are running."""
        if self.ping_timer:
            self.ping_timer.cancel()

        if self.connection_timer:
            self.connection_timer.cancel()

    def _start_timers(self):
        """(Re)arm the heartbeat ping timer and the connection watchdog."""
        self._stop_timers()

        self.ping_timer = Timer(self.ping_interval, self.send_ping)
        self.ping_timer.start()

        self.connection_timer = Timer(self.connection_timeout, self._connection_timed_out)
        self.connection_timer.start()

    def send_event(self, event_name, data, channel_name=None):
        """Send an event to Pusher, optionally scoped to a channel."""
        event = {'event': event_name, 'data': data}
        if channel_name:
            event['channel'] = channel_name

        self.logger.info("Connection: Sending event - %s" % event)
        self.socket.send(json.dumps(event))

    def send_ping(self):
        """Send a pusher:ping and start waiting for the matching pong."""
        self.logger.info("Connection: ping to pusher")
        self.socket.send(json.dumps({'event': 'pusher:ping', 'data': ''}))
        self.pong_timer = Timer(self.pong_timeout, self._check_pong)
        self.pong_timer.start()

    def send_pong(self):
        """Answer a server-initiated pusher:ping."""
        self.logger.info("Connection: pong to pusher")
        self.socket.send(json.dumps({'event': 'pusher:pong', 'data': ''}))

    def _check_pong(self):
        """Timer callback: reconnect if no pong arrived within pong_timeout."""
        self.pong_timer.cancel()

        if self.pong_received:
            self.pong_received = False
        else:
            self.logger.info("Did not receive pong in time.  Will attempt to reconnect.")
            self.state = "failed"
            self.reconnect()

    def _connect_handler(self, data):
        # pusher:connection_established carries our socket id.
        parsed = json.loads(data)

        self.socket_id = parsed['socket_id']
        self.state = "connected"

    def _failed_handler(self, data):
        # NOTE(review): the parsed payload is currently unused.
        parsed = json.loads(data)

        self.state = "failed"

    def _ping_handler(self, data):
        self.send_pong()
        # Restart our timers since we received something on the connection
        self._start_timers()

    def _pong_handler(self, data):
        # self.  logger.info("Connection: pong from pusher")
        self.pong_received = True

    def _pusher_error_handler(self, data):
        """Handle pusher:error; the code ranges are defined by the Pusher protocol."""
        if 'code' in data:
            error_code = None

            try:
                error_code = int(data['code'])
            except:
                pass

            if error_code is not None:
                self.logger.error("Connection: Received error %s" % error_code)

                if (error_code >= 4000) and (error_code <= 4099):
                    # The connection SHOULD NOT be re-established unchanged
                    self.logger.info("Connection: Error is unrecoverable.  Disconnecting")
                    self.disconnect()
                elif (error_code >= 4100) and (error_code <= 4199):
                    # The connection SHOULD be re-established after backing off
                    self.reconnect()
                elif (error_code >= 4200) and (error_code <= 4299):
                    # The connection SHOULD be re-established immediately
                    self.reconnect(0)
                else:
                    pass
            else:
                self.logger.error("Connection: Unknown error code")
        else:
            self.logger.error("Connection: No error code supplied")

    def _connection_timed_out(self):
        # Watchdog callback: no traffic within connection_timeout seconds.
        self.logger.info("Did not receive any data in time.  Reconnecting.")
        self.state = "failed"
        self.reconnect()
"""Nearly exact trust-region optimization subproblem."""
import numpy as np
from scipy.linalg import (norm, get_lapack_funcs, solve_triangular,
cho_solve)
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
__all__ = ['_minimize_trustregion_exact',
'estimate_smallest_singular_value',
'singular_leading_submatrix',
'IterativeSubproblem']
def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None,
**trust_region_options):
"""
Minimization of scalar function of one or more variables using
a nearly exact trust-region algorithm.
Options
-------
initial_tr_radius : float
Initial trust-region radius.
max_tr_radius : float
Maximum value of the trust-region radius. No steps that are longer
than this value will be proposed.
eta : float
Trust region related acceptance stringency for proposed steps.
gtol : float
Gradient norm must be less than ``gtol`` before successful
termination.
"""
if jac is None:
raise ValueError('Jacobian is required for trust region '
'exact minimization.')
if hess is None:
raise ValueError('Hessian matrix is required for trust region '
'exact minimization.')
return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
subproblem=IterativeSubproblem,
**trust_region_options)
def estimate_smallest_singular_value(U):
    """Given upper triangular matrix ``U`` estimate the smallest singular
    value and the correspondent right singular vector in O(n**2) operations.

    Parameters
    ----------
    U : ndarray
        Square upper triangular matrix.

    Returns
    -------
    s_min : float
        Estimated smallest singular value of the provided matrix.
    z_min : ndarray
        Estimated right singular vector.

    Notes
    -----
    The procedure is based on [1]_ and is done in two steps. First, it finds
    a vector ``e`` with components selected from {+1, -1} such that the
    solution ``w`` from the system ``U.T w = e`` is as large as possible.
    Next it estimate ``U v = w``. The smallest singular value is close
    to ``norm(w)/norm(v)`` and the right singular vector is close
    to ``v/norm(v)``. The estimate improves as the matrix becomes more
    ill-conditioned.

    References
    ----------
    .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H.
           An estimate for the condition number of a matrix.  1979.
           SIAM Journal on Numerical Analysis, 16(2), 368-375.
    """
    U = np.atleast_2d(U)
    rows, cols = U.shape
    if rows != cols:
        raise ValueError("A square triangular matrix should be provided.")

    # Forward sweep: pick e[k] in {+1, -1} so that the solution w of
    # U.T w = e grows as large as possible.  Implementation based on
    # algorithm 3.5.1, p. 142, from Golub, G. H., Van Loan, C. F. (2013).
    # "Matrix computations". Fourth Edition. JHU press, adapted for a
    # lower triangular matrix (U.T).  Note that U.T[k, k] == U[k, k] and
    # U.T[k+1:, k] == U[k, k+1:], so the transpose is never formed.
    partial = np.zeros(cols)
    w = np.empty(cols)

    for k in range(cols):
        w_plus = (1 - partial[k]) / U[k, k]
        w_minus = (-1 - partial[k]) / U[k, k]
        partial_plus = partial[k+1:] + U[k, k+1:] * w_plus
        partial_minus = partial[k+1:] + U[k, k+1:] * w_minus

        # Greedily keep the sign that maximizes growth (1-norm heuristic).
        if abs(w_plus) + norm(partial_plus, 1) >= abs(w_minus) + norm(partial_minus, 1):
            w[k] = w_plus
            partial[k+1:] = partial_plus
        else:
            w[k] = w_minus
            partial[k+1:] = partial_minus

    # Backward substitution solves U v = w.
    v = solve_triangular(U, w)

    v_norm = norm(v)
    w_norm = norm(w)

    # Smallest singular value estimate and its associated unit vector.
    s_min = w_norm / v_norm
    z_min = v / v_norm

    return s_min, z_min
def gershgorin_bounds(H):
    """
    Given a square matrix ``H`` compute upper
    and lower bounds for its eigenvalues (Gershgorin Bounds).
    Defined ref. [1].

    References
    ----------
    .. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
           Trust region methods. 2000. Siam. pp. 19.
    """
    diag = np.diag(H)
    abs_diag = np.abs(diag)
    # Row sums of absolute values include the diagonal entry itself,
    # hence the +/- abs_diag corrections below.
    abs_row_sums = np.abs(H).sum(axis=1)

    lower = (diag + abs_diag - abs_row_sums).min()
    upper = (diag - abs_diag + abs_row_sums).max()

    return lower, upper
def singular_leading_submatrix(A, U, k):
    """
    Compute term that makes the leading ``k`` by ``k``
    submatrix from ``A`` singular.

    Parameters
    ----------
    A : ndarray
        Symmetric matrix that is not positive definite.
    U : ndarray
        Upper triangular matrix resulting of an incomplete
        Cholesky decomposition of matrix ``A``.
    k : int
        Positive integer such that the leading k by k submatrix from
        `A` is the first non-positive definite leading submatrix.

    Returns
    -------
    delta : float
        Amount that should be added to the element (k, k) of the
        leading k by k submatrix of ``A`` to make it singular.
    v : ndarray
        A vector such that ``v.T B v = 0``. Where B is the matrix A after
        ``delta`` is added to its element (k, k).
    """
    # The amount to add to A[k-1, k-1]: the squared norm of the partial
    # Cholesky column above the failing pivot minus the pivot itself.
    delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1]

    n = len(A)

    # Build the null-direction vector: unit in component k-1, and the
    # remaining leading entries from a triangular solve.
    v = np.zeros(n)
    v[k-1] = 1

    if k != 1:
        v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1])

    return delta, v
class IterativeSubproblem(BaseQuadraticSubproblem):
"""Quadratic subproblem solved by nearly exact iterative method.
Notes
-----
This subproblem solver was based on [1]_, [2]_ and [3]_,
which implement similar algorithms. The algorithm is basically
that of [1]_ but ideas from [2]_ and [3]_ were also used.
References
----------
.. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
Siam, pp. 169-200, 2000.
.. [2] J. Nocedal and S. Wright, "Numerical optimization",
Springer Science & Business Media. pp. 83-91, 2006.
.. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
pp. 553-572, 1983.
"""
# UPDATE_COEFF appears in reference [1]_
# in formula 7.3.14 (p. 190) named as "theta".
# As recommended there it value is fixed in 0.01.
UPDATE_COEFF = 0.01
EPS = np.finfo(float).eps
    def __init__(self, x, fun, jac, hess, hessp=None,
                 k_easy=0.1, k_hard=0.2):
        """Set up the nearly exact subproblem solver at point ``x``.

        Parameters
        ----------
        x : ndarray
            Current iterate.
        fun, jac, hess : callable
            Objective function, its gradient, and its Hessian.
        hessp : callable, optional
            Hessian-vector product (unused by this solver).
        k_easy, k_hard : float
            Stopping tolerances for the iterative damping-factor search;
            see pp. 194-197 of Conn, Gould & Toint (2000).
        """

        super().__init__(x, fun, jac, hess)

        # When the trust-region shrinks in two consecutive
        # calculations (``tr_radius < previous_tr_radius``)
        # the lower bound ``lambda_lb`` may be reused,
        # facilitating  the convergence.  To indicate no
        # previous value is known at first ``previous_tr_radius``
        # is set to -1  and ``lambda_lb`` to None.
        self.previous_tr_radius = -1
        self.lambda_lb = None

        # Iteration counter for the damping-factor search in ``solve``.
        self.niter = 0

        # ``k_easy`` and ``k_hard`` are parameters used
        # to determine the stop criteria to the iterative
        # subproblem solver. Take a look at pp. 194-197
        # from reference _[1] for a more detailed description.
        self.k_easy = k_easy
        self.k_hard = k_hard

        # Get Lapack function for cholesky decomposition.
        # The implemented SciPy wrapper does not return
        # the incomplete factorization needed by the method.
        self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))

        # Get info about Hessian
        self.dimension = len(self.hess)
        # Gershgorin eigenvalue bounds plus two matrix norms are used to
        # bracket the damping factor in ``_initial_values``.
        self.hess_gershgorin_lb,\
            self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
        self.hess_inf = norm(self.hess, np.Inf)
        self.hess_fro = norm(self.hess, 'fro')

        # A constant such that for vectors smaler than that
        # backward substituition is not reliable. It was stabilished
        # based on Golub, G. H., Van Loan, C. F. (2013).
        # "Matrix computations". Forth Edition. JHU press., p.165.
        self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf
def _initial_values(self, tr_radius):
    """Given a trust radius, return a good initial guess for
    the damping factor, the lower bound and the upper bound.
    The values were chosen accordingly to the guidelines on
    section 7.3.8 (p. 192) from [1]_.
    """
    # Common subexpressions of both bound formulas.
    grad_over_radius = self.jac_mag / tr_radius
    smallest_hess_norm = min(self.hess_fro, self.hess_inf)

    # Upper bound for the damping factor.
    lambda_ub = max(0, grad_over_radius
                    + min(-self.hess_gershgorin_lb, smallest_hess_norm))

    # Lower bound for the damping factor.
    lambda_lb = max(0,
                    -min(self.hess.diagonal()),
                    grad_over_radius - min(self.hess_gershgorin_ub,
                                           smallest_hess_norm))

    # Tighten the lower bound with information from the previous call
    # whenever the trust region has shrunk.
    if tr_radius < self.previous_tr_radius:
        lambda_lb = max(self.lambda_lb, lambda_lb)

    # Initial guess for the damping factor.
    if lambda_lb == 0:
        lambda_initial = 0
    else:
        lambda_initial = max(
            np.sqrt(lambda_lb * lambda_ub),
            lambda_lb + self.UPDATE_COEFF * (lambda_ub - lambda_lb))

    return lambda_initial, lambda_lb, lambda_ub
def solve(self, tr_radius):
    """Solve the quadratic subproblem for the given trust radius.

    Iteratively adjusts the damping factor ``lambda`` (via repeated
    Cholesky factorizations of ``hess + lambda*I``) until the damped
    Newton step ``p`` approximately satisfies the trust-region
    constraint.

    Returns
    -------
    p : array
        Approximate solution of the subproblem.
    hits_boundary : bool
        True when the solution lies on the trust-region boundary.
    """
    lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
    n = self.dimension
    hits_boundary = True
    already_factorized = False
    self.niter = 0

    while True:

        # Compute Cholesky factorization (unless the previous pass of
        # the loop already produced one for ``lambda_current``).
        if already_factorized:
            already_factorized = False
        else:
            H = self.hess + lambda_current*np.eye(n)
            U, info = self.cholesky(H, lower=False,
                                    overwrite_a=False,
                                    clean=True)

        self.niter += 1

        # Check if factorization succeeded (``info == 0``) and the
        # gradient is not effectively zero.
        if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
            # Successful factorization

            # Solve `U.T U p = s`
            p = cho_solve((U, False), -self.jac)

            p_norm = norm(p)

            # Check for interior convergence: undamped Newton step
            # already inside the trust region.
            if p_norm <= tr_radius and lambda_current == 0:
                hits_boundary = False
                break

            # Solve `U.T w = p`
            w = solve_triangular(U, p, trans='T')

            w_norm = norm(w)

            # Compute Newton step accordingly to
            # formula (4.44) p.87 from ref [2]_.
            delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
            lambda_new = lambda_current + delta_lambda

            if p_norm < tr_radius:  # Inside boundary
                s_min, z_min = estimate_smallest_singular_value(U)

                ta, tb = self.get_boundaries_intersections(p, z_min,
                                                           tr_radius)

                # Choose `step_len` with the smallest magnitude.
                # The reason for this choice is explained at
                # ref [3]_, p. 6 (immediately before the formula
                # for `tau`).
                step_len = min([ta, tb], key=abs)

                # Compute the quadratic term (p.T*H*p)
                quadratic_term = np.dot(p, np.dot(H, p))

                # Check stop criteria (hard-case tolerance).
                relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambda_current*tr_radius**2)
                if relative_error <= self.k_hard:
                    p += step_len * z_min
                    break

                # Update uncertainty bounds
                lambda_ub = lambda_current
                lambda_lb = max(lambda_lb, lambda_current - s_min**2)

                # Compute Cholesky factorization for the candidate
                # damping factor before committing to it.
                H = self.hess + lambda_new*np.eye(n)
                c, info = self.cholesky(H, lower=False,
                                        overwrite_a=False,
                                        clean=True)

                # Check if the factorization succeeded.
                if info == 0:  # Successful factorization
                    # Update damping factor
                    lambda_current = lambda_new
                    already_factorized = True
                else:  # Unsuccessful factorization
                    # Update uncertainty bounds
                    lambda_lb = max(lambda_lb, lambda_new)

                    # Update damping factor
                    lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                         lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

            else:  # Outside boundary
                # Check stop criteria (easy-case tolerance).
                relative_error = abs(p_norm - tr_radius) / tr_radius
                if relative_error <= self.k_easy:
                    break

                # Update uncertainty bounds
                lambda_lb = lambda_current

                # Update damping factor
                lambda_current = lambda_new

        elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
            # jac_mag very close to zero

            # Check for interior convergence
            if lambda_current == 0:
                p = np.zeros(n)
                hits_boundary = False
                break

            s_min, z_min = estimate_smallest_singular_value(U)
            step_len = tr_radius

            # Check stop criteria
            if step_len**2 * s_min**2 <= self.k_hard * lambda_current * tr_radius**2:
                p = step_len * z_min
                break

            # Update uncertainty bounds
            lambda_ub = lambda_current
            lambda_lb = max(lambda_lb, lambda_current - s_min**2)

            # Update damping factor
            lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                 lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

        else:  # Unsuccessful factorization
            # Compute auxiliary terms from the singular leading
            # submatrix reported by ``potrf``.
            delta, v = singular_leading_submatrix(H, U, info)
            v_norm = norm(v)

            # Update uncertainty interval
            lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)

            # Update damping factor
            lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                 lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))

    # Persist state reused by the next call (see __init__).
    self.lambda_lb = lambda_lb
    self.lambda_current = lambda_current
    self.previous_tr_radius = tr_radius

    return p, hits_boundary
| bsd-3-clause |
gkno/gkno_launcher | src/networkx/algorithms/traversal/tests/test_bfs.py | 33 | 1075 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestBFS:
    """Tests for the breadth-first-search traversal helpers.

    Fixture graph (built in setUp)::

        0 -- 1 -- 2
             |    |
             3 -- 4

    NOTE(review): the expected dict return values pin the *old*
    networkx API, where ``bfs_successors``/``bfs_predecessors``
    returned dicts rather than iterators.
    """

    def setUp(self):
        # simple graph
        G=nx.Graph()
        G.add_edges_from([(0,1),(1,2),(1,3),(2,4),(3,4)])
        self.G=G

    def test_successor(self):
        # Successors grouped per node; 4 is reached first via 2.
        assert_equal(nx.bfs_successors(self.G,source=0),
                     {0: [1], 1: [2,3], 2:[4]})

    def test_predecessor(self):
        # Each non-source node maps to the node it was discovered from.
        assert_equal(nx.bfs_predecessors(self.G,source=0),
                     {1: 0, 2: 1, 3: 1, 4: 2})

    def test_bfs_tree(self):
        # The BFS tree spans every node and keeps only discovery edges.
        T=nx.bfs_tree(self.G,source=0)
        assert_equal(sorted(T.nodes()),sorted(self.G.nodes()))
        assert_equal(sorted(T.edges()),[(0, 1), (1, 2), (1, 3), (2, 4)])

    def test_bfs_edges(self):
        # Discovery edges in BFS order.
        edges=nx.bfs_edges(self.G,source=0)
        assert_equal(list(edges),[(0, 1), (1, 2), (1, 3), (2, 4)])

    def test_bfs_tree_isolates(self):
        # A BFS tree from an isolated node contains only that node.
        G = nx.Graph()
        G.add_node(1)
        G.add_node(2)
        T=nx.bfs_tree(G,source=1)
        assert_equal(sorted(T.nodes()),[1])
        assert_equal(sorted(T.edges()),[])
| mit |
stevereyes01/pycbc | pycbc/waveform/utils.py | 10 | 19674 | # Copyright (C) 2013 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This module contains convenience utilities for manipulating waveforms
"""
from __future__ import absolute_import
from pycbc.types import TimeSeries, FrequencySeries, Array, float32, float64, complex_same_precision_as, real_same_precision_as
import lal
import lalsimulation as sim
from math import frexp
import numpy
from pycbc.scheme import schemed
from scipy import signal
def ceilpow2(n):
    """Return the smallest power of two that is >= ``n``.

    ``math.frexp`` gives ``n == signif * 2**exponent`` with
    ``0.5 <= |signif| < 1`` for finite nonzero ``n``, so ``1 << exponent``
    is the next power of two at or above ``n``.

    Non-positive inputs return 1 (shifting by a negative exponent would
    be invalid, so negative values are caught early; ``frexp(0)`` yields
    exponent 0 and therefore also 1).
    """
    signif, exponent = frexp(n)
    if signif < 0:
        return 1
    if signif == 0.5:
        # n is an exact power of two already; do not round up past it.
        exponent -= 1
    return 1 << exponent
def coalign_waveforms(h1, h2, psd=None,
                      low_frequency_cutoff=None,
                      high_frequency_cutoff=None,
                      resize=True):
    """ Return two time series which are aligned in time and phase.

    The alignment is only to the nearest sample point and all changes to the
    phase are made to the first input waveform. Waveforms should not be split
    across the vector boundary. If one is, please use roll or cyclic time
    shift to ensure that the entire signal is contiguous in the time series.

    Parameters
    ----------
    h1: pycbc.types.TimeSeries
        The first waveform to align.
    h2: pycbc.types.TimeSeries
        The second waveform to align.
    psd: {None, pycbc.types.FrequencySeries}
        A psd to weight the alignment
    low_frequency_cutoff: {None, float}
        The low frequency cutoff to weight the matching in Hz.
    high_frequency_cutoff: {None, float}
        The high frequency cutoff to weight the matching in Hz.
    resize: Optional, {True, boolean}
        If true, the vectors will be resized to match each other. If false,
        they must be the same length and even in length

    Returns
    -------
    h1: pycbc.types.TimeSeries
        The shifted waveform to align with h2
    h2: pycbc.type.TimeSeries
        The resized (if necessary) waveform to align with h1.
    """
    # Imported locally rather than at module level — presumably to avoid
    # a circular import with pycbc.filter; TODO confirm.
    from pycbc.filter import matched_filter

    # Zero-pad both series to a common power-of-two length so the
    # matched filter can work on equal-length, FFT-friendly vectors.
    mlen = ceilpow2(max(len(h1), len(h2)))
    h1 = h1.copy()
    h2 = h2.copy()
    if resize:
        h1.resize(mlen)
        h2.resize(mlen)
    elif len(h1) != len(h2) or len(h2) % 2 != 0:
        raise ValueError("Time series must be the same size and even if you do "
                         "not allow resizing")

    snr = matched_filter(h1, h2, psd=psd,
                         low_frequency_cutoff=low_frequency_cutoff,
                         high_frequency_cutoff=high_frequency_cutoff)

    # Peak of the complex SNR gives the sample shift ``l`` and the phase
    # rotation that best aligns h1 with h2.
    _, l = snr.abs_max_loc()
    rotation = snr[l] / abs(snr[l])
    # Apply the phase rotation in the frequency domain, then the cyclic
    # time shift in the time domain.
    h1 = (h1.to_frequencyseries() * rotation).to_timeseries()
    h1.roll(l)
    h1 = TimeSeries(h1, delta_t=h2.delta_t, epoch=h2.start_time)
    return h1, h2
def phase_from_frequencyseries(htilde, remove_start_phase=True):
    """Extract the unwrapped phase of a frequency-domain waveform.

    The waveform must be sampled finely enough that the phase never
    changes by more than pi radians between neighbouring samples,
    otherwise the unwrapping is ambiguous.

    Parameters
    ----------
    htilde : FrequencySeries
        Complex frequency series to take the phase of.
    remove_start_phase : {True, bool}
        If True, shift the phase so it starts at zero.

    Returns
    -------
    FrequencySeries
        Phase as a function of frequency.
    """
    wrapped = numpy.angle(htilde.data)
    phase = numpy.unwrap(wrapped).astype(real_same_precision_as(htilde))
    if remove_start_phase:
        phase -= phase[0]
    return FrequencySeries(phase, delta_f=htilde.delta_f,
                           epoch=htilde.epoch, copy=False)
def amplitude_from_frequencyseries(htilde):
    """Extract the amplitude of a frequency-domain waveform.

    Parameters
    ----------
    htilde : FrequencySeries
        Complex frequency series to take the magnitude of.

    Returns
    -------
    FrequencySeries
        Amplitude as a function of frequency, in the real precision
        matching ``htilde``.
    """
    magnitude = abs(htilde.data)
    amp = magnitude.astype(real_same_precision_as(htilde))
    return FrequencySeries(amp, delta_f=htilde.delta_f,
                           epoch=htilde.epoch, copy=False)
def time_from_frequencyseries(htilde, sample_frequencies=None,
                              discont_threshold=0.99*numpy.pi):
    """Computes time as a function of frequency from the given
    frequency-domain waveform. This assumes the stationary phase
    approximation. Any frequencies lower than the first non-zero value in
    htilde are assigned the time at the first non-zero value. Times for any
    frequencies above the next-to-last non-zero value in htilde will be
    assigned the time of the next-to-last non-zero value.

    .. note::
        Some waveform models (e.g., `SEOBNRv2_ROM_DoubleSpin`) can have
        discontinuities in the phase towards the end of the waveform due to
        numerical error. We therefore exclude any points that occur after a
        discontinuity in the phase, as the time estimate becomes untrustworthy
        beyond that point. What determines a discontinuity in the phase is set
        by the `discont_threshold`. To turn this feature off, just set
        `discont_threshold` to a value larger than pi (due to the unwrapping
        of the phase, no two points can have a difference > pi).

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to get the time evolution of; must be complex.
    sample_frequencies : {None, array}
        The frequencies at which the waveform is sampled. If None, will
        retrieve from ``htilde.sample_frequencies``.
    discont_threshold : {0.99*pi, float}
        If the difference in the phase changes by more than this threshold,
        it is considered to be a discontinuity. Default is 0.99*pi.

    Returns
    -------
    FrequencySeries
        The time evolution of the waveform as a function of frequency.
    """
    if sample_frequencies is None:
        sample_frequencies = htilde.sample_frequencies.numpy()
    phase = phase_from_frequencyseries(htilde).data
    # Stationary phase approximation: t(f) = -(1/2pi) * dphi/df,
    # evaluated here with first-order finite differences.
    dphi = numpy.diff(phase)
    time = -dphi / (2.*numpy.pi*numpy.diff(sample_frequencies))
    # First and next-to-last non-zero samples bound the trustworthy
    # region; values outside [kmin, kmax] are clamped below.
    nzidx = numpy.nonzero(abs(htilde.data))[0]
    kmin, kmax = nzidx[0], nzidx[-2]
    # exclude everything after a discontinuity
    discont_idx = numpy.where(abs(dphi[kmin:]) >= discont_threshold)[0]
    if discont_idx.size != 0:
        kmax = min(kmax, kmin + discont_idx[0]-1)
    time[:kmin] = time[kmin]
    time[kmax:] = time[kmax]
    return FrequencySeries(time.astype(real_same_precision_as(htilde)),
                           delta_f=htilde.delta_f, epoch=htilde.epoch,
                           copy=False)
def phase_from_polarizations(h_plus, h_cross, remove_start_phase=True):
    """Return the gravitational-wave phase.

    Computes the phase from the plus and cross polarizations as the
    unwrapped ``arctan2(h_cross, h_plus)``. The returned phase is always
    positive and increasing, and starts at 0 when
    ``remove_start_phase`` is True.

    Parameters
    ----------
    h_plus : TimeSeries
        Plus polarization of the gravitational waveform.
    h_cross : TimeSeries
        Cross polarization of the gravitational waveform.
    remove_start_phase : {True, bool}
        If True, shift the phase so it starts at zero.

    Returns
    -------
    TimeSeries
        The gravitational-wave phase as a function of time.

    Examples
    --------
    >>> from pycbc.waveform import get_td_waveform, phase_from_polarizations
    >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
                         f_lower=30, delta_t=1.0/4096)
    >>> phase = phase_from_polarizations(hp, hc)
    """
    wrapped = numpy.arctan2(h_cross.data, h_plus.data)
    phase = numpy.unwrap(wrapped).astype(real_same_precision_as(h_plus))
    if remove_start_phase:
        phase -= phase[0]
    return TimeSeries(phase, delta_t=h_plus.delta_t,
                      epoch=h_plus.start_time, copy=False)
def amplitude_from_polarizations(h_plus, h_cross):
    """Return the gravitational-wave amplitude.

    The amplitude is ``sqrt(h_plus**2 + h_cross**2)`` evaluated
    sample-by-sample.

    Parameters
    ----------
    h_plus : TimeSeries
        Plus polarization of the gravitational waveform.
    h_cross : TimeSeries
        Cross polarization of the gravitational waveform.

    Returns
    -------
    TimeSeries
        The gravitational-wave amplitude as a function of time.

    Examples
    --------
    >>> from pycbc.waveform import get_td_waveform, phase_from_polarizations
    >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
                         f_lower=30, delta_t=1.0/4096)
    >>> amp = amplitude_from_polarizations(hp, hc)
    """
    power = h_plus.squared_norm() + h_cross.squared_norm()
    amplitude = power ** 0.5
    return TimeSeries(amplitude, delta_t=h_plus.delta_t,
                      epoch=h_plus.start_time)
def frequency_from_polarizations(h_plus, h_cross):
    """Return the instantaneous gravitational-wave frequency.

    Differentiates the phase obtained from the two polarizations. The
    result is one sample shorter than the inputs and its epoch is
    advanced by half a sample, so each value sits between the two phase
    samples it was computed from.

    Parameters
    ----------
    h_plus : TimeSeries
        Plus polarization of the gravitational waveform.
    h_cross : TimeSeries
        Cross polarization of the gravitational waveform.

    Returns
    -------
    TimeSeries
        Instantaneous frequency as a function of time.

    Examples
    --------
    >>> from pycbc.waveform import get_td_waveform, phase_from_polarizations
    >>> hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
                         f_lower=30, delta_t=1.0/4096)
    >>> freq = frequency_from_polarizations(hp, hc)
    """
    gw_phase = phase_from_polarizations(h_plus, h_cross)
    # f = (dphi/dt) / (2*pi), via first-order finite differences.
    inst_freq = numpy.diff(gw_phase) / (2 * lal.PI * gw_phase.delta_t)
    shifted_epoch = gw_phase.start_time + gw_phase.delta_t / 2
    return TimeSeries(inst_freq.astype(real_same_precision_as(h_plus)),
                      delta_t=gw_phase.delta_t, epoch=shifted_epoch)
# map between tapering string in sim_inspiral table or inspiral
# code option and lalsimulation constants. ``None`` means "no taper";
# each long-form name has a short-form alias accepted by CLI options.
taper_map = {
    'TAPER_NONE'    : None,
    'TAPER_START'   : sim.SIM_INSPIRAL_TAPER_START,
    'start'         : sim.SIM_INSPIRAL_TAPER_START,
    'TAPER_END'     : sim.SIM_INSPIRAL_TAPER_END,
    'end'           : sim.SIM_INSPIRAL_TAPER_END,
    'TAPER_STARTEND': sim.SIM_INSPIRAL_TAPER_STARTEND,
    'startend'      : sim.SIM_INSPIRAL_TAPER_STARTEND
}

# map from numpy dtype to the LALSimulation taper function of the
# matching floating-point precision (used by taper_timeseries below).
taper_func_map = {
    numpy.dtype(float32): sim.SimInspiralREAL4WaveTaper,
    numpy.dtype(float64): sim.SimInspiralREAL8WaveTaper
}
def taper_timeseries(tsdata, tapermethod=None, return_lal=False):
    """
    Taper either or both ends of a time series using wrapped
    LALSimulation functions

    Parameters
    ----------
    tsdata : TimeSeries
        Series to be tapered, dtype must be either float32 or float64
    tapermethod : string
        Should be one of ('TAPER_NONE', 'TAPER_START', 'TAPER_END',
        'TAPER_STARTEND', 'start', 'end', 'startend') - NB 'TAPER_NONE' will
        not change the series!
    return_lal : Boolean
        If True, return a wrapped LAL time series object, else return a
        PyCBC time series.

    Raises
    ------
    ValueError
        If ``tapermethod`` is None or not a key of ``taper_map``.
    TypeError
        If the series dtype is neither float32 nor float64.
    """
    if tapermethod is None:
        # Bug fix: the two implicitly-concatenated string literals were
        # missing a separating space ("calledwith").
        raise ValueError("Must specify a tapering method (function was "
                         "called with tapermethod=None)")
    if tapermethod not in taper_map:
        raise ValueError("Unknown tapering method %s, valid methods are %s" %
                         (tapermethod, ", ".join(taper_map)))
    if tsdata.dtype not in (float32, float64):
        raise TypeError("Strain dtype must be float32 or float64, not "
                        + str(tsdata.dtype))
    taper_func = taper_func_map[tsdata.dtype]

    # make a LAL TimeSeries to pass to the LALSim function
    ts_lal = tsdata.astype(tsdata.dtype).lal()
    if taper_map[tapermethod] is not None:
        # Tapers in place on the LAL data vector.
        taper_func(ts_lal.data, taper_map[tapermethod])
    if return_lal:
        return ts_lal
    return TimeSeries(ts_lal.data.data[:], delta_t=ts_lal.deltaT,
                      epoch=ts_lal.epoch)
@schemed("pycbc.waveform.utils_")
def apply_fseries_time_shift(htilde, dt, kmin=0, copy=True):
    """Shifts a frequency domain waveform in time. The waveform is assumed to
    be sampled at equal frequency intervals.

    .. note::
        The body is intentionally empty: the ``@schemed`` decorator
        presumably dispatches calls to a scheme-specific implementation
        in the ``pycbc.waveform.utils_*`` modules — see
        ``pycbc.scheme.schemed`` to confirm.
    """
def apply_fd_time_shift(htilde, shifttime, kmin=0, fseries=None, copy=True):
    """Shift a frequency-domain waveform in time.

    The applied shift is ``shifttime - htilde.epoch``.

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform frequency series.
    shifttime : float
        The time to shift the frequency series to.
    kmin : {0, int}
        The starting index of htilde to apply the time shift. Default is 0.
    fseries : {None, numpy array}
        The frequencies of each element in htilde. This is only needed if
        htilde is not sampled at equal frequency steps.
    copy : {True, bool}
        Make a copy of htilde before applying the time shift. If False,
        the shift is applied to htilde's data in place.

    Returns
    -------
    FrequencySeries
        The waveform shifted to the new time: a new series when ``copy``
        is True, otherwise the same object as ``htilde``.
    """
    dt = float(shifttime - htilde.epoch)
    if dt == 0.:
        # Nothing to shift; just honor the copy request.
        return 1. * htilde if copy else htilde
    if isinstance(htilde, FrequencySeries):
        # Equally sampled in frequency: use the faster schemed kernel.
        return apply_fseries_time_shift(htilde, dt, kmin=kmin, copy=copy)
    # Unequally sampled: multiply by the analytic phase ramp.
    if fseries is None:
        fseries = htilde.sample_frequencies.numpy()
    phase_ramp = Array(numpy.exp(-2j * numpy.pi * dt * fseries),
                       dtype=complex_same_precision_as(htilde))
    if copy:
        htilde = 1. * htilde
    htilde *= phase_ramp
    return htilde
def td_taper(out, start, end, beta=8, side='left'):
    """Applies a taper to the given TimeSeries.

    A half-kaiser window is used for the roll-off.

    Parameters
    ----------
    out : TimeSeries
        The ``TimeSeries`` to taper.
    start : float
        The time (in s) to start the taper window.
    end : float
        The time (in s) to end the taper window.
    beta : int, optional
        The beta parameter to use for the Kaiser window. See
        ``scipy.signal.kaiser`` for details. Default is 8.
    side : {'left', 'right'}
        The side to apply the taper to. If ``'left'`` (``'right'``), the taper
        will roll up (down) between ``start`` and ``end``, with all values
        before ``start`` (after ``end``) set to zero. Default is ``'left'``.

    Returns
    -------
    TimeSeries
        The tapered time series.

    Raises
    ------
    ValueError
        If ``side`` is neither ``'left'`` nor ``'right'``.
    """
    out = out.copy()
    width = end - start
    # Build a full Kaiser window twice the taper width; only the rising
    # or falling half is applied below.
    winlen = 2 * int(width / out.delta_t)
    window = Array(signal.get_window(('kaiser', beta), winlen))
    # Sample index within ``out`` where the taper begins.
    xmin = int((start - out.start_time) / out.delta_t)
    xmax = xmin + winlen//2
    if side == 'left':
        # Roll up, then zero everything before the taper window.
        out[xmin:xmax] *= window[:winlen//2]
        if xmin > 0:
            out[:xmin].clear()
    elif side == 'right':
        # Roll down, then zero everything after the taper window.
        out[xmin:xmax] *= window[winlen//2:]
        if xmax < len(out):
            out[xmax:].clear()
    else:
        raise ValueError("unrecognized side argument {}".format(side))
    return out
def fd_taper(out, start, end, beta=8, side='left'):
    """Taper a FrequencySeries with a half-Kaiser roll-off.

    Parameters
    ----------
    out : FrequencySeries
        The ``FrequencySeries`` to taper.
    start : float
        Frequency (Hz) at which the taper window starts.
    end : float
        Frequency (Hz) at which the taper window ends.
    beta : int, optional
        Beta parameter of the Kaiser window (see ``scipy.signal.kaiser``).
        Default is 8.
    side : {'left', 'right'}
        Which side to taper. ``'left'`` rolls up between ``start`` and
        ``end`` and zeroes everything below ``start``; ``'right'`` rolls
        down and zeroes everything above ``end``.

    Returns
    -------
    FrequencySeries
        The tapered frequency series (the input is not modified).
    """
    out = out.copy()
    # Full window spanning twice the taper width; one half is used.
    winlen = 2 * int((end - start) / out.delta_f)
    half = winlen // 2
    kaiser_win = Array(signal.get_window(('kaiser', beta), winlen))
    kmin = int(start / out.delta_f)
    kmax = kmin + half
    if side == 'left':
        out[kmin:kmax] *= kaiser_win[:half]
        out[:kmin] *= 0.
    elif side == 'right':
        out[kmin:kmax] *= kaiser_win[half:]
        out[kmax:] *= 0.
    else:
        raise ValueError("unrecognized side argument {}".format(side))
    return out
def fd_to_td(htilde, delta_t=None, left_window=None, right_window=None,
             left_beta=8, right_beta=8):
    """Convert a frequency-domain waveform to the time domain.

    Optionally applies ``fd_taper`` windows to either edge of the
    frequency series before the inverse transform.

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to convert.
    delta_t : float, optional
        Sample spacing of the returned time series.
    left_window : tuple of float, optional
        ``(start, end)`` frequencies of the taper applied on the left
        side; no left taper when None.
    right_window : tuple of float, optional
        ``(start, end)`` frequencies of the taper applied on the right
        side; no right taper when None.
    left_beta : int, optional
        Kaiser beta for the left taper (see ``fd_taper``). Default 8.
    right_beta : int, optional
        Kaiser beta for the right taper. Default 8.

    Returns
    -------
    TimeSeries
        Time-domain representation of ``htilde``.
    """
    if left_window is not None:
        lo, hi = left_window
        htilde = fd_taper(htilde, lo, hi, side='left', beta=left_beta)
    if right_window is not None:
        lo, hi = right_window
        htilde = fd_taper(htilde, lo, hi, side='right', beta=right_beta)
    return htilde.to_timeseries(delta_t=delta_t)
| gpl-3.0 |
nikhilsaraf/Twitter-Analytics | venv/lib/python2.7/site-packages/wheel/test/test_signatures.py | 565 | 1120 | from wheel import signatures
from wheel.signatures import djbec, ed25519py
from wheel.util import binary
def test_getlib():
    # Smoke test: the ed25519 backend can be located and loaded.
    signatures.get_ed25519ll()
def test_djbec():
    # Run the module's built-in DSA and Diffie-Hellman self-tests;
    # presumably they assert/raise internally on failure.
    djbec.dsa_test()
    djbec.dh_test()
def test_ed25519py():
    """Round-trip sign/verify and reject bad keys/signatures."""
    def _must_raise_value_error(func, *args):
        # The library signals malformed input with ValueError; anything
        # else — including silent success — fails the test.
        try:
            func(*args)
        except ValueError:
            return
        raise Exception("Expected ValueError")

    kp0 = ed25519py.crypto_sign_keypair(binary(' ' * 32))
    kp = ed25519py.crypto_sign_keypair()
    signed = ed25519py.crypto_sign(binary('test'), kp.sk)
    # Verifying with the matching public key must succeed...
    ed25519py.crypto_sign_open(signed, kp.vk)
    # ...and fail with an unrelated public key.
    _must_raise_value_error(ed25519py.crypto_sign_open, signed, kp0.vk)
    # Wrong-length seed, secret key, and public key are all rejected.
    _must_raise_value_error(ed25519py.crypto_sign_keypair, binary(' ' * 33))
    _must_raise_value_error(ed25519py.crypto_sign, binary(''), binary(' ') * 31)
    _must_raise_value_error(ed25519py.crypto_sign_open, binary(''), binary(' ') * 31)
| gpl-3.0 |
faust64/ansible | lib/ansible/modules/cloud/cloudstack/cs_role.py | 38 | 6359 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_role
short_description: Manages user roles on Apache CloudStack based clouds.
description:
- Create, update, delete user roles.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the role.
required: true
id:
description:
- ID of the role.
- If provided, C(id) is used as key.
required: false
default: null
aliases: [ 'uuid' ]
role_type:
description:
- Type of the role.
- Only considered for creation.
required: false
default: User
choices: [ 'User', 'DomainAdmin', 'ResourceAdmin', 'Admin' ]
description:
description:
- Description of the role.
required: false
default: null
state:
description:
- State of the role.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure an user role is present
- local_action:
module: cs_role
name: myrole_user
# Ensure a role having particular ID is named as myrole_user
- local_action:
module: cs_role
name: myrole_user
id: 04589590-ac63-4ffc-93f5-b698b8ac38b6
# Ensure a role is absent
- local_action:
module: cs_role
name: myrole_user
state: absent
'''
RETURN = '''
---
id:
description: UUID of the role.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the role.
returned: success
type: string
sample: myrole
description:
description: Description of the role.
returned: success
type: string
sample: "This is my role description"
role_type:
description: Type of the role.
returned: success
type: string
sample: User
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, CloudStackException, cs_argument_spec, cs_required_together
class AnsibleCloudStackRole(AnsibleCloudStack):
    """CloudStack role management: lookup, create, update and delete."""

    def __init__(self, module):
        super(AnsibleCloudStackRole, self).__init__(module)
        # Map the API's 'type' field to the module's 'role_type' return key.
        self.returns = {
            'type': 'role_type',
        }

    def get_role(self):
        """Return the existing role dict (looked up by uuid, falling back
        to name) or None if it does not exist."""
        uuid = self.module.params.get('uuid')
        if uuid:
            args = {
                'id': uuid,
            }
            roles = self.cs.listRoles(**args)
            if roles:
                return roles['role'][0]
        else:
            args = {
                'name': self.module.params.get('name'),
            }
            roles = self.cs.listRoles(**args)
            if roles:
                return roles['role'][0]
        return None

    def present_role(self):
        """Ensure the role exists; create it or update it as needed."""
        role = self.get_role()
        if role:
            role = self._update_role(role)
        else:
            role = self._create_role(role)
        return role

    def _create_role(self, role):
        """Create the role. In check mode nothing is sent to the API and
        the incoming ``role`` (None) is returned unchanged."""
        self.result['changed'] = True
        args = {
            'name': self.module.params.get('name'),
            'type': self.module.params.get('role_type'),
            'description': self.module.params.get('description'),
        }
        if not self.module.check_mode:
            res = self.cs.createRole(**args)
            if 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
            role = res['role']
        return role

    def _update_role(self, role):
        """Update name/description of an existing role when they differ."""
        args = {
            'id': role['id'],
            'name': self.module.params.get('name'),
            'description': self.module.params.get('description'),
        }
        if self.has_changed(args, role):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.updateRole(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                # The API as in 4.9 does not return an updated role yet
                if 'role' not in res:
                    role = self.get_role()
                else:
                    role = res['role']
        return role

    def absent_role(self):
        """Ensure the role does not exist; returns the deleted role dict
        (for result reporting) or None if it was already absent."""
        role = self.get_role()
        if role:
            self.result['changed'] = True
            args = {
                'id': role['id'],
            }
            if not self.module.check_mode:
                res = self.cs.deleteRole(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
        return role
def main():
    """Module entry point: parse arguments, converge the role state and
    report the result."""
    argument_spec = cs_argument_spec()
    # NOTE(review): the spec names the parameter 'uuid' with alias 'id',
    # while the DOCUMENTATION block describes 'id' with alias 'uuid';
    # functionally equivalent, but the docs and spec disagree.
    argument_spec.update(dict(
        uuid=dict(default=None, aliases=['id']),
        name=dict(required=True),
        description=dict(default=None),
        role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'),
        state=dict(choices=['present', 'absent'], default='present'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    try:
        acs_role = AnsibleCloudStackRole(module)
        state = module.params.get('state')
        if state == 'absent':
            role = acs_role.absent_role()
        else:
            role = acs_role.present_role()
        result = acs_role.get_result(role)
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
ktrappe/seqan | apps/searchjoin/tests/run_tests.py | 13 | 4847 | #!/usr/bin/env python
"""Execute the tests for search/join.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
import glob
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
                                    '..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)

import seqan.app_tests as app_tests

# Output transforms applied before diffing expected vs. actual files.
transforms = [
    app_tests.UniqueTransform(left=False, right=True)
    ]
def main(source_base, binary_base):
    """Main entry point of the script.

    Builds one TestConf per parameter combination for the s4_join and
    s4_search binaries, runs them, diffs their outputs against golden
    files and returns True iff any test failed (NB: Python 2 script).
    """
    print 'Executing test for searchjoin'
    print '==========================='
    print

    ph = app_tests.TestPathHelper(
        source_base, binary_base,
        'apps/searchjoin/tests')  # tests dir

    # ============================================================
    # Auto-detect the binary path.
    # ============================================================

    path_to_join = app_tests.autolocateBinary(
      binary_base, 'bin', 's4_join')
    path_to_search = app_tests.autolocateBinary(
      binary_base, 'bin', 's4_search')

    # ============================================================
    # Built TestConf list.
    # ============================================================

    # Build list with TestConf objects, analoguely to how the output
    # was generated in generate_outputs.sh.
    conf_list = []

    # ============================================================
    # Define program parameters.
    # ============================================================

    # Seed length (per alphabet)
    SL = {
        'geo': ['5'],
        'dna': ['10']
    }
    # Errors (edit distance thresholds, per alphabet)
    K = {
        'geo': ['0', '1', '3'],
        'dna': ['0', '8', '16']
    }
    # Threads
    THREADS = '4'

    # ============================================================
    # Configure Join Tests.
    # ============================================================

    for alphabet in ['geo', 'dna']:
        for k in K[alphabet]:
            for sl in SL[alphabet]:
                conf = app_tests.TestConf(
                    program=path_to_join,
                    # redir_stdout=ph.outFile('join_%s_%s_%s.stdout' % (alphabet, k, sl)),
                    args=[ph.inFile('%s_database.csv' % alphabet), k,
                          '-i', alphabet,
                          '-t', THREADS,
                          '-sl', sl,
                          '-o', ph.outFile('join_%s_%s_%s.out' % (alphabet, k, sl))],
                    to_diff=[(ph.inFile('join_%s_%s.out' % (alphabet, k)),
                              ph.outFile('join_%s_%s_%s.out' % (alphabet, k, sl)),
                              transforms)])
                conf_list.append(conf)

    # ============================================================
    # Configure Search Tests.
    # ============================================================

    for alphabet in ['geo', 'dna']:
        for sl in SL[alphabet]:
            conf = app_tests.TestConf(
                program=path_to_search,
                # redir_stdout=ph.outFile('search_%s_%s.stdout' % (alphabet, sl)),
                args=[ph.inFile('%s_database.csv' % alphabet),
                      ph.inFile('%s_queries.csv' % alphabet),
                      '--no-wait',
                      '-i', alphabet,
                      '-t', THREADS,
                      '-sl', sl,
                      '-o', ph.outFile('search_%s_%s.out' % (alphabet, sl))],
                to_diff=[(ph.inFile('search_%s.out' % (alphabet)),
                          ph.outFile('search_%s_%s.out' % (alphabet, sl)),
                          transforms)])
            conf_list.append(conf)

    # ============================================================
    # Run Tests.
    # ============================================================

    # Execute the tests.
    failures = 0
    for conf in conf_list:
        res = app_tests.runTest(conf)
        # Output to the user.
        print ' '.join([conf.program] + conf.args),
        if res:
            print 'OK'
        else:
            failures += 1
            print 'FAILED'

    # Cleanup.
    ph.deleteTempDir()

    print '=============================='
    print '     total tests: %d' % len(conf_list)
    print '    failed tests: %d' % failures
    print 'successful tests: %d' % (len(conf_list) - failures)
    print '=============================='

    # Compute and return return code.
    return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
| bsd-3-clause |
doda/imagy | imagy/smush/optimiser/formats/gif.py | 1 | 3803 | import os.path
import shutil
from ..optimiser import Optimiser
from animated_gif import OptimiseAnimatedGIF
import logging
class OptimiseGIF(Optimiser):
    """
    Optimises gifs. If they aren't animated, it converts them to pngs with
    ImageMagick before optimising them as for pngs.

    Animated gifs get optimised according to the commands in OptimiseAnimatedGIF
    """

    def __init__(self, **kwargs):
        super(OptimiseGIF, self).__init__(**kwargs)

        # the commands to execute this optimiser: convert to PNG, then apply
        # the standard PNG pipeline (quantise + crush)
        if kwargs.get('quiet') == True:
            pngcrush = 'pngcrush -rem alla -brute -reduce -q "__INPUT__" "__OUTPUT__"'
        else:
            pngcrush = 'pngcrush -rem alla -brute -reduce "__INPUT__" "__OUTPUT__"'
        self.commands = ('convert "__INPUT__" png:"__OUTPUT__"',
                         'pngnq -n 256 -o "__OUTPUT__" "__INPUT__"',
                         pngcrush)

        # variable so we can easily determine whether a gif is animated or not
        self.animated_gif_optimiser = OptimiseAnimatedGIF()

        # per-file state, reset in set_input()
        self.converted_to_png = False
        self.is_animated = False

        # format as returned by 'identify'
        self.format = "GIF"

    def set_input(self, input):
        """Assign a new input file and reset the per-file state flags."""
        super(OptimiseGIF, self).set_input(input)
        self.converted_to_png = False
        self.is_animated = False

    def _is_animated(self, input):
        """
        Tests an image to see whether it's an animated gif
        """
        return self.animated_gif_optimiser._is_acceptable_image(input)

    def _keep_smallest_file(self, input, output):
        """
        Compares the sizes of two files, and discards the larger one
        """
        input_size = os.path.getsize(input)
        output_size = os.path.getsize(output)

        # if the image was optimised (output is smaller than input), overwrite
        # the input file with the output file.
        if (output_size < input_size):
            try:
                shutil.copyfile(output, input)
                self.files_optimised += 1
                self.bytes_saved += (input_size - output_size)
            except IOError:
                # BUG FIX: 'sys' was used here without ever being imported in
                # this module, so the error path itself raised a NameError.
                # Import locally to keep the module-level imports untouched.
                import sys
                logging.error("Unable to copy %s to %s: %s" % (output, input, IOError))
                sys.exit(1)

            # after the first pass on a non-animated gif, the working file is
            # now a PNG, so the PNG pipeline applies from here on
            if self.iterations == 1 and not self.is_animated:
                self.converted_to_png = True

        # delete the output file
        os.unlink(output)

    def _get_command(self):
        """
        Returns the next command to apply
        """
        command = False

        # for the first iteration, return the first command
        if self.iterations == 0:
            # if the GIF is animated, optimise it
            if self._is_animated(self.input):
                command = self.animated_gif_optimiser.commands[0]
                self.is_animated = True
            else:  # otherwise convert it to PNG
                command = self.commands[0]
        # execute the png optimisations if the gif was converted to a png
        elif self.converted_to_png and self.iterations < len(self.commands):
            command = self.commands[self.iterations]

        self.iterations += 1
        return command

    def _list_only(self, input, output):
        """
        Always keeps input, but still compares the sizes of two files
        """
        input_size = os.path.getsize(input)
        output_size = os.path.getsize(output)

        if (output_size > 0 and output_size < input_size):
            self.files_optimised += 1
            self.bytes_saved += (input_size - output_size)
            self.array_optimised_file.append(input)
            if self.iterations == 1 and not self.is_animated:
                # BUG FIX: was 'self.convert_to_png = True' -- a typo that
                # silently created a brand-new attribute, so _get_command()
                # (which reads 'converted_to_png') never ran the PNG pipeline
                # in list-only mode.
                self.converted_to_png = True

        # delete the output file
        os.unlink(output)
| bsd-2-clause |
cjh1/vtkmodular | Utilities/vtk2xml.py | 9 | 5258 | #!/usr/bin/env python
"""Utility script to convert an old VTK file format to the new VTK XML
file format (serial format). The output XML file will contain *all*
the existing scalars, vectors and tensors in the input file.
This requires VTK 4.x or above.
Created May 2003, Prabhu Ramachandran <prabhu@aero.iitm.ernet.in>
Licence: VTK License.
"""
import sys
import vtk
import os.path
import getopt
def getReaderWriter(file_name, out_dir=None):
    """Return a (reader, writer) pair suited to the legacy VTK file.

    Probes `file_name` with vtkDataSetReader to discover the dataset type,
    then instantiates the matching typed reader and XML writer.  The writer's
    output file name is `file_name` with the appropriate XML extension,
    optionally relocated into `out_dir`.

    Returns (None, None) when the dataset type is not recognised.
    """
    r = vtk.vtkDataSetReader()
    r.SetFileName(file_name)
    f_base = os.path.splitext(file_name)[0]
    r.Update()

    reader = None
    writer = None
    xmlsuffix = '.xml'
    # dataset type -> XML file extension (renamed from 'map', which shadowed
    # the builtin of the same name)
    suffix_map = {'StructuredPoints': '.vti', 'StructuredGrid': '.vts',
                  'RectilinearGrid': '.vtr', 'UnstructuredGrid': '.vtu',
                  'PolyData': '.vtp'}
    for i in ['StructuredPoints', 'StructuredGrid', 'RectilinearGrid',
              'UnstructuredGrid', 'PolyData']:
        # IMPROVED: dynamic attribute lookup via getattr instead of eval() --
        # same dispatch, without executing constructed code strings.
        if getattr(r, 'IsFile%s' % i)():
            reader = getattr(vtk, 'vtk%sReader' % i)()
            if i == 'StructuredPoints':
                # StructuredPoints is written as XML image data
                writer = vtk.vtkXMLImageDataWriter()
            else:
                writer = getattr(vtk, 'vtkXML%sWriter' % i)()
            xmlsuffix = suffix_map[i]
            break

    if not reader:
        return None, None

    reader.SetFileName(file_name)
    reader.Update()

    out_file = f_base + xmlsuffix
    if out_dir:
        out_file = os.path.join(out_dir,
                                os.path.basename(f_base) + xmlsuffix)
    writer.SetFileName(out_file)
    return reader, writer
def _getAttr(reader, lst, attr='Scalars'):
    """Activate each named array in `lst` and collect the resulting data.

    For every array name, makes it the active Scalars/Vectors/Tensors on the
    reader, then harvests the array object from both point data and cell data.

    Returns (point_arrays, cell_arrays) with duplicates removed.
    """
    p_a = []
    c_a = []
    for i in lst:
        # IMPROVED: getattr-based dispatch instead of eval() on constructed
        # code strings; behaviour is identical.
        getattr(reader, 'Set%sName' % attr)(i)
        reader.Update()
        o = reader.GetOutput()
        pd = o.GetPointData()
        cd = o.GetCellData()
        s = getattr(pd, 'Get%s' % attr)()
        if s and (s not in p_a):
            p_a.append(s)
        s = getattr(cd, 'Get%s' % attr)()
        if s and (s not in c_a):
            c_a.append(s)
    return p_a, c_a
def setAllAttributes(reader):
    """Attach every scalar, vector and tensor array in the file to the
    reader's output, so the XML writer preserves all of them.

    Returns the reader's output dataset with all arrays added.
    """
    # Collect the names of all arrays stored in the file.
    s_name = []
    v_name = []
    t_name = []
    for i in range(reader.GetNumberOfScalarsInFile()):
        s_name.append(reader.GetScalarsNameInFile(i))
    for i in range(reader.GetNumberOfVectorsInFile()):
        v_name.append(reader.GetVectorsNameInFile(i))
    for i in range(reader.GetNumberOfTensorsInFile()):
        t_name.append(reader.GetTensorsNameInFile(i))

    # Read each named array, separated into point-data and cell-data arrays.
    p_s, c_s = _getAttr(reader, s_name, 'Scalars')
    p_v, c_v = _getAttr(reader, v_name, 'Vectors')
    p_t, c_t = _getAttr(reader, t_name, 'Tensors')

    o = reader.GetOutput()
    pd = o.GetPointData()
    for i in p_s + p_v + p_t:
        pd.AddArray(i)
    cd = o.GetCellData()
    for i in c_s + c_v + c_t:
        cd.AddArray(i)
    return o
def usage():
    """Return the command-line help text for this script."""
    # The help text is returned (not printed) so callers decide where it goes.
    return """usage: vtk2xml.py [options] vtk_file1 vtk_file2 ...\n
This program converts VTK's old file format to the new XML format.
The default mode is to store the data as appended (compressed and
base64 encoded). Change this behaviour with the provided options.
This code requires VTK 4.x or above to run.
options:
-h, --help Show this help message and exit.
-b, --binary Store data as binary (compressed and base64 encoded).
-a, --ascii Store data as ascii.
-n, --no-encode Do not base64 encode the data. This violates the
XML specification but makes reading and writing fast
and files smaller.
-d, --output-dir <directory>
Output directory where the files should be generated.
Defaults to the same directory as the input file.
"""
def main():
    # Parse command-line options, then convert each input file to the
    # corresponding XML format.  NOTE: this is Python 2 syntax
    # ('except ..., msg' and print statements) -- it will not run on Python 3.
    options = "bahnd:"
    long_opts = ['binary', 'ascii', 'help', 'no-encode', 'output-dir=']
    try:
        opts, args = getopt.getopt(sys.argv[1:], options, long_opts)
    except getopt.error, msg:
        print msg
        print usage()
        print '-'*70
        print msg
        sys.exit(1)

    # mode: 'p' = appended (default), 'b' = binary, 'a' = ascii
    mode = 'p'
    # base64-encode appended data by default (see --no-encode)
    encode = 1
    out_dir = None
    for o, a in opts:
        if o in ('-h', '--help'):
            print usage()
            sys.exit(0)
        if o in ('-b', '--binary'):
            mode = 'b'
        if o in ('-a', '--ascii'):
            mode = 'a'
        if o in ('-n', '--no-encode'):
            encode = 0
        if o in ('-d', '--output-dir'):
            out_dir = a
            # fail fast if the requested output directory is missing
            if not os.path.exists(out_dir):
                print "Error: Directory %s does not exist!"%out_dir
                sys.exit(1)

    if len(args) < 1:
        print "\nError: Incorrect number of arguments\n"
        print usage()
        sys.exit(1)

    # Convert each input file in turn; unsupported formats are reported but
    # do not abort the run.
    for i in args:
        r, w = getReaderWriter(i, out_dir)
        if not r:
            print "\nError: Could not convert file: %s"%i
            print "Unsupported data format!\n"
        else:
            o = setAllAttributes(r)
            w.SetInput(o)
            # set output modes
            if mode == 'a':
                w.SetDataModeToAscii()
            elif mode == 'b':
                w.SetDataModeToBinary()
            else:
                w.SetDataModeToAppended()
                w.SetEncodeAppendedData(encode)
            w.Write()
if __name__ == "__main__":
main()
| bsd-3-clause |
openstack/python-senlinclient | senlinclient/common/utils.py | 1 | 4795 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.common import template_utils
from oslo_serialization import jsonutils
from oslo_utils import importutils
import prettytable
import yaml
from senlinclient.common import exc
from senlinclient.common.i18n import _
def import_versioned_module(version, submodule=None):
    """Import and return the senlinclient module for an API version.

    :param version: API version; ``1`` resolves to ``senlinclient.v1``.
    :param submodule: optional submodule name appended to the module path.
    """
    parts = ['senlinclient.v%s' % version]
    if submodule:
        parts.append(submodule)
    return importutils.import_module('.'.join(parts))
def format_nested_dict(d, fields, column_names):
    """Render dict *d* as a two-column, left-aligned pretty-printed table.

    :param d: mapping of field name to value; may be None (renders as '').
    :param fields: unused; kept for interface compatibility with callers
        built via nested_dict_formatter().
    :param column_names: header labels for the two table columns.
    :returns: the table as a string.
    """
    if d is None:
        return ''
    pt = prettytable.PrettyTable(caching=False, print_empty=False,
                                 header=True, field_names=column_names)
    for n in column_names:
        pt.align[n] = 'l'

    keys = sorted(d.keys())
    for field in keys:
        value = d[field]
        # BUG FIX: the None check used to run *after* the JSON conversion, so
        # None values were rendered as 'null' instead of the intended '-'.
        if value is None:
            value = '-'
        elif not isinstance(value, str):
            value = jsonutils.dumps(value, indent=2, ensure_ascii=False)
        pt.add_row([field, value.strip('"')])
    return pt.get_string()
def nested_dict_formatter(d, column_names):
    """Build a one-argument formatter that tabulates a nested dict."""
    def _format(obj):
        return format_nested_dict(obj, d, column_names)
    return _format
def json_formatter(js):
    """Serialize *js* as pretty-printed JSON (2-space indent, raw unicode)."""
    return jsonutils.dumps(js, indent=2, ensure_ascii=False)
def list_formatter(record):
    """Join a list of strings with newlines; None or empty yields ''."""
    if not record:
        return ''
    return '\n'.join(record)
def print_action_result(rid, res):
    """Print the outcome of an action triggered on resource *rid*.

    :param rid: resource identifier (shown verbatim in the output line).
    :param res: tuple of (status, detail); status "OK" means the request was
        accepted and detail is the action id, otherwise detail is the reason.
    """
    if res[0] == "OK":
        output = _("accepted by action %s") % res[1]
    else:
        output = _("failed due to '%s'") % res[1]

    print(_(" %(cid)s: %(output)s") % {"cid": rid, "output": output})
def format_parameters(params, parse_semicolon=True):
    """Reformat parameters into dict of format expected by the API.

    Repeated keys accumulate into a list of values.  When a single
    --parameters argument was given, it may carry several ';'-delimited
    key=value pairs.
    """
    if not params or params == ['{}']:
        return {}

    if parse_semicolon and len(params) == 1:
        # expect multiple invocations of --parameters but fall back to ';'
        # delimited if only one --parameters is specified
        params = params[0].split(';')

    parameters = {}
    for item in params:
        try:
            key, value = item.split('=', 1)
        except ValueError:
            msg = _('Malformed parameter(%s). Use the key=value format.') % item
            raise exc.CommandError(msg)

        if key not in parameters:
            parameters[key] = value
        else:
            existing = parameters[key]
            if not isinstance(existing, list):
                existing = [existing]
                parameters[key] = existing
            existing.append(value)
    return parameters
def format_json_parameter(param):
    '''Return JSON dict from JSON formatted param.

    :param param: JSON formatted string (falsy input yields an empty dict)
    :return: JSON dict
    :raises exc.CommandError: if *param* is not valid JSON
    '''
    if not param:
        return {}
    try:
        return jsonutils.loads(param)
    except ValueError:
        msg = _('Malformed parameter(%s). Use the JSON format.') % param
        raise exc.CommandError(msg)
def get_spec_content(filename):
    """Load and return the parsed YAML content of *filename*.

    :raises exc.CommandError: if the file cannot be parsed as YAML.
    """
    with open(filename, 'r') as f:
        try:
            data = yaml.safe_load(f)
        except Exception as ex:
            raise exc.CommandError(_('The specified file is not a valid '
                                     'YAML file: %s') % str(ex))
    return data
def process_stack_spec(spec):
    # Heat stack is a headache, because it demands for client side file
    # content processing
    """Resolve template/environment file references in a Heat stack spec.

    Reads the template named by spec['template'] and any environment files,
    then returns a new spec dict with the resolved contents inlined.

    :raises exc.FileFormatError: if the spec is not a mapping or names no
        template.
    """
    try:
        tmplfile = spec.get('template', None)
    except AttributeError as ex:
        raise exc.FileFormatError(_('The specified file is not a valid '
                                    'YAML file: %s') % str(ex))
    if not tmplfile:
        raise exc.FileFormatError(_('No template found in the given '
                                    'spec file'))

    # Pull in the template body plus every file it references.
    tpl_files, template = template_utils.get_template_contents(
        template_file=tmplfile)

    env_files, env = template_utils.process_multiple_environments_and_files(
        env_paths=spec.get('environment', None))

    new_spec = {
        # TODO(Qiming): add context support
        'disable_rollback': spec.get('disable_rollback', True),
        'context': spec.get('context', {}),
        'parameters': spec.get('parameters', {}),
        'timeout': spec.get('timeout', 60),
        'template': template,
        # merged map of every referenced template/environment file
        'files': dict(list(tpl_files.items()) + list(env_files.items())),
        'environment': env
    }
    return new_spec
| apache-2.0 |
kunki/shadowsocks | shadowsocks/crypto/rc4_md5.py | 1042 | 1339 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
from shadowsocks.crypto import openssl
__all__ = ['ciphers']
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
                  i=1, padding=1):
    """Create an RC4 cipher keyed with MD5(key + iv).

    The rc4-md5 scheme derives a per-connection RC4 key by hashing the
    pre-shared key with the IV.  The extra parameters (key_as_bytes, d, salt,
    i, padding) are unused here; they exist for signature compatibility with
    the other cipher factories.  `op` selects encrypt (1) or decrypt (0).
    """
    md5 = hashlib.md5()
    md5.update(key)
    md5.update(iv)
    rc4_key = md5.digest()
    return openssl.OpenSSLCrypto(b'rc4', rc4_key, b'', op)
# Cipher registry: name -> (key length, iv length, factory function).
ciphers = {
    'rc4-md5': (16, 16, create_cipher),
}
def test():
    """Round-trip smoke test: encrypt then decrypt with the rc4-md5 pair."""
    from shadowsocks.crypto import util

    cipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 1)
    decipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test()
| apache-2.0 |
tdyas/pants | src/python/pants/core/util_rules/strip_source_roots.py | 1 | 6519 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable, Optional, Tuple, Type, cast
from pants.core.target_types import FilesSources
from pants.engine.addresses import Address
from pants.engine.fs import (
EMPTY_SNAPSHOT,
Digest,
MergeDigests,
PathGlobs,
RemovePrefix,
Snapshot,
SnapshotSubset,
)
from pants.engine.rules import RootRule, SubsystemRule, rule
from pants.engine.selectors import Get, MultiGet
from pants.engine.target import HydratedSources, HydrateSourcesRequest
from pants.engine.target import Sources as SourcesField
from pants.source.source_root import NoSourceRootError, SourceRootConfig
from pants.util.meta import frozen_after_init
@dataclass(frozen=True)
class SourceRootStrippedSources:
    """Wrapper for a snapshot of files whose source roots have been stripped."""

    # Snapshot whose file paths are relative to their (removed) source roots.
    snapshot: Snapshot
@dataclass(frozen=True)
class StripSnapshotRequest:
    """A request to strip source roots for every file in the snapshot.

    The call site may optionally give the field `representative_path` if it is confident that all
    the files in the snapshot will only have one source root. Using `representative_path` results in
    better performance because we only need to call `SourceRoots.find_by_path()` on one single file
    rather than every file.
    """

    # Snapshot whose files still carry their source-root prefixes.
    snapshot: Snapshot
    # Optional fast path: a single file whose source root is assumed to apply
    # to every file in the snapshot.
    representative_path: Optional[str] = None
@frozen_after_init
@dataclass(unsafe_hash=True)
class StripSourcesFieldRequest:
    """A request to strip source roots for every file in a `Sources` field.

    The call site may optionally give a snapshot to `specified_files_snapshot` to only strip a
    subset of the target's `sources`, rather than every `sources` file. This is useful when working
    with precise file arguments.
    """

    # The target's Sources field to hydrate and strip.
    sources_field: SourcesField
    # Sources types acceptable for hydration (passed through to codegen).
    for_sources_types: Tuple[Type[SourcesField], ...]
    # Whether codegen may run to satisfy `for_sources_types`.
    enable_codegen: bool
    # Optional pre-computed subset of the target's sources to strip instead.
    specified_files_snapshot: Optional[Snapshot]

    def __init__(
        self,
        sources_field: SourcesField,
        *,
        for_sources_types: Iterable[Type[SourcesField]] = (SourcesField,),
        enable_codegen: bool = False,
        specified_files_snapshot: Optional[Snapshot] = None,
    ) -> None:
        # Explicit __init__ (rather than dataclass-generated) so that
        # `for_sources_types` is normalized to a hashable tuple.
        self.sources_field = sources_field
        self.for_sources_types = tuple(for_sources_types)
        self.enable_codegen = enable_codegen
        self.specified_files_snapshot = specified_files_snapshot
@rule
async def strip_source_roots_from_snapshot(
    request: StripSnapshotRequest, source_root_config: SourceRootConfig,
) -> SourceRootStrippedSources:
    """Removes source roots from a snapshot, e.g. `src/python/pants/util/strutil.py` ->
    `pants/util/strutil.py`."""
    source_roots_object = source_root_config.get_source_roots()

    def determine_source_root(path: str) -> str:
        # Resolve the configured source root for `path`; what happens for
        # unmatched files is controlled by the `unmatched` option.
        source_root = source_roots_object.safe_find_by_path(path)
        if source_root is not None:
            return cast(str, source_root.path)
        if source_root_config.options.unmatched == "fail":
            raise NoSourceRootError(f"Could not find a source root for `{path}`.")
        # Otherwise, create a source root by using the parent directory.
        return PurePath(path).parent.as_posix()

    # Fast path: the caller promised all files share one source root, so we
    # resolve it once and strip it from the whole digest.
    if request.representative_path is not None:
        resulting_digest = await Get[Digest](
            RemovePrefix(
                request.snapshot.digest, determine_source_root(request.representative_path),
            )
        )
        resulting_snapshot = await Get[Snapshot](Digest, resulting_digest)
        return SourceRootStrippedSources(snapshot=resulting_snapshot)

    # NOTE(review): itertools.groupby only merges *contiguous* runs; this
    # assumes `request.snapshot.files` is ordered so files under the same
    # source root are adjacent -- TODO confirm Snapshot guarantees this.
    files_grouped_by_source_root = {
        source_root: tuple(files)
        for source_root, files in itertools.groupby(
            request.snapshot.files, key=determine_source_root
        )
    }
    # Subset the snapshot per source root, strip each root prefix, then merge
    # the stripped digests back into one snapshot.
    snapshot_subsets = await MultiGet(
        Get[Snapshot](SnapshotSubset(request.snapshot.digest, PathGlobs(files)))
        for files in files_grouped_by_source_root.values()
    )
    resulting_digests = await MultiGet(
        Get[Digest](RemovePrefix(snapshot.digest, source_root))
        for snapshot, source_root in zip(snapshot_subsets, files_grouped_by_source_root.keys())
    )

    merged_result = await Get[Digest](MergeDigests(resulting_digests))
    resulting_snapshot = await Get[Snapshot](Digest, merged_result)
    return SourceRootStrippedSources(resulting_snapshot)
def representative_path_from_address(address: Address) -> str:
    """Generate a representative path as a performance hack so that we don't need to call
    SourceRoots.find_by_path() on every single file belonging to a target."""
    build_file = PurePath(address.spec_path) / "BUILD"
    return build_file.as_posix()
@rule
async def strip_source_roots_from_sources_field(
    request: StripSourcesFieldRequest,
) -> SourceRootStrippedSources:
    """Remove source roots from a target, e.g. `src/python/pants/util/strutil.py` ->
    `pants/util/strutil.py`."""
    # Use the caller-provided subset when given (precise file arguments);
    # otherwise hydrate the full Sources field.
    if request.specified_files_snapshot is not None:
        sources_snapshot = request.specified_files_snapshot
    else:
        hydrated_sources = await Get[HydratedSources](
            HydrateSourcesRequest(
                request.sources_field,
                for_sources_types=request.for_sources_types,
                enable_codegen=request.enable_codegen,
            )
        )
        sources_snapshot = hydrated_sources.snapshot

    if not sources_snapshot.files:
        return SourceRootStrippedSources(EMPTY_SNAPSHOT)

    # Unlike all other `Sources` subclasses, `FilesSources` (and its subclasses) do not remove
    # their source root. This is so that filesystem APIs (e.g. Python's `open()`) may still access
    # the files as they normally would, with the full path relative to the build root.
    if isinstance(request.sources_field, FilesSources):
        return SourceRootStrippedSources(sources_snapshot)

    return await Get[SourceRootStrippedSources](
        StripSnapshotRequest(
            sources_snapshot,
            representative_path=representative_path_from_address(request.sources_field.address),
        )
    )
def rules():
    """Rules and rule-graph roots exported by this module."""
    return [
        strip_source_roots_from_snapshot,
        strip_source_roots_from_sources_field,
        SubsystemRule(SourceRootConfig),
        RootRule(StripSnapshotRequest),
        RootRule(StripSourcesFieldRequest),
    ]
| apache-2.0 |
jayfans3/jieba | jieba/__main__.py | 60 | 1938 | """Jieba command line interface."""
import sys
import jieba
from argparse import ArgumentParser
from ._compat import *
# Command-line interface: parse options, configure the tokenizer, then
# segment the input file (or stdin) line by line.
parser = ArgumentParser(usage="%s -m jieba [options] filename" % sys.executable, description="Jieba command line interface.", epilog="If no filename specified, use STDIN instead.")
parser.add_argument("-d", "--delimiter", metavar="DELIM", default=' / ',
                    nargs='?', const=' ',
                    help="use DELIM instead of ' / ' for word delimiter; or a space if it is used without DELIM")
parser.add_argument("-D", "--dict", help="use DICT as dictionary")
parser.add_argument("-u", "--user-dict",
                    help="use USER_DICT together with the default dictionary or DICT (if specified)")
parser.add_argument("-a", "--cut-all",
                    action="store_true", dest="cutall", default=False,
                    help="full pattern cutting")
parser.add_argument("-n", "--no-hmm", dest="hmm", action="store_false",
                    default=True, help="don't use the Hidden Markov Model")
parser.add_argument("-q", "--quiet", action="store_true", default=False,
                    help="don't print loading messages to stderr")
parser.add_argument("-V", '--version', action='version',
                    version="Jieba " + jieba.__version__)
parser.add_argument("filename", nargs='?', help="input file")
args = parser.parse_args()

if args.quiet:
    # 60 is above logging.CRITICAL, silencing all loader messages
    jieba.setLogLevel(60)
delim = text_type(args.delimiter)
cutall = args.cutall
hmm = args.hmm
fp = open(args.filename, 'r') if args.filename else sys.stdin

# Load the requested dictionaries before segmenting.
if args.dict:
    jieba.initialize(args.dict)
else:
    jieba.initialize()
if args.user_dict:
    jieba.load_userdict(args.user_dict)

# readline() (rather than file iteration) keeps interactive stdin responsive.
ln = fp.readline()
while ln:
    # BUG FIX: removed the dead assignment "l = ln.rstrip('\r\n')" -- the
    # stripped value was recomputed inline below and 'l' was never used.
    result = delim.join(jieba.cut(ln.rstrip('\r\n'), cutall, hmm))
    if PY2:
        result = result.encode(default_encoding)
    print(result)
    ln = fp.readline()

fp.close()
| mit |
martynovp/edx-platform | lms/djangoapps/verify_student/migrations/0005_auto__add_incoursereverificationconfiguration.py | 92 | 9300 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding model 'InCourseReverificationConfiguration'.
        # Follows the standard config-model pattern: an audit trail
        # (change_date/changed_by) plus an 'enabled' toggle.
        db.create_table('verify_student_incoursereverificationconfiguration', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
            ('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('verify_student', ['InCourseReverificationConfiguration'])
    def backwards(self, orm):
        # Deleting model 'InCourseReverificationConfiguration'.
        # Drops the table created in forwards(); its rows are lost on rollback.
        db.delete_table('verify_student_incoursereverificationconfiguration')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reverification.midcoursereverificationwindow': {
'Meta': {'object_name': 'MidcourseReverificationWindow'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'verify_student.incoursereverificationconfiguration': {
'Meta': {'object_name': 'InCourseReverificationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'verify_student.softwaresecurephotoverification': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'8a3c9d8a-b885-480e-8e1e-ca111326db42'", 'max_length': '255', 'db_index': 'True'}),
'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'window': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reverification.MidcourseReverificationWindow']", 'null': 'True'})
},
'verify_student.verificationcheckpoint': {
'Meta': {'unique_together': "(('course_id', 'checkpoint_name'),)", 'object_name': 'VerificationCheckpoint'},
'checkpoint_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo_verification': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['verify_student.SoftwareSecurePhotoVerification']", 'symmetrical': 'False'})
},
'verify_student.verificationstatus': {
'Meta': {'object_name': 'VerificationStatus'},
'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['verify_student.VerificationCheckpoint']"}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['verify_student']
| agpl-3.0 |
codingdojotw/gtest-mirror | scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
    # Finds the path to upload.py, assuming it is in the same directory
    # as this file.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    upload_py_path = os.path.join(script_dir, 'upload.py')

    # Build the argument list for upload.py, ensuring the Google Test
    # discussion group is cc'd exactly once.
    upload_py_argv = [upload_py_path]
    found_cc_flag = False
    for arg in sys.argv[1:]:
        if not arg.startswith(CC_FLAG):
            upload_py_argv.append(arg)
            continue
        found_cc_flag = True
        cc_line = arg[len(CC_FLAG):]
        cc_list = [addr for addr in cc_line.split(',') if addr]
        if GTEST_GROUP not in cc_list:
            cc_list.append(GTEST_GROUP)
        upload_py_argv.append(CC_FLAG + ','.join(cc_list))
    if not found_cc_flag:
        upload_py_argv.append(CC_FLAG + GTEST_GROUP)

    # Invokes upload.py with the modified command line flags, replacing
    # this process.
    os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| bsd-3-clause |
jkyeung/XlsxWriter | examples/chart_pareto.py | 1 | 2128 | #######################################################################
#
# An example of creating of a Pareto chart with Python and XlsxWriter.
#
# Copyright 2013-2016, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
# Workbook and worksheet that will hold both the data and the chart.
workbook = xlsxwriter.Workbook('chart_pareto.xlsx')
worksheet = workbook.add_worksheet()

# Formats used in the workbook.
bold = workbook.add_format({'bold': True})
percent_format = workbook.add_format({'num_format': '0.0%'})

# Widen the columns for visibility.
worksheet.set_column('A:A', 15)
worksheet.set_column('B:C', 10)

# Add the worksheet data that the charts will refer to.
headings = ['Reason', 'Number', 'Percentage']
reasons = [
    'Traffic', 'Child care', 'Public Transport', 'Weather',
    'Overslept', 'Emergency',
]
numbers = [60, 40, 20, 15, 10, 5]
# Pre-computed cumulative shares used for the Pareto (line) series.
percents = [0.44, 0.667, 0.8, 0.9, 0.967, 1]

worksheet.write_row('A1', headings, bold)
worksheet.write_column('A2', reasons)
worksheet.write_column('B2', numbers)
worksheet.write_column('C2', percents, percent_format)

# Create a new column chart. This will be the primary chart.
column_chart = workbook.add_chart({'type': 'column'})

# Add a series.
column_chart.add_series({
    'categories': '=Sheet1!$A$2:$A$7',
    'values': '=Sheet1!$B$2:$B$7',
})

# Add a chart title.
column_chart.set_title({'name': 'Reasons for lateness'})

# Turn off the chart legend.
column_chart.set_legend({'position': 'none'})

# Set the title and scale of the Y axes. Note, the secondary axis is set from
# the primary chart.
column_chart.set_y_axis({
    'name': 'Respondents (number)',
    'min': 0,
    'max': 120
})
column_chart.set_y2_axis({'max': 1})

# Create a new line chart. This will be the secondary chart.
line_chart = workbook.add_chart({'type': 'line'})

# Add a series, on the secondary axis.
line_chart.add_series({
    'categories': '=Sheet1!$A$2:$A$7',
    'values': '=Sheet1!$C$2:$C$7',
    'marker': {'type': 'automatic'},
    'y2_axis': 1,
})

# Combine the charts.
column_chart.combine(line_chart)

# Insert the chart into the worksheet.
worksheet.insert_chart('F2', column_chart)

workbook.close()
| bsd-2-clause |
mastizada/kuma | vendor/packages/sqlalchemy/lib/sqlalchemy/dialects/sybase/pyodbc.py | 18 | 1963 | """
Support for Sybase via pyodbc.
http://pypi.python.org/pypi/pyodbc/
Connect strings are of the form::
sybase+pyodbc://<username>:<password>@<dsn>/
sybase+pyodbc://<username>:<password>@<host>/<database>
Unicode Support
---------------
The pyodbc driver currently supports usage of these Sybase types with
Unicode or multibyte strings::
CHAR
NCHAR
NVARCHAR
TEXT
VARCHAR
Currently *not* supported are::
UNICHAR
UNITEXT
UNIVARCHAR
"""
from sqlalchemy.dialects.sybase.base import SybaseDialect,\
SybaseExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
import decimal
from sqlalchemy import types as sqltypes, util, processors
class _SybNumeric_pyodbc(sqltypes.Numeric):
    """Turns Decimals with adjusted() < -6 into floats.

    It's not yet known how to get decimals with many
    significant digits or very large adjusted() into Sybase
    via pyodbc.
    """

    def bind_processor(self, dialect):
        """Return a bind processor that floats out very small Decimals."""
        parent_process = super(_SybNumeric_pyodbc, self).\
            bind_processor(dialect)

        def process(value):
            # Decimals with adjusted() < -6 are sent as floats; everything
            # else goes through the parent Numeric processing unchanged.
            if self.asdecimal and \
                    isinstance(value, decimal.Decimal) and \
                    value.adjusted() < -6:
                return processors.to_float(value)
            if parent_process:
                return parent_process(value)
            return value

        return process
class SybaseExecutionContext_pyodbc(SybaseExecutionContext):

    def set_ddl_autocommit(self, connection, value):
        # pyodbc drives DDL transaction behavior through the connection's
        # autocommit attribute; coerce to an exact True/False.
        connection.autocommit = bool(value)
class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
    # Use the pyodbc-specific execution context (autocommit-driven DDL).
    execution_ctx_cls = SybaseExecutionContext_pyodbc

    # Route Numeric through the workaround type for tiny Decimal values.
    colspecs = {
        sqltypes.Numeric: _SybNumeric_pyodbc,
    }

# Module-level entry point picked up by SQLAlchemy's dialect loader.
dialect = SybaseDialect_pyodbc
| mpl-2.0 |
rajul/mne-python | mne/io/__init__.py | 10 | 1063 | """FIF module for IO with .fif files"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from .open import fiff_open, show_fiff, _fiff_get_fid
from .meas_info import read_fiducials, write_fiducials, read_info, write_info
from .proj import make_eeg_average_ref_proj
from . import array
from . import base
from . import brainvision
from . import bti
from . import constants
from . import edf
from . import egi
from . import fiff
from . import kit
from . import pick
from .array import RawArray
from .brainvision import read_raw_brainvision
from .bti import read_raw_bti
from .edf import read_raw_edf
from .egi import read_raw_egi
from .kit import read_raw_kit, read_epochs_kit
from .fiff import read_raw_fif
# for backward compatibility
from .fiff import RawFIF
from .fiff import RawFIF as Raw
from .base import concatenate_raws
from .chpi import get_chpi_positions
from .reference import set_eeg_reference, set_bipolar_reference, add_reference_channels
| bsd-3-clause |
pearsonlab/nipype | nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py | 12 | 1312 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..convert import CFFConverter
def test_CFFConverter_inputs():
    """Verify CFFConverter's input spec against the recorded trait metadata.

    Auto-generated snapshot test: a failure means the interface definition
    drifted from the metadata captured by tools/checkspecs.py.
    """
    input_map = dict(creator=dict(),
                     data_files=dict(),
                     description=dict(usedefault=True,
                                      ),
                     email=dict(),
                     gifti_labels=dict(),
                     gifti_surfaces=dict(),
                     gpickled_networks=dict(),
                     graphml_networks=dict(),
                     ignore_exception=dict(nohash=True,
                                           usedefault=True,
                                           ),
                     license=dict(),
                     nifti_volumes=dict(),
                     out_file=dict(usedefault=True,
                                   ),
                     publisher=dict(),
                     references=dict(),
                     relation=dict(),
                     rights=dict(),
                     script_files=dict(),
                     species=dict(usedefault=True,
                                  ),
                     timeseries_files=dict(),
                     title=dict(),
                     tract_files=dict(),
                     )
    inputs = CFFConverter.input_spec()

    # Yield one check per (trait, metadata key) pair so the test runner
    # reports each mismatch individually.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_CFFConverter_outputs():
    """Verify CFFConverter's output spec against the recorded trait metadata."""
    output_map = dict(connectome_file=dict(),
                      )
    outputs = CFFConverter.output_spec()

    # One yielded check per (trait, metadata key) pair.
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
peiwei/zulip | bots/zephyr_mirror.py | 2 | 2956 | #!/usr/bin/env python
# Copyright (C) 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import print_function
import sys
import subprocess
import os
import traceback
import signal
from .zephyr_mirror_backend import parse_args
def die(signal, frame):
    """SIGINT handler: terminate the process immediately."""
    # We actually want to exit, so run os._exit (so as not to be caught and
    # restarted); sys.exit would raise SystemExit, which an outer handler
    # could intercept.
    os._exit(1)
# Exit hard on SIGINT instead of letting KeyboardInterrupt unwind (and
# possibly be caught and turned into a restart).
signal.signal(signal.SIGINT, die)

(options, args) = parse_args()

# Make the Zulip API importable from the configured root path.
sys.path[:0] = [os.path.join(options.root_path, 'api')]
from zulip import RandomExponentialBackoff

# Command line for the backend process; forward our own flags verbatim.
args = [os.path.join(options.root_path, "user_root", "zephyr_mirror_backend.py")]
args.extend(sys.argv[1:])

if options.sync_subscriptions:
    # One-shot mode: sync subscriptions and exit.
    subprocess.call(args)
    sys.exit(0)

if options.forward_class_messages and not options.noshard:
    # Sharded class mirroring: run one backend per hex digit, 16 in parallel.
    sys.path.append("/home/zulip/zulip")
    if options.on_startup_command is not None:
        subprocess.call([options.on_startup_command])
    from zerver.lib.parallel import run_parallel
    print("Starting parallel zephyr class mirroring bot")
    jobs = list("0123456789abcdef")

    def run_job(shard):
        # Each shard is a blocking child process; returning 0 marks success.
        subprocess.call(args + ["--shard=%s" % (shard,)])
        return 0

    for (status, job) in run_parallel(run_job, jobs, threads=16):
        print("A mirroring shard died!")
    sys.exit(0)

# Unsharded mode: keep restarting the backend with randomized exponential
# backoff; running for at least 300s counts as a success and resets it.
backoff = RandomExponentialBackoff(timeout_success_equivalent=300)
while backoff.keep_going():
    print("Starting zephyr mirroring bot")
    try:
        subprocess.call(args)
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and make
        # the loop impossible to stop cleanly; catch real errors only.
        traceback.print_exc()
    backoff.fail()

print("")
print("")
print("ERROR: The Zephyr mirroring bot is unable to continue mirroring Zephyrs.")
print("This is often caused by failing to maintain unexpired Kerberos tickets")
print("or AFS tokens. See https://zulip.com/zephyr for documentation on how to")
print("maintain unexpired Kerberos tickets and AFS tokens.")
print("")
sys.exit(1)
| apache-2.0 |
pacifica/pacifica-archiveinterface | post_deployment_tests/deployment_test.py | 2 | 12003 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Module used for testing a deployed archive interface."""
import os
from queue import Queue
from threading import Thread
import unittest
import requests
# url for the archive interface just deployed.
# Clean Local files will remove all the test file generated from where the script is run if true
# Clean archive files will remove the files from the archive, assuming that this process can access the
# path for the archive files
# archive prefix is only used if cleaning archive files generated by this test script
ARCHIVEURL = os.getenv('ARCHIVE_URL', 'http://127.0.0.1:8080')
def unistr2binary(data_str):
    """Return *data_str* encoded to UTF-8 as a mutable byte buffer."""
    # pragma: no cover python 3 only
    return bytearray(data_str.encode('utf8'))
class BasicArchiveTests(unittest.TestCase):
    """Plain-text smoke tests covering the archive's basic endpoints."""

    def test_simple_write(self):
        """Test writing a simple text file."""
        payload = unistr2binary('Writing content for first file')
        response = requests.put('{}/{}'.format(ARCHIVEURL, '1234'), data=payload)
        self.assertEqual(response.status_code, 201)
        body = response.json()
        self.assertEqual(int(body['total_bytes']), len(payload))
        self.assertEqual(body['message'], 'File added to archive')

    def test_simple_status(self):
        """Test statusing a simple text file."""
        target = '{}/{}'.format(ARCHIVEURL, '1235')
        payload = unistr2binary('Writing content for first file')
        self.assertEqual(requests.put(target, data=payload).status_code, 201)
        response = requests.head(target)
        self.assertEqual(response.status_code, 204)
        headers = response.headers
        self.assertEqual(headers['x-pacifica-file-storage-media'], 'disk')
        self.assertEqual(headers['x-content-length'], '30')
        self.assertEqual(headers['x-pacifica-messsage'], 'File was found')

    def test_simple_stage(self):
        """Test staging a simple text file."""
        target = '{}/{}'.format(ARCHIVEURL, '1236')
        payload = unistr2binary('Writing content for first file')
        self.assertEqual(requests.put(target, data=payload).status_code, 201)
        response = requests.post(target)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()['message'], 'File was staged')

    def test_simple_read(self):
        """Test reading a simple text file."""
        target = '{}/{}'.format(ARCHIVEURL, '1237')
        payload = unistr2binary('Writing content for first file')
        self.assertEqual(requests.put(target, data=payload).status_code, 201)
        response = requests.get(target, stream=True)
        # 30 is the length of 'Writing content for first file'.
        self.assertEqual(len(response.raw.read()), 30)

    def test_simple_delete(self):
        """Test deleting a simple text file."""
        target = '{}/{}'.format(ARCHIVEURL, '1238')
        payload = unistr2binary('Writing content for first file')
        self.assertEqual(requests.put(target, data=payload).status_code, 201)
        response = requests.delete(target, stream=True)
        self.assertEqual(response.status_code, 200)

    def test_file_rewrite(self):
        """Test trying to rewrite a file, rewrite should fail."""
        target = '{}/{}'.format(ARCHIVEURL, '1239')
        payload = unistr2binary('Writing content for first file')
        self.assertEqual(requests.put(target, data=payload).status_code, 201)
        response = requests.put(target, data=payload)
        self.assertEqual(response.status_code, 500)
        # The file path in the traceback is unique per deployment, so only
        # the stable prefix of the error message is asserted.
        self.assertTrue('Can\'t open' in response.json()['traceback'])
class BinaryFileArchiveTests(unittest.TestCase):
    """Class for testing binary files through the archive workflow."""

    def test_binary_file_write(self):
        """Write a binary file to the archive."""
        payload = bytearray([123, 3, 255, 0, 100])
        response = requests.put('{}/{}'.format(ARCHIVEURL, '4321'), data=payload)
        self.assertEqual(response.status_code, 201)
        body = response.json()
        self.assertEqual(int(body['total_bytes']), len(payload))
        self.assertEqual(body['message'], 'File added to archive')

    def test_binary_file_status(self):
        """Get a status for a binary file in the archive."""
        target = '{}/{}'.format(ARCHIVEURL, '4322')
        payload = bytearray([123, 3, 255, 0, 100])
        self.assertEqual(requests.put(target, data=payload).status_code, 201)
        response = requests.head(target)
        self.assertEqual(response.status_code, 204)
        headers = response.headers
        self.assertEqual(headers['x-pacifica-file-storage-media'], 'disk')
        self.assertEqual(headers['x-content-length'], '5')
        self.assertEqual(headers['x-pacifica-messsage'], 'File was found')

    def test_binary_file_stage(self):
        """Test staging a binary file."""
        target = '{}/{}'.format(ARCHIVEURL, '4323')
        payload = bytearray([123, 3, 255, 0, 100])
        self.assertEqual(requests.put(target, data=payload).status_code, 201)
        response = requests.post(target)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()['message'], 'File was staged')

    def test_binary_file_read(self):
        """Test reading a binary file back from the archive."""
        target = '{}/{}'.format(ARCHIVEURL, '4324')
        payload = bytearray([123, 3, 255, 0, 100])
        self.assertEqual(requests.put(target, data=payload).status_code, 201)
        response = requests.get(target, stream=True)
        self.assertEqual(len(response.raw.read()), 5)

    def test_binary_file_rewrite(self):
        """Test trying to rewrite a file, rewrite should fail."""
        target = '{}/{}'.format(ARCHIVEURL, '4325')
        payload = bytearray([123, 3, 255, 0, 100])
        self.assertEqual(requests.put(target, data=payload).status_code, 201)
        response = requests.put(target, data=payload)
        self.assertEqual(response.status_code, 500)
        # Only the stable prefix of the error is asserted; the embedded file
        # path differs per deployment.
        self.assertTrue('Can\'t open' in response.json()['traceback'])
# pylint: disable=too-few-public-methods
class RandomFile:
    """File-like object that serves random bytes up to a fixed total size."""

    def __init__(self, size):
        """Create a pseudo-file holding ``size`` bytes of random data."""
        self.len = size
        self.bytes_read = 0

    def read(self, size):
        """Return up to ``size`` random bytes, clamped to what remains."""
        remaining = self.len - self.bytes_read
        chunk = size if size <= remaining else remaining
        self.bytes_read += chunk
        return os.urandom(chunk)
# pylint: enable=too-few-public-methods
class LargeBinaryFileArchiveTests(unittest.TestCase):
    """Class that tests the writing and reading of a large binary file."""

    # 1 GiB by default; override with the LARGE_FILE_SIZE env variable.
    large_file_size = int(os.getenv('LARGE_FILE_SIZE', str(1024 * 1024 * 1024)))

    def test_large_binary_file_write(self):
        """Test writing a large binary file to the archive."""
        response = requests.put(
            '{}/{}'.format(ARCHIVEURL, '9999'),
            data=RandomFile(self.large_file_size))
        self.assertEqual(response.status_code, 201)
        body = response.json()
        self.assertEqual(int(body['total_bytes']), self.large_file_size)
        self.assertEqual(body['message'], 'File added to archive')

    def test_large_binary_file_status(self):
        """Test statusing a large binary file."""
        target = '{}/{}'.format(ARCHIVEURL, '9998')
        response = requests.put(target, data=RandomFile(self.large_file_size))
        self.assertEqual(response.status_code, 201)
        response = requests.head(target)
        self.assertEqual(response.status_code, 204)
        self.assertEqual(response.headers['x-pacifica-file-storage-media'], 'disk')
        self.assertEqual(
            response.headers['x-content-length'], str(self.large_file_size))
        self.assertEqual(response.headers['x-pacifica-messsage'], 'File was found')

    def test_large_binary_file_stage(self):
        """Test staging a large binary file."""
        target = '{}/{}'.format(ARCHIVEURL, '9997')
        response = requests.put(target, data=RandomFile(self.large_file_size))
        self.assertEqual(response.status_code, 201)
        response = requests.post(target)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()['message'], 'File was staged')

    def test_large_binary_file_read(self):
        """Test reading a large binary file."""
        target = '{}/{}'.format(ARCHIVEURL, '9996')
        response = requests.put(target, data=RandomFile(self.large_file_size))
        self.assertEqual(response.status_code, 201)
        response = requests.get(target, stream=True)
        # Stream the body in 1 KiB chunks and total the bytes received.
        filesize = 0
        while True:
            buf = response.raw.read(1024)
            if not buf:
                break
            filesize += len(buf)
        self.assertEqual(filesize, self.large_file_size)
class ManyFileArchiveTests(unittest.TestCase):
    """Class that tests the writing of many files at once."""

    def test_many_file_write(self):
        """test writing many files to the archive.

        Fans file ids out to a pool of daemon threads via a Queue; a False
        sentinel per worker shuts the pool down.  The same queue is reused
        for a second read-back pass that verifies the stored content.
        """
        num_worker_threads = 8
        job_id_queue = Queue()

        def worker_put():
            """Thread worker to send the test data."""
            # Loop until the False sentinel arrives; each id is PUT to the
            # archive and the response verified.
            work = job_id_queue.get()
            while work:
                data = unistr2binary('Writing content for first file')
                resp = requests.put(
                    '{}/{}'.format(ARCHIVEURL, work), data=data)
                self.assertEqual(resp.status_code, 201)
                data = resp.json()
                self.assertEqual(data['message'], 'File added to archive')
                job_id_queue.task_done()
                work = job_id_queue.get()
            # Account for the sentinel itself so queue.join() can return.
            job_id_queue.task_done()

        for i in range(num_worker_threads):
            new_thread = Thread(target=worker_put)
            new_thread.daemon = True
            new_thread.start()
        # Ids start at 3000 — presumably offset to avoid colliding with the
        # fixed ids used by the other test classes in this module.
        for i in range(3000, int(os.getenv('MANY_FILES_TEST_COUNT', '1000'))+3000):
            job_id_queue.put(i)
        # One False sentinel per worker terminates the pool.
        for i in range(num_worker_threads):
            job_id_queue.put(False)
        job_id_queue.join()

        def worker_get():
            """Thread worker to read back and verify the test data."""
            work = job_id_queue.get()
            while work:
                resp = requests.get(
                    '{}/{}'.format(ARCHIVEURL, work))
                self.assertEqual(resp.status_code, 200)
                self.assertEqual(resp.content, unistr2binary('Writing content for first file'))
                job_id_queue.task_done()
                work = job_id_queue.get()
            # Account for the sentinel itself so queue.join() can return.
            job_id_queue.task_done()

        for i in range(num_worker_threads):
            new_thread = Thread(target=worker_get)
            new_thread.daemon = True
            new_thread.start()
        for i in range(3000, int(os.getenv('MANY_FILES_TEST_COUNT', '1000'))+3000):
            job_id_queue.put(i)
        for i in range(num_worker_threads):
            job_id_queue.put(False)
        job_id_queue.join()
| lgpl-3.0 |
mgit-at/ansible | lib/ansible/plugins/lookup/etcd.py | 84 | 5572 | # (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
# (m) 2016, Mihai Moldovanu <mihaim@tfm.ro>
# (m) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author:
- Jan-Piet Mens (@jpmens)
lookup: etcd
version_added: "2.1"
short_description: get info from an etcd server
description:
- Retrieves data from an etcd server
options:
_terms:
description:
- the list of keys to lookup on the etcd server
type: list
elements: string
required: True
url:
description:
- Environment variable with the url for the etcd server
default: 'http://127.0.0.1:4001'
env:
- name: ANSIBLE_ETCD_URL
version:
description:
- Environment variable with the etcd protocol version
default: 'v1'
env:
- name: ANSIBLE_ETCD_VERSION
validate_certs:
description:
- toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs.
default: True
type: boolean
'''
EXAMPLES = '''
- name: "a value from a locally running etcd"
debug: msg={{ lookup('etcd', 'foo/bar') }}
- name: "values from multiple folders on a locally running etcd"
debug: msg={{ lookup('etcd', 'foo', 'bar', 'baz') }}
- name: "since Ansible 2.5 you can set server options inline"
debug: msg="{{ lookup('etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
'''
RETURN = '''
_raw:
description:
- list of values associated with input keys
type: list
elements: strings
'''
import json
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url
# this can be made configurable, but should not use ansible.cfg
#
# Made module configurable from playbooks:
# If etcd v2 running on host 192.168.1.21 on port 2379
# we can use the following in a playbook to retrieve /tfm/network/config key
#
# - debug: msg={{lookup('etcd','/tfm/network/config', url='http://192.168.1.21:2379' , version='v2')}}
#
# Example Output:
#
# TASK [debug] *******************************************************************
# ok: [localhost] => {
# "msg": {
# "Backend": {
# "Type": "vxlan"
# },
# "Network": "172.30.0.0/16",
# "SubnetLen": 24
# }
# }
#
#
#
#
class Etcd:
    """Minimal HTTP client for reading keys from an etcd server (v1/v2)."""

    def __init__(self, url, version, validate_certs):
        self.url = url
        self.version = version
        self.baseurl = '%s/%s/keys' % (self.url, self.version)
        self.validate_certs = validate_certs

    def _parse_node(self, node):
        """Recursively convert an etcd v2 node tree into nested dicts.

        Directory nodes become dicts keyed by the last path component of
        each child; leaf nodes yield their value directly.
        """
        if not node.get('dir', False):
            return node['value']
        return dict(
            (child['key'].split('/')[-1], self._parse_node(child))
            for child in node.get('nodes', [])
        )

    def get(self, key):
        """Fetch *key* recursively; None on request failure, 'ENOENT' on a
        missing entry, otherwise the parsed value (or nested dict)."""
        request_url = "%s/%s?recursive=true" % (self.baseurl, key)
        try:
            response = open_url(request_url, validate_certs=self.validate_certs)
            raw = response.read()
        except Exception:
            # Any transport error is reported as "no value".
            return None
        value = {}
        try:
            item = json.loads(raw)
            if self.version == 'v1':
                # v1 responses carry the value directly; folder parsing is
                # not supported for v1.
                if 'value' in item:
                    value = item['value']
            else:
                if 'node' in item:
                    # The usual v2 result: parse the node tree.
                    value = self._parse_node(item['node'])
                if 'errorCode' in item:
                    # Unknown entry: signal with a sentinel string.
                    value = "ENOENT"
        except Exception:
            raise
        return value
class LookupModule(LookupBase):

    def run(self, terms, variables, **kwargs):
        """Look up each term's first whitespace-delimited token on the
        configured etcd server and return the values in order."""
        self.set_options(var_options=variables, direct=kwargs)

        client = Etcd(
            url=self.get_option('url'),
            version=self.get_option('version'),
            validate_certs=self.get_option('validate_certs'),
        )
        # Only the first token of each term is treated as the key.
        return [client.get(term.split()[0]) for term in terms]
| gpl-3.0 |
SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/django/contrib/gis/db/backends/spatialite/models.py | 222 | 1934 | """
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class GeometryColumns(models.Model):
    """
    The 'geometry_columns' table from SpatiaLite.

    One row per registered geometry column in the SpatiaLite database.
    """
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    type = models.CharField(max_length=30)
    coord_dimension = models.IntegerField()
    # NOTE(review): srid serves as the Django primary key here — unmanaged
    # models require one; confirm uniqueness assumptions against SpatiaLite.
    srid = models.IntegerField(primary_key=True)
    spatial_index_enabled = models.IntegerField()

    class Meta:
        # Maps onto SpatiaLite's own metadata table; Django must not try to
        # create or migrate it.
        db_table = 'geometry_columns'
        managed = False

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the
        feature table name.
        """
        return 'f_table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the
        feature geometry column.
        """
        return 'f_geometry_column'

    def __str__(self):
        # e.g. "mytable.geom - 2D POINT field (SRID: 4326)"
        return "%s.%s - %dD %s field (SRID: %d)" % \
            (self.f_table_name, self.f_geometry_column,
             self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from SpatiaLite.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    ref_sys_name = models.CharField(max_length=256)
    proj4text = models.CharField(max_length=2048)

    @property
    def wkt(self):
        """Return the WKT representation derived from the PROJ.4 string."""
        # Local import: the GDAL bindings are only loaded when wkt is
        # actually accessed.
        from django.contrib.gis.gdal import SpatialReference
        return SpatialReference(self.proj4text).wkt

    class Meta:
        # SpatiaLite owns this table; Django must not create or migrate it.
        db_table = 'spatial_ref_sys'
        managed = False
| mit |
sghai/robottelo | tests/foreman/ui/test_classparameters.py | 1 | 61699 | # -*- encoding: utf-8 -*-
"""Test class for Smart/Puppet Class Parameter
:Requirement: Classparameters
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_integer, gen_string
from nailgun import entities
from random import choice, uniform
from requests import HTTPError
from robottelo.api.utils import (
create_role_permissions,
delete_puppet_class,
publish_puppet_module,
)
from robottelo.constants import CUSTOM_PUPPET_REPO
from robottelo.datafactory import filtered_datapoint, generate_strings_list
from robottelo.decorators import (
run_in_one_thread,
run_only_on,
skip_if_bug_open,
stubbed,
tier1,
tier2,
upgrade
)
from robottelo.helpers import get_nailgun_config
from robottelo.test import UITestCase
from robottelo.ui.locators import common_locators, locators
from robottelo.ui.session import Session
@filtered_datapoint
def valid_sc_parameters_data():
    """Returns a list of valid smart class parameter types and values.

    Each entry pairs a parameter type with a freshly generated value that
    should be accepted by that type's validator; values are regenerated on
    every call.
    """
    return [
        {
            u'sc_type': 'string',
            u'value': choice(generate_strings_list()),
        },
        {
            u'sc_type': 'boolean',
            u'value': choice(['true', 'false']),
        },
        {
            u'sc_type': 'integer',
            # leading zeros stripped so the value reads as a plain integer
            u'value': gen_string('numeric', 5).lstrip('0'),
        },
        {
            u'sc_type': 'real',
            u'value': str(uniform(-1000, 1000)),
        },
        {
            u'sc_type': 'array',
            u'value': u'["{0}","{1}","{2}"]'.format(
                gen_string('alpha'),
                gen_string('numeric').lstrip('0'),
                gen_string('html'),
            ),
        },
        {
            u'sc_type': 'hash',
            u'value': '{0}: {1}'.format(
                gen_string('alpha'), gen_string('alpha')),
        },
        {
            u'sc_type': 'yaml',
            u'value': '--- {0}=>{1}'.format(
                gen_string('alpha'), gen_string('alpha')),
        },
        {
            u'sc_type': 'json',
            u'value': u'{{"{0}":"{1}","{2}":"{3}"}}'.format(
                gen_string('alpha'),
                gen_string('numeric').lstrip('0'),
                gen_string('alpha'),
                gen_string('alphanumeric')
            ),
        },
    ]
@filtered_datapoint
def invalid_sc_parameters_data():
    """Returns a list of invalid smart class parameter types and values.

    Each value is deliberately mismatched with its declared type so the
    parameter validator is expected to reject it.
    """
    return [
        {
            u'sc_type': 'boolean',
            u'value': gen_string('alphanumeric'),
        },
        {
            u'sc_type': 'integer',
            u'value': gen_string('utf8'),
        },
        {
            u'sc_type': 'real',
            u'value': gen_string('alphanumeric'),
        },
        {
            u'sc_type': 'array',
            u'value': gen_string('alpha'),
        },
        {
            u'sc_type': 'hash',
            u'value': gen_string('alpha'),
        },
        {
            u'sc_type': 'yaml',
            u'value': '{{{0}:{1}}}'.format(
                gen_string('alpha'), gen_string('alpha')),
        },
        {
            u'sc_type': 'json',
            u'value': u'{{{0}:{1},{2}:{3}}}'.format(
                gen_string('alpha'),
                gen_string('numeric').lstrip('0'),
                gen_string('alpha'),
                gen_string('alphanumeric')
            ),
        }
    ]
@run_in_one_thread
class SmartClassParametersTestCase(UITestCase):
"""Implements Smart Class Parameter tests in UI"""
@classmethod
def set_session_org(cls):
"""Creates new organization to be used for current session the
session_user will login automatically with this org in context
"""
cls.session_org = entities.Organization().create()
@classmethod
def setUpClass(cls):
"""Import some parametrized puppet classes. This is required to make
sure that we have smart class parameter available.
Read all available smart class parameters for imported puppet class to
be able to work with unique entity for each specific test.
"""
super(SmartClassParametersTestCase, cls).setUpClass()
cls.pm_name = 'ui_test_classparameters'
cls.puppet_modules = [
{'author': 'robottelo', 'name': cls.pm_name},
]
cv = publish_puppet_module(
cls.puppet_modules, CUSTOM_PUPPET_REPO, cls.session_org)
cls.env = entities.Environment().search(
query={'search': u'content_view="{0}"'.format(cv.name)}
)[0].read()
cls.puppet_class = entities.PuppetClass().search(query={
'search': u'name = "{0}" and environment = "{1}"'.format(
cls.puppet_modules[0]['name'], cls.env.name)
})[0]
cls.sc_params_list = entities.SmartClassParameters().search(
query={
'search': 'puppetclass="{0}"'.format(cls.puppet_class.name),
'per_page': 1000
})
cls.host = entities.Host(organization=cls.session_org).create()
cls.host.environment = cls.env
cls.host.update(['environment'])
cls.host.add_puppetclass(data={'puppetclass_id': cls.puppet_class.id})
cls.domain_name = entities.Domain(id=cls.host.domain.id).read().name
@classmethod
def tearDownClass(cls):
"""Removes puppet class."""
super(SmartClassParametersTestCase, cls).tearDownClass()
delete_puppet_class(cls.puppet_class.name)
@run_only_on('sat')
@tier1
def test_positive_search(self):
"""Search for specific smart class parameter
:id: 76fcb049-2c3e-4ac1-944b-6dd7b0c69097
:expectedresults: Specified smart class parameter can be found in the
system
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.assertIsNotNone(self.sc_parameters.search(
sc_param.parameter, self.puppet_class.name))
@run_only_on('sat')
@tier2
def test_positive_search_with_non_admin_user(self):
"""Search for specific smart class parameter using non admin user
:id: 79bd4071-1baa-44af-91dd-1e093445af29
:expectedresults: Specified smart class parameter can be found in the
system
:BZ: 1391556
:CaseLevel: Integration
"""
sc_param = self.sc_params_list.pop()
username = gen_string('alpha')
password = gen_string('alpha')
required_user_permissions = {
'Puppetclass': [
'view_puppetclasses',
],
'PuppetclassLookupKey': [
'view_external_parameters',
'create_external_parameters',
'edit_external_parameters',
'destroy_external_parameters',
],
}
role = entities.Role().create()
create_role_permissions(role, required_user_permissions)
entities.User(
login=username,
password=password,
role=[role],
admin=False
).create()
# assert that the user is not an admin one and cannot read the current
# role info (note: view_roles is not in the required permissions)
cfg = get_nailgun_config()
cfg.auth = (username, password)
with self.assertRaises(HTTPError) as context:
entities.Role(cfg, id=role.id).read()
self.assertIn(
'403 Client Error: Forbidden', str(context.exception))
with Session(self, username, password):
self.assertIsNotNone(self.sc_parameters.search(
sc_param.parameter, self.puppet_class.name))
@run_only_on('sat')
@tier1
def test_positive_override_checkbox(self):
"""Override the Default Parameter value.
:id: e798b1be-b176-48e2-887f-3f3370efef90
:steps:
1. Check the Override checkbox.
2. Set the new valid Default Value.
3. Submit the changes.
:expectedresults: Parameter Value overridden with new value.
"""
new_value = gen_string('alpha')
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=new_value
)
default_value = self.sc_parameters.fetch_default_value(
sc_param.parameter, self.puppet_class.name)
self.assertEqual(default_value, new_value)
@run_only_on('sat')
@tier1
def test_positive_edit_parameter_dialog(self):
"""Validation, merging and matcher sections are accessible for enabled
'Override' checkbox.
:id: a6639110-1a58-4f68-8265-369b515f9c4a
:steps: Check the Override checkbox.
:expectedresults: Puppet Default, Hiding, Validation, Merging and
Matcher section enabled.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True
)
self.sc_parameters.click(self.sc_parameters.search(
sc_param.parameter, self.puppet_class.name))
self.sc_parameters.click(
locators['sc_parameters.optional_expander'])
locators_list = [
'sc_parameters.omit',
'sc_parameters.hidden_value',
'sc_parameters.validator_type',
'sc_parameters.matcher_priority',
'sc_parameters.add_matcher',
]
for locator in locators_list:
self.assertTrue(
self.sc_parameters.is_element_enabled(locators[locator]))
@run_only_on('sat')
@tier1
def test_negative_edit_parameter_dialog(self):
"""Validation, merging and matcher sections are not accessible for
disabled 'Override' checkbox.
:id: 30da3a17-05a5-4c19-8210-80c3a8dcf32b
:steps: Don't Check the Override checkbox.
:expectedresults: Default Value, Puppet Default, Hiding, Validation,
Merging and Matcher section is disabled.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=False
)
self.sc_parameters.click(self.sc_parameters.search(
sc_param.parameter, self.puppet_class.name))
self.sc_parameters.click(
locators['sc_parameters.optional_expander'])
locators_list = [
'sc_parameters.default_value',
'sc_parameters.omit',
'sc_parameters.hidden_value',
'sc_parameters.validator_type',
'sc_parameters.matcher_priority',
'sc_parameters.add_matcher',
]
for locator in locators_list:
self.assertFalse(
self.sc_parameters.is_element_enabled(locators[locator]))
    @run_only_on('sat')
    @tier1
    @upgrade
    def test_positive_update_parameter_type(self):
        """Positive Parameter Update for parameter types - Valid Value.
        Types - string, boolean, integer, real, array, hash, yaml, json
        :id: 5157c174-54f2-422f-8028-89604267c8e8
        :steps:
            1. Check the Override checkbox.
            2. Update the Key Type.
            3. Enter a 'valid' default Value.
            4. Submit the changes.
        :expectedresults: Parameter updated with a new type successfully.
        :CaseImportance: Critical
        """
        sc_param = self.sc_params_list.pop()
        with Session(self):
            # Each data entry pairs a key type with a value valid for that
            # type; each combination runs as its own subTest within a single
            # browser session.
            for data in valid_sc_parameters_data():
                with self.subTest(data):
                    self.sc_parameters.update(
                        sc_param.parameter,
                        self.puppet_class.name,
                        override=True,
                        key_type=data['sc_type'],
                        default_value=data['value'],
                    )
                    value = self.sc_parameters.fetch_default_value(
                        sc_param.parameter, self.puppet_class.name)
                    # Application is adding some data for yaml type once
                    # variable is created
                    if data['sc_type'] == 'yaml':
                        data['value'] += '\n...'
                    self.assertEqual(value, data['value'])
    @run_only_on('sat')
    @tier1
    def test_negative_update_parameter_type(self):
        """Negative Parameter Update for parameter types - Invalid Value.
        Types - string, boolean, integer, real, array, hash, yaml, json
        :id: 4285b397-0426-4523-8cc2-8e5d79f49aae
        :steps:
            1. Check the Override checkbox.
            2. Update the Key Type.
            3. Enter an 'Invalid' default Value.
            4. Submit the changes.
        :expectedresults: Parameter is not updated with invalid value for
            specific type.
        :CaseImportance: Critical
        """
        sc_param = self.sc_params_list.pop()
        with Session(self):
            # Remember the pre-update default so we can verify every rejected
            # update leaves it untouched.
            initial_value = self.sc_parameters.fetch_default_value(
                sc_param.parameter, self.puppet_class.name)
            for data in invalid_sc_parameters_data():
                with self.subTest(data):
                    self.sc_parameters.update(
                        sc_param.parameter,
                        self.puppet_class.name,
                        override=True,
                        key_type=data['sc_type'],
                        default_value=data['value'],
                    )
                    # The form must flag the invalid type/value pair.
                    self.assertIsNotNone(
                        self.sc_parameters.wait_until_element(
                            common_locators['haserror'])
                    )
                    value = self.sc_parameters.fetch_default_value(
                        sc_param.parameter, self.puppet_class.name)
                    self.assertEqual(value, initial_value)
    @run_only_on('sat')
    @tier1
    def test_positive_validate_puppet_default_value(self):
        """Validation doesn't works on puppet default value.
        :id: 8671338d-5547-4259-9119-a8952b4d982d
        :steps:
            1. Check the Override checkbox.
            2. Check 'Use Puppet Default' value.
            3. Validate this value under section 'Optional Input Validator'.
        :expectedresults: Validation shouldn't work with puppet default value.
        :caseautomation: automated
        """
        sc_param = self.sc_params_list.pop()
        with Session(self):
            # omit=True ("Use Puppet Default") together with a list
            # validator: the update must go through even though no default
            # value is supplied for the rule to validate.
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                omit=True,
                validator_type='list',
                validator_rule='45, test, 75',
            )
            self.sc_parameters.click(self.sc_parameters.search(
                sc_param.parameter, self.puppet_class.name))
            # 'Omit' stays checked and the default value input is disabled.
            self.assertTrue(self.sc_parameters.wait_until_element(
                locators['sc_parameters.omit']).is_selected())
            self.assertFalse(self.sc_parameters.is_element_enabled(
                locators['sc_parameters.default_value']))
            self.sc_parameters.click(
                locators['sc_parameters.optional_expander'])
            # The validator rule was stored verbatim despite not being
            # applied to any value.
            value = self.sc_parameters.wait_until_element(
                locators['sc_parameters.validator_rule']
            ).get_attribute('value')
            self.assertEqual(value, u'45, test, 75')
    @run_only_on('sat')
    @stubbed()
    @tier2
    def test_negative_validate_default_value_required_checkbox(self):
        """Error raised for blank default Value - Required checkbox.
        :id: b665891a-15c2-4e7b-a118-f239ff45d37a
        :steps:
            1. Check the Override checkbox.
            2. Don't provide any default value, keep blank.
            3. Check Required checkbox in 'Optional Input Validator'.
            4. Submit the change.
        :expectedresults: Error raised for blank default value by 'Required'
            checkbox.
        :caseautomation: notautomated
        :CaseLevel: Integration
        """
        # Stub: scenario documented above, not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier2
    def test_negative_validate_matcher_value_required_checkbox(self):
        """Error raised for blank matcher Value - Required checkbox.
        :id: e88b1bc7-529a-4dbd-9e43-4738580e12ab
        :steps:
            1. Check the Override checkbox.
            2. Create a matcher for Parameter for some attribute.
            3. Don't provide Value for matcher. Keep blank.
            4. Check Required checkbox in 'Optional Input Validator'.
            5. Submit the change.
        :expectedresults: Error raised for blank matcher value by 'Required'
            checkbox.
        :caseautomation: notautomated
        :CaseLevel: Integration
        """
        # Stub: scenario documented above, not yet automated.
@run_only_on('sat')
@tier1
def test_negative_validate_default_value_with_regex(self):
"""Error raised for default value not matching with regex.
:id: 1854c189-8a5f-410c-b195-ea2a51e72b30
:steps:
1. Check the Override checkbox.
2. Provide default value that doesn't matches the regex of step 3.
3. Validate this value with regex validator type and rule.
4. Submit the change.
:expectedresults: Error raised for default value not matching with
regex.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=gen_string('alpha'),
validator_type='regexp',
validator_rule='[0-9]',
)
self.assertIsNotNone(
self.sc_parameters.wait_until_element(
common_locators['haserror'])
)
@run_only_on('sat')
@tier1
def test_positive_validate_default_value_with_regex(self):
"""Error not raised for default value matching with regex.
:id: 81ab3074-dc22-4c60-b638-62fdfabe60fb
:steps:
1. Check the Override checkbox.
2. Provide default value that matches the regex of step 3.
3. Validate this value with regex validator type and rule.
4. Submit the change.
:expectedresults: Error not raised for default value matching with
regex.
"""
sc_param = self.sc_params_list.pop()
initial_value = gen_string('numeric')
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=initial_value,
validator_type='regexp',
validator_rule='[0-9]',
)
self.assertIsNone(
self.sc_parameters.wait_until_element(
common_locators['haserror'], timeout=5)
)
value = self.sc_parameters.fetch_default_value(
sc_param.parameter, self.puppet_class.name)
self.assertEqual(value, initial_value)
@run_only_on('sat')
@tier1
def test_negative_validate_matcher_value_with_regex(self):
"""Error raised for matcher value not matching with regex.
:id: 77e56e9b-ddee-41e8-bd9f-e1a43f5053b2
:steps:
1. Check the Override checkbox.
2. Create a matcher with value that doesn't matches the regex of
step 3.
3. Validate this value with regex validator type and rule.
4. Submit the change.
:expectedresults: Error raised for matcher value not matching with
regex.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=gen_string('numeric'),
validator_type='regexp',
validator_rule='[0-9]',
matcher=[{
'matcher_attribute': 'os=rhel6',
'matcher_value': gen_string('alpha')
}]
)
self.assertIsNotNone(
self.sc_parameters.wait_until_element(
locators['sc_parameters.matcher_error'])
)
    @run_only_on('sat')
    @tier1
    def test_positive_validate_matcher_value_with_regex(self):
        """Error not raised for matcher value matching with regex.
        :id: e642fcb1-f1dc-4263-adf1-8f881ce06f68
        :steps:
            1. Check the Override checkbox.
            2. Create a matcher with value that matches the regex of step 3.
            3. Validate this value with regex validator type and rule.
            4. Submit the change.
        :expectedresults: Error not raised for matcher value matching with
            regex.
        """
        sc_param = self.sc_params_list.pop()
        matcher_value = gen_string('numeric')
        with Session(self):
            # Both the default value and the matcher value are numeric, so
            # the '[0-9]' regexp rule is satisfied for each of them.
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value=gen_string('numeric'),
                validator_type='regexp',
                validator_rule='[0-9]',
                matcher=[{
                    'matcher_attribute': 'os=rhel6',
                    'matcher_value': matcher_value,
                }]
            )
            # Short timeout: we only expect the error NOT to appear.
            self.assertIsNone(
                self.sc_parameters.wait_until_element(
                    locators['sc_parameters.matcher_error'], timeout=5))
            matchers_list = self.sc_parameters.fetch_matcher_values(
                sc_param.parameter, self.puppet_class.name, 1)
            self.assertEqual(matchers_list[0], matcher_value)
@run_only_on('sat')
@tier1
def test_negative_validate_default_value_with_list(self):
"""Error raised for default value not in list.
:id: 3d353610-97cd-45a3-8e99-425bc948ee51
:steps:
1. Check the Override checkbox.
2. Provide default value that doesn't matches the list of step 3.
3. Validate this value with list validator type and rule.
4. Submit the change.
:expectedresults: Error raised for default value not in list.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=gen_string('alphanumeric'),
validator_type='list',
validator_rule='45, test',
)
self.assertIsNotNone(
self.sc_parameters.wait_until_element(
common_locators['haserror'])
)
@run_only_on('sat')
@tier1
def test_positive_validate_default_value_with_list(self):
"""Error not raised for default value in list.
:id: 62658cde-becd-4083-ba06-1fd4c8904173
:steps:
1. Check the Override checkbox.
2. Provide default value that matches the list of step 3.
3. Validate this value with list validator type and rule.
4. Submit the change.
:expectedresults: Error not raised for default value in list.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value='475',
validator_type='list',
validator_rule='true, 50, 475',
)
self.assertIsNone(
self.sc_parameters.wait_until_element(
common_locators['haserror'], timeout=5)
)
value = self.sc_parameters.fetch_default_value(
sc_param.parameter, self.puppet_class.name)
self.assertEqual(value, '475')
    @run_only_on('sat')
    @tier1
    def test_negative_validate_matcher_value_with_list(self):
        """Error raised for matcher value not in list.
        :id: ab8000e9-0110-473e-9c86-eeb709cbfd08
        :steps:
            1. Check the Override checkbox.
            2. Create a matcher with value that doesn't matches the list of
               step 3.
            3. Validate this value with list validator type and rule.
            4. Submit the change.
        :expectedresults: Error raised for matcher value not in list.
        """
        sc_param = self.sc_params_list.pop()
        with Session(self):
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value='50',
                validator_type='list',
                validator_rule='25, example, 50',
                matcher=[{
                    'matcher_attribute': 'os=rhel6',
                    'matcher_value': 'myexample'
                }]
            )
            # Default value '50' IS in the list, so no generic form error
            # may appear (short timeout since absence is expected)...
            self.assertIsNone(
                self.sc_parameters.wait_until_element(
                    common_locators['haserror'], timeout=5)
            )
            # ...but matcher value 'myexample' is NOT in the list, which
            # must surface as a matcher-specific error.
            self.assertIsNotNone(
                self.sc_parameters.wait_until_element(
                    locators['sc_parameters.matcher_error'])
            )
@run_only_on('sat')
@tier1
def test_positive_validate_matcher_value_with_list(self):
"""Error not raised for matcher value in list.
:id: ae5b38e5-f7ea-4325-8a2c-917120470688
:steps:
1. Check the Override checkbox.
2. Create a matcher with value that matches the list of step 3.
3. Validate this value with list validator type and rule.
4. Submit the change.
:expectedresults: Error not raised for matcher value in list.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value='test',
validator_type='list',
validator_rule='test, example, 30',
matcher=[{
'matcher_attribute': 'os=rhel6',
'matcher_value': '30'
}]
)
self.assertIsNone(
self.sc_parameters.wait_until_element(
locators['sc_parameters.matcher_error'], timeout=5))
matchers_list = self.sc_parameters.fetch_matcher_values(
sc_param.parameter, self.puppet_class.name, 1)
self.assertEqual(matchers_list[0], '30')
@run_only_on('sat')
@tier1
def test_negative_validate_matcher_value_with_default_type(self):
"""Error raised for matcher value not of default type.
:id: 70109667-c72e-4045-92a4-6b8bbbd615eb
:steps:
1. Check the Override checkbox.
2. Update parameter default type with valid value.
3. Create a matcher with value that doesn't matches the default
type.
4. Submit the change.
:expectedresults: Error raised for matcher value not of default type.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=gen_integer(),
key_type='integer',
matcher=[{
'matcher_attribute': 'os=rhel6',
'matcher_value': gen_string('alpha')
}]
)
self.assertIsNotNone(
self.sc_parameters.wait_until_element(
locators['sc_parameters.matcher_error'])
)
    @run_only_on('sat')
    @tier1
    def test_positive_validate_matcher_value_with_default_type(self):
        """No error for matcher value of default type.
        :id: 57d4d82a-fc5b-4d85-8e8e-9a7ca880c5a2
        :steps:
            1. Check the Override checkbox.
            2. Update parameter default type with valid value.
            3. Create a matcher with value that matches the default type.
            4. Submit the change.
        :expectedresults: Error not raised for matcher value of default type.
        """
        sc_param = self.sc_params_list.pop()
        matcher_value = gen_integer()
        with Session(self):
            # Matcher value matches the declared integer key type, so no
            # matcher error may be raised.
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value=gen_integer(),
                key_type='integer',
                matcher=[{
                    'matcher_attribute': 'os=rhel6',
                    'matcher_value': matcher_value
                }]
            )
            self.assertIsNone(
                self.sc_parameters.wait_until_element(
                    locators['sc_parameters.matcher_error'], timeout=5))
            matchers_list = self.sc_parameters.fetch_matcher_values(
                sc_param.parameter, self.puppet_class.name, 1)
            # The UI returns matcher values as text, hence the int() cast
            # before comparing with the generated integer.
            self.assertEqual(int(matchers_list[0]), matcher_value)
    @run_only_on('sat')
    @tier1
    def test_negative_validate_matcher_and_default_value(self):
        """Error for invalid default and matcher value both at a time.
        :id: 141b9e6a-7c4c-4ab0-8e30-7c3ddff0b9c8
        :steps:
            1. Check the Override checkbox.
            2. Update parameter default type with Invalid value.
            3. Create a matcher with value that doesn't matches the default
               type.
            4. Submit the change.
        :expectedresults: Error raised for invalid default and matcher value
            both.
        """
        sc_param = self.sc_params_list.pop()
        with Session(self):
            # Both the default value and the matcher value are alphabetic
            # while the key type is integer -- two independent violations.
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value=gen_string('alpha'),
                key_type='integer',
                matcher=[{
                    'matcher_attribute': 'os=rhel6',
                    'matcher_value': gen_string('alpha')
                }]
            )
            # Expect both the generic form error (bad default value) and the
            # matcher-specific error to be shown at the same time.
            self.assertIsNotNone(
                self.sc_parameters.wait_until_element(
                    common_locators['haserror'])
            )
            self.assertIsNotNone(
                self.sc_parameters.wait_until_element(
                    locators['sc_parameters.matcher_error'])
            )
@run_only_on('sat')
@tier1
def test_negative_validate_matcher_non_existing_attribute(self):
"""Error while creating matcher for Non Existing Attribute.
:id: 46fd985d-2e7f-4a8a-8059-0c4d2ffcc8cd
:steps:
1. Check the Override checkbox.
2. Create a matcher with non existing attribute in org.
3. Attempt to submit the change.
:expectedresults: Error raised for non existing attribute.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=gen_string('alpha'),
matcher=[{
'matcher_attribute': 'hostgroup={0}'.format(
gen_string('alpha')),
'matcher_value': gen_string('alpha')
}]
)
self.assertIsNotNone(
self.sc_parameters.wait_until_element(
locators['sc_parameters.matcher_error'])
)
    @run_only_on('sat')
    @skip_if_bug_open('bugzilla', 1402036)
    @tier1
    def test_positive_validate_matcher_with_comma(self):
        """Create matcher for attribute that has comma in its value
        :id: 2116e85b-add5-4bc3-aab7-5c9f0965c4a8
        :steps:
            1. Check the Override checkbox.
            2. Create a matcher with attribute that has comma in its value
            3. Submit the change.
        :BZ: 1402036
        :expectedresults: Matcher is created and its attribute is not modified
            after update is submitted
        """
        sc_param = self.sc_params_list.pop()
        # The location name deliberately contains a comma -- the exact
        # character BZ 1402036 is about -- to ensure the matcher attribute
        # value is not split or truncated on save.
        loc_name = '{0}, {1}'.format(gen_string('alpha'), gen_string('alpha'))
        entities.Location(name=loc_name).create()
        with Session(self):
            # 'location' must be part of the matcher priority order so a
            # location-based matcher is accepted at all.
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value=gen_string('alpha'),
                matcher_priority='\n'.join(
                    ['fqdn', 'hostgroup', 'os', 'domain', 'location']),
                matcher=[{
                    'matcher_attribute': 'location={0}'.format(loc_name),
                    'matcher_value': gen_string('alpha')
                }]
            )
            self.assertIsNone(
                self.sc_parameters.wait_until_element(
                    locators['sc_parameters.matcher_error'], timeout=5))
            # Re-open the parameter and verify the attribute value survived
            # the round-trip unmodified, comma included.
            self.sc_parameters.click(self.sc_parameters.search(
                sc_param.parameter, self.puppet_class.name))
            attribute_value = self.sc_parameters.wait_until_element(
                locators['sc_parameters.matcher_attribute_value'] % 1
            ).get_attribute('value')
            self.assertEqual(attribute_value, loc_name)
    @run_only_on('sat')
    @tier1
    @upgrade
    def test_positive_create_matcher_puppet_default_value(self):
        """Create matcher for attribute in parameter, where Value is puppet
        default value.
        :id: 656f25cd-4394-414a-bd8e-458f0e51c668
        :steps:
            1. Check the Override checkbox.
            2. Set some default Value.
            3. Click on 'Add Matcher' button to add matcher.
            4. Choose valid attribute type, name and puppet default value.
            5. Submit the change.
        :expectedresults: The matcher has been created successfully.
        :CaseImportance: Critical
        """
        sc_param = self.sc_params_list.pop()
        with Session(self):
            # matcher_omit=True marks the matcher as "use puppet default",
            # which is why its value is left empty.
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value=gen_integer(),
                key_type='integer',
                matcher=[{
                    'matcher_attribute': 'os=rhel6',
                    'matcher_value': '',
                    'matcher_omit': True
                }]
            )
            self.assertIsNone(
                self.sc_parameters.wait_until_element(
                    locators['sc_parameters.matcher_error'], timeout=5))
            # For an omitted matcher the value input of the first matcher row
            # must be disabled after reopening the parameter.
            self.sc_parameters.click(self.sc_parameters.search(
                sc_param.parameter, self.puppet_class.name))
            self.assertFalse(self.sc_parameters.is_element_enabled(
                locators['sc_parameters.matcher_value'] % 1))
@run_only_on('sat')
@tier1
def test_positive_update_with_long_priority_list(self):
"""Smart class parameter priority order list can contain more than 255
character inside of it
:id: f5e7847a-1f4b-455d-aa73-6b02774b6168
:customerscenario: true
:steps:
1. Check the Override checkbox.
2. Set some default Value.
3. Set long priority order list
:expectedresults: Smart class parameter is updated successfully and has
proper priority list
:BZ: 1458817
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
order_value = '\n'.join(
[gen_string('alpha').lower() for _ in range(60)])
self.assertGreater(len(order_value), 255)
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=gen_string('alpha'),
matcher_priority=order_value,
)
self.sc_parameters.click(self.sc_parameters.search(
sc_param.parameter, self.puppet_class.name))
priority_list_value = self.sc_parameters.wait_until_element(
locators['sc_parameters.matcher_priority']).text
self.assertEqual(order_value, priority_list_value)
    @run_only_on('sat')
    @stubbed()
    @tier2
    def test_positive_create_matcher_merge_override_puppet_value(self):
        """Merge the values of all the associated matchers + puppet default value.
        :id: 4eed74ac-0ead-4723-af4d-8638406691f6
        :steps:
            1. Check the Override checkbox.
            2. Set some default Value.
            3. Create first matcher for attribute fqdn with valid details.
            4. Create second matcher for other attribute with value as puppet
               default.
               Note - The fqdn/host should have this attribute.
            5. Create more matchers for some more attributes with value as
               puppet default.
               Note - The fqdn/host should have this attributes.
            6. Select 'Merge overrides' checkbox.
            7. Select 'Merge default' checkbox.
            8. Submit the change.
            9. Go to YAML output of associated host.
        :expectedresults:
            1. The YAML output has the value only for fqdn.
            2. The YAML output doesn't have the puppet default values of
               matchers.
            3. Duplicate values in YAML output if any are displayed.
        :caseautomation: notautomated
        :CaseLevel: Integration
        :CaseImportance: Critical
        """
        # Stub: scenario documented above, not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier2
    def test_positive_create_matcher_merge_puppet_default(self):
        """Merge the values of all the associated matchers + puppet default value.
        :id: 8e9075c1-adc5-474e-8aa3-4252f507c155
        :steps:
            1. Check the Override checkbox.
            2. Set default Value as puppet default value.
            3. Create first matcher for attribute fqdn with valid details.
            4. Create second matcher for other attribute with valid details.
               Note - The fqdn/host should have this attribute.
            5. Create more matchers for some more attributes if any.
               Note - The fqdn/host should have this attributes.
            6. Select 'Merge overrides' checkbox.
            7. Select 'Merge default' checkbox.
            8. Submit the change.
            9. Go to YAML output of associated host.
        :expectedresults:
            1. The YAML output has the values merged from all the associated
               matchers.
            2. The YAML output doesn't have the puppet default value.
            3. Duplicate values in YAML output if any are displayed.
        :caseautomation: notautomated
        :CaseLevel: Integration
        :CaseImportance: Critical
        """
        # Stub: scenario documented above, not yet automated.
    @run_only_on('sat')
    @tier1
    def test_positive_enable_merge_overrides_default_checkboxes(self):
        """Enable Merge Overrides, Merge Default and Avoid Duplicates
        checkboxes for supported types.
        :id: d6323648-4720-4c33-b25f-2b2b569d9df0
        :steps:
            1. Check the Override checkbox.
            2. Set parameter type to array/hash.
        :expectedresults: The Merge Overrides, Merge Default and Avoid
            Duplicates checkboxes are enabled to check.
        """
        sc_param = self.sc_params_list.pop()
        with Session(self):
            # Array key type is one of the types that supports merging.
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value='[20]',
                key_type='array',
            )
            self.assertIsNone(
                self.sc_parameters.wait_until_element(
                    locators['sc_parameters.matcher_error'], timeout=5))
            self.sc_parameters.click(self.sc_parameters.search(
                sc_param.parameter, self.puppet_class.name))
            # Initially only 'Merge overrides' is clickable; the dependent
            # checkboxes stay disabled until it is checked.
            self.assertTrue(self.sc_parameters.is_element_enabled(
                locators['sc_parameters.merge_overrides']))
            self.assertFalse(self.sc_parameters.is_element_enabled(
                locators['sc_parameters.merge_default']))
            self.assertFalse(self.sc_parameters.is_element_enabled(
                locators['sc_parameters.avoid_duplicates']))
            # Checking 'Merge overrides' unlocks the other two checkboxes.
            self.sc_parameters.click(locators['sc_parameters.merge_overrides'])
            self.assertTrue(self.sc_parameters.is_element_enabled(
                locators['sc_parameters.merge_default']))
            self.assertTrue(self.sc_parameters.is_element_enabled(
                locators['sc_parameters.avoid_duplicates']))
@run_only_on('sat')
@tier1
def test_negative_enable_merge_overrides_default_checkboxes(self):
"""Disable Merge Overrides, Merge Default checkboxes for non supported types.
:id: 58e42a4d-fabb-4a93-8787-3399cd6d3394
:steps:
1. Check the Override checkbox.
2. Set parameter type other than array/hash.
:expectedresults: The Merge Overrides, Merge Default checkboxes are not
enabled to check.
"""
sc_param = self.sc_params_list.pop()
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=gen_string('alpha'),
key_type='string',
)
self.sc_parameters.click(self.sc_parameters.search(
sc_param.parameter, self.puppet_class.name))
locators_list = [
'sc_parameters.merge_overrides',
'sc_parameters.merge_default',
'sc_parameters.avoid_duplicates',
]
for locator in locators_list:
self.assertFalse(
self.sc_parameters.is_element_enabled(locators[locator]))
    @run_only_on('sat')
    @tier2
    def test_positive_impact_parameter_delete_attribute(self):
        """Impact on parameter after deleting associated attribute.
        :id: 5d9bed6d-d9c0-4eb3-aaf7-bdda1f9203dd
        :steps:
            1. Override the parameter and create a matcher for some attribute.
            2. Delete the attribute.
            3. Recreate the attribute with same name as earlier.
        :expectedresults:
            1. The matcher for deleted attribute removed from parameter.
            2. On recreating attribute, the matcher should not reappear
               in parameter.
        :CaseLevel: Integration
        :caseautomation: automated
        """
        sc_param = self.sc_params_list.pop()
        hg_name = gen_string('alpha')
        matcher_value = gen_string('alpha')
        # Hostgroup in the same environment, with the puppet class attached,
        # so it can serve as a matcher attribute for the parameter.
        hostgroup = entities.HostGroup(
            name=hg_name, environment=self.env).create()
        hostgroup.add_puppetclass(
            data={'puppetclass_id': self.puppet_class.id})
        with Session(self):
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value=gen_string('alpha'),
                key_type='string',
                matcher=[{
                    'matcher_attribute': 'hostgroup={0}'.format(
                        hostgroup.name),
                    'matcher_value': matcher_value,
                }]
            )
            # The new matcher must be reflected in the overrides counter.
            self.assertTrue(self.sc_parameters.validate_smart_class_parameter(
                sc_param.parameter,
                self.puppet_class.name,
                'overrides_number',
                '1'
            ))
            matchers_list = self.sc_parameters.fetch_matcher_values(
                sc_param.parameter, self.puppet_class.name, 1)
            self.assertEqual(matchers_list[0], matcher_value)
            # Deleting the hostgroup must remove its matcher from the
            # parameter: overrides drop to zero and the matcher row is gone.
            hostgroup.delete()
            self.assertTrue(self.sc_parameters.validate_smart_class_parameter(
                sc_param.parameter,
                self.puppet_class.name,
                'overrides_number',
                '0',
            ))
            self.sc_parameters.click(self.sc_parameters.search(
                sc_param.parameter, self.puppet_class.name))
            self.assertIsNone(self.sc_parameters.wait_until_element(
                locators['sc_parameters.matcher_value'] % 1, timeout=5))
            # Recreating a hostgroup with the very same name must NOT bring
            # the old matcher back.
            hostgroup = entities.HostGroup(
                name=hg_name, environment=self.env).create()
            hostgroup.add_puppetclass(
                data={'puppetclass_id': self.puppet_class.id})
            self.assertTrue(self.sc_parameters.validate_smart_class_parameter(
                sc_param.parameter,
                self.puppet_class.name,
                'overrides_number',
                '0',
            ))
    @run_only_on('sat')
    @stubbed()
    @tier1
    def test_positive_create_override_from_attribute(self):
        """Impact on parameter on overriding the parameter value from attribute.
        :id: dcc8a9f5-191d-42d1-bff5-3083cc46cce1
        :steps:
            1. Check the override checkbox for the parameter.
            2. Associate parameter with fqdn/hostgroup.
            3. From host/hostgroup, override the parameter value.
            4. Submit the changes.
        :expectedresults:
            1. The host/hostgroup is saved with changes.
            2. New matcher for fqdn/hostgroup created inside parameter.
        :caseautomation: notautomated
        :CaseImportance: Critical
        """
        # Stub: scenario documented above, not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier1
    def test_negative_create_override_from_attribute(self):
        """No impact on parameter on overriding the parameter
        with invalid value from attribute.
        :id: 475d71cc-d52c-4a94-adb6-27ea52493176
        :steps:
            1. Check the override checkbox for the parameter.
            2. Associate parameter with fqdn/hostgroup.
            3. From host/hostgroup, Attempt to override the parameter with
               some other key type of value.
        :expectedresults:
            1. Error thrown for invalid type value.
            2. No matcher for fqdn/hostgroup is created inside parameter.
        :caseautomation: notautomated
        :CaseImportance: Critical
        """
        # Stub: scenario documented above, not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier2
    def test_positive_create_override_from_attribute_puppet_default(self):
        """Impact on parameter on overriding the parameter value
        from attribute - puppet default.
        :id: 2a013541-a4f2-4b54-b6ab-52932b17eb4a
        :steps:
            1. Check the override checkbox for the parameter.
            2. Associate parameter with fqdn/hostgroup.
            3. From host/hostgroup, override the parameter value as puppet
               default value.
            4. Submit the changes.
        :expectedresults:
            1. The host/hostgroup is saved with changes.
            2. New matcher for fqdn/hostgroup created inside parameter.
            3. In matcher, 'Use Puppet Default' checkbox is checked.
        :caseautomation: notautomated
        :CaseLevel: Integration
        :CaseImportance: Critical
        """
        # Stub: scenario documented above, not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier2
    def test_negative_create_override_from_attribute_required_checked(self):
        """Error for empty value on overriding the parameter value
        from attribute - Required checked.
        :id: c4fadfa6-0747-475f-8fc5-227c147d585a
        :steps:
            1. Check the override checkbox for the parameter.
            2. Check 'Required' checkbox in parameter.
            3. Associate parameter with fqdn/hostgroup.
            4. From host/hostgroup, Attempt to override the parameter with
               empty value.
        :expectedresults:
            1. Error thrown for empty value as the value is required to pass.
            2. The info icon changed to warning icon for that parameter.
            3. No matcher for fqdn/hostgroup created inside parameter.
        :caseautomation: notautomated
        :CaseLevel: Integration
        :CaseImportance: Critical
        """
        # Stub: scenario documented above, not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier2
    def test_positive_update_matcher_from_attribute(self):
        """Impact on parameter on editing the parameter value from attribute.
        :id: a7b3ecde-a311-421c-be4b-0f72ab1f44ba
        :steps:
            1. Check the override checkbox for the parameter.
            2. Associate parameter with fqdn/hostgroup.
            3. Create a matcher for fqdn/hostgroup with valid details.
            4. From host/hostgroup, edit the parameter value.
            5. Submit the changes.
        :expectedresults:
            1. The host/hostgroup is saved with changes.
            2. Matcher value in parameter is updated from fqdn/hostgroup.
        :caseautomation: notautomated
        :CaseLevel: Integration
        :CaseImportance: Critical
        """
        # Stub: scenario documented above, not yet automated.
    @run_only_on('sat')
    @stubbed()
    @tier2
    def test_negative_update_matcher_from_attribute(self):
        """No Impact on parameter on editing the parameter with
        invalid value from attribute.
        :id: 554de8b7-0ddb-4f2e-b406-882b13eac882
        :steps:
            1. Check the override checkbox for the parameter.
            2. Associate parameter with fqdn/hostgroup.
            3. Create a matcher for fqdn/hostgroup with valid details.
            4. From host/hostgroup, attempt to edit the parameter with invalid
               value.
        :expectedresults:
            1. Error thrown for invalid value.
            2. Matcher value in parameter is not updated from fqdn/hostgroup.
        :caseautomation: notautomated
        :CaseLevel: Integration
        :CaseImportance: Critical
        """
        # Stub: scenario documented above, not yet automated.
    @run_only_on('sat')
    @skip_if_bug_open('bugzilla', 1295179)
    @stubbed()
    @tier2
    def test_positive_update_parameter_in_nested_hostgroup(self):
        """Update parameter value in nested hostgroup.
        :id: 9aacec96-593c-4089-ad14-d4bbbbd43ef8
        :steps:
            1. Check the override checkbox for the parameter.
            2. Associate parameter with one hostgroup.
            3. Create a nested hostgroup from above parent hostgroup.
            4. And Update the value of parameter from nested hostgroup.
            5. Submit the changes.
        :expectedresults:
            1. The parameter value updated in nested hostgroup.
            2. Changes submitted successfully.
        :caseautomation: notautomated
        :CaseLevel: Integration
        :CaseImportance: Critical
        """
        # Stub: scenario documented above, not yet automated (BZ 1295179).
    @run_only_on('sat')
    @tier1
    def test_positive_hide_parameter_default_value(self):
        """Hide the default value of parameter.
        :id: 8b3d294e-58e7-454c-b19e-ead1c6a6a342
        :steps:
            1. Check the override checkbox for the parameter.
            2. Enter some valid default value.
            3. Check 'Hidden Value' checkbox.
        :expectedresults:
            1. The default value shown in hidden state.
            2. Changes submitted successfully.
            3. Matcher values shown hidden if any.
        """
        sc_param = self.sc_params_list.pop()
        initial_value = gen_string('alpha')
        with Session(self):
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value=initial_value,
                matcher=[{
                    'matcher_attribute': 'os=rhel6',
                    'matcher_value': gen_string('alpha')
                }],
                hidden_value=True,
            )
            # hidden=True makes the helper reveal the masked field so its
            # actual content can still be compared.
            value = self.sc_parameters.fetch_default_value(
                sc_param.parameter, self.puppet_class.name, hidden=True)
            self.assertEqual(value, initial_value)
            # Both the default value input and the first matcher value input
            # must carry the 'masked-input' CSS class while hidden.
            locator = self.sc_parameters.wait_until_element(
                locators['sc_parameters.default_value'])
            self.assertIn('masked-input', locator.get_attribute('class'))
            matcher_value = self.sc_parameters.wait_until_element(
                locators['sc_parameters.matcher_value'] % 1)
            self.assertIn('masked-input', matcher_value.get_attribute('class'))
    @run_only_on('sat')
    @tier1
    @upgrade
    def test_positive_unhide_parameter_default_value(self):
        """Unhide the default value of parameter.
        :id: fca478b9-eeb9-41ea-8c41-2d9601f4ea4f
        :steps:
            1. Check the override checkbox for the parameter.
            2. Enter some valid default value.
            3. Hide the value of parameter.
            4. After hiding, uncheck the 'Hidden Value' checkbox.
        :expectedresults:
            1. The default value shown in unhidden state.
            2. Changes submitted successfully.
            3. Matcher values shown unhidden if any.
        """
        sc_param = self.sc_params_list.pop()
        initial_value = gen_string('alpha')
        with Session(self):
            # First hide the value (with a matcher attached) ...
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value=initial_value,
                matcher=[{
                    'matcher_attribute': 'os=rhel6',
                    'matcher_value': gen_string('alpha')
                }],
                hidden_value=True,
            )
            # ... then uncheck 'Hidden Value' again.
            self.sc_parameters.update(
                sc_param.parameter, self.puppet_class.name, hidden_value=False)
            # NOTE(review): hidden=True presumably tells the fetch helper how
            # to read the field; the value itself is expected visible again.
            value = self.sc_parameters.fetch_default_value(
                sc_param.parameter, self.puppet_class.name, hidden=True)
            self.assertEqual(value, initial_value)
            # The 'masked-input' CSS class (hidden marker) must be gone from
            # both the default value and the matcher value inputs.
            locator = self.sc_parameters.wait_until_element(
                locators['sc_parameters.default_value'])
            self.assertNotIn('masked-input', locator.get_attribute('class'))
            matcher_value = self.sc_parameters.wait_until_element(
                locators['sc_parameters.matcher_value'] % 1)
            self.assertNotIn(
                'masked-input', matcher_value.get_attribute('class'))
    @run_only_on('sat')
    @stubbed()
    @tier2
    # NOTE: stub only -- no automation yet; steps are documented for a manual run.
    def test_positive_hide_default_value_in_attribute(self):
        """Hide the default value of parameter in attribute.
        :id: a44d35df-877c-469b-b82c-e5f85e592e8d
        :steps:
            1. Check the override checkbox for the parameter.
            2. Enter some valid default value.
            3. Hide the default Value.
            4. Submit the changes.
            5. Associate parameter on host/hostgroup.
        :expectedresults:
            1. In host/hostgroup, the parameter value shown in hidden state.
            2. The button for unhiding the value is displayed and accessible.
            3. The button for overriding the value is displayed and
               accessible.
        :caseautomation: notautomated
        :CaseLevel: Integration
        """
    @run_only_on('sat')
    @stubbed()
    @tier2
    # NOTE: stub only -- no automation yet; steps are documented for a manual run.
    def test_positive_unhide_default_value_in_attribute(self):
        """Unhide the default value of parameter in attribute.
        :id: 76611ba0-e583-4c4b-b794-005a21240d26
        :steps:
            1. Check the override checkbox for the parameter.
            2. Enter some valid default value.
            3. Hide the default Value.
            4. Submit the changes.
            5. Associate parameter on host/hostgroup.
            6. In host/hostgroup, Click Unhide button icon.
        :expectedresults:
            1. In host/hostgroup, the parameter value shown in unhidden state.
            2. The button for hiding the value is displayed and accessible.
            3. The button for overriding the value is displayed and
               accessible.
            4. In parameter, the default value is still hidden.
        :caseautomation: notautomated
        :CaseLevel: Integration
        """
@run_only_on('sat')
@tier1
def test_positive_update_hidden_value_in_parameter(self):
"""Update the hidden default value of parameter.
:id: e7a6e172-d0b9-48e4-81ae-16d866d6f63b
:steps:
1. Check the override checkbox for the parameter.
2. Enter some valid default value.
3. Hide the default Value.
4. Again update the default value.
5. Submit the changes.
:expectedresults:
1. The parameter default value is updated.
2. The parameter default value displayed as hidden.
:CaseImportance: Critical
"""
sc_param = self.sc_params_list.pop()
new_value = gen_string('alpha')
with Session(self):
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
override=True,
default_value=gen_string('alpha'),
hidden_value=True,
)
self.sc_parameters.update(
sc_param.parameter,
self.puppet_class.name,
default_value=new_value,
)
value = self.sc_parameters.fetch_default_value(
sc_param.parameter, self.puppet_class.name, hidden=True)
self.assertEqual(value, new_value)
locator = self.sc_parameters.wait_until_element(
locators['sc_parameters.default_value'])
self.assertIn('masked-input', locator.get_attribute('class'))
    @run_only_on('sat')
    @stubbed()
    @tier2
    # NOTE: stub only -- no automation yet; steps are documented for a manual run.
    def test_positive_update_hidden_value_in_attribute(self):
        """Update the hidden default value of parameter in attribute.
        :id: c10c20bd-0284-4e5d-b789-fddd3b81b81b
        :steps:
            1. Check the override checkbox for the parameter.
            2. Enter some valid default value.
            3. Hide the default Value.
            4. Submit the changes.
            5. Associate parameter on host/hostgroup.
            6. In host/hostgroup, update the parameter value.
        :expectedresults:
            1. In host/hostgroup, the parameter value is updated.
            2. The parameter Value displayed as hidden.
            3. In parameter, new matcher created for fqdn/hostgroup.
            4. And the value shown hidden.
        :caseautomation: notautomated
        :CaseLevel: Integration
        :CaseImportance: Critical
        """
    @run_only_on('sat')
    @tier1
    def test_positive_hide_empty_default_value(self):
        """Hiding the empty default value.
        :id: 8b02e575-c7bf-45d1-a5eb-4640b65a4d60
        :steps:
            1. Check the override checkbox for the parameter.
            2. Don't enter any value, keep blank.
            3. Check the 'Hidden Value' icon.
            4. Create a matcher with some value.
        :expectedresults:
            1. The 'Hidden Value' checkbox is enabled to check.
            2. The default value shows empty on hide.
            3. Matcher Value shown as hidden.
        :caseautomation: automated
        """
        sc_param = self.sc_params_list.pop()
        matcher_value = gen_string('alpha')
        with Session(self):
            # Hide an *empty* default value while attaching one matcher.
            self.sc_parameters.update(
                sc_param.parameter,
                self.puppet_class.name,
                override=True,
                default_value='',
                matcher=[{
                    'matcher_attribute': 'os=rhel6',
                    'matcher_value': matcher_value
                }],
                hidden_value=True,
            )
            # The default value must remain empty even when hidden.
            value = self.sc_parameters.fetch_default_value(
                sc_param.parameter, self.puppet_class.name, hidden=True)
            self.assertEqual(value, '')
            # 'masked-input' CSS class marks a field as hidden.
            locator = self.sc_parameters.wait_until_element(
                locators['sc_parameters.default_value'])
            self.assertIn(
                'masked-input', locator.get_attribute('class'))
            # The matcher value is masked too, but keeps its content.
            value = self.sc_parameters.wait_until_element(
                locators['sc_parameters.matcher_value'] % 1)
            self.assertIn(
                'masked-input', value.get_attribute('class'))
            self.assertEqual(value.get_attribute('value'), matcher_value)
| gpl-3.0 |
nexdatas/configtool | test/ComponentItem_test.py | 1 | 23298 | #!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <jkotan@mail.desy.de>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file ComponentItemTest.py
# unittests for field Tags running Tango Server
#
import unittest
import os
import sys
import random
import struct
import binascii
import time
from PyQt5.QtXml import QDomNode, QDomDocument
from nxsconfigtool.ComponentItem import ComponentItem
# True when the interpreter runs on a 64-bit machine (pointer size == 8)
IS64BIT = (struct.calcsize("P") == 8)
if sys.version_info > (3,):
    # Python 3 dropped `long`; alias it to `int` for 2/3 compatibility
    long = int
# test fixture
class ComponentItemTest(unittest.TestCase):
    # constructor
    # \param methodName name of the test method
    def __init__(self, methodName):
        unittest.TestCase.__init__(self, methodName)
        # platform-dependent numeric type names (64- or 32-bit wide)
        self._bint = "int64" if IS64BIT else "int32"
        self._buint = "uint64" if IS64BIT else "uint32"
        self._bfloat = "float64" if IS64BIT else "float32"
        # MessageBox text
        self.text = None
        # MessageBox title
        self.title = None
        # action status
        self.performed = False
        # Random seed: taken from the OS entropy pool when available,
        # from the clock otherwise.  It is printed by setUp() so a failing
        # run can be reproduced by hard-coding the seed below.
        try:
            self.__seed = long(binascii.hexlify(os.urandom(16)), 16)
        except NotImplementedError:
            self.__seed = long(time.time() * 256)
        # self.__seed =335783554629280825854815889576355181078
        # self.__seed =56405821691954067238837069340540387163
        self.__rnd = random.Random(self.__seed)
    # test starter
    # \brief Common set up
    def setUp(self):
        print("\nsetting up...")
        # print the seed so a failing run can be reproduced (see __init__)
        print("SEED = %s" % self.__seed)
    # test closer
    # \brief Common tear down
    def tearDown(self):
        # nothing to clean up; the fixture only prints a progress marker
        print("tearing down ...")
# constructor test
# \brief It tests default settings
def test_constructor(self):
fun = sys._getframe().f_code.co_name
qdn = QDomNode()
print("Run: %s.%s() " % (self.__class__.__name__, fun))
ci = ComponentItem(qdn)
self.assertEqual(ci.node, qdn)
self.assertEqual(ci.childNumber(), 0)
self.assertEqual(ci.child(0), None)
self.assertEqual(ci.child(1), None)
# constructor test
# \brief It tests default settings
def test_constructor_with_dom(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 10)
kds = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
qdn.appendChild(kds[-1])
ci = ComponentItem(qdn)
self.assertEqual(ci.parent, None)
self.assertEqual(ci.node, qdn)
self.assertEqual(ci.childNumber(), 0)
self.assertEqual(ci.node.nodeName(), nname)
for k in range(nkids):
self.assertTrue(isinstance(ci.child(k), ComponentItem))
self.assertTrue(isinstance(ci.child(k).parent, ComponentItem))
self.assertEqual(ci.child(k).childNumber(), k)
self.assertEqual(ci.child(k).node, kds[k])
self.assertEqual(ci.child(k).parent.node, qdn)
self.assertEqual(ci.child(k).node.nodeName(), "kid%s" % k)
self.assertEqual(ci.child(k).parent, ci)
    # constructor test
    # \brief It tests default settings on a two-level (kids + grandkids) tree
    def test_constructor_with_dom_nested(self):
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        doc = QDomDocument()
        nname = "definition"
        qdn = doc.createElement(nname)
        doc.appendChild(qdn)
        nkids = self.__rnd.randint(1, 10)
        kds = []   # DOM kid elements
        gkds = []  # per-kid lists of DOM grandkid elements
        ngks = []  # number of grandkids of each kid
        for n in range(nkids):
            kds.append(doc.createElement("kid%s" % n))
            qdn.appendChild(kds[-1])
            ngkids = self.__rnd.randint(1, 10)
            gkds.append([])
            ngks.append(ngkids)
            for g in range(ngkids):
                gkds[n].append(doc.createElement("grandkid%s" % g))
                kds[-1].appendChild(gkds[n][-1])
        # print doc
        ci = ComponentItem(qdn)
        self.assertEqual(ci.parent, None)
        self.assertEqual(ci.node, qdn)
        self.assertEqual(ci.childNumber(), 0)
        self.assertEqual(ci.node.nodeName(), nname)
        for k in range(nkids):
            ks = ci.child(k)
            self.assertTrue(isinstance(ks, ComponentItem))
            self.assertTrue(isinstance(ks.parent, ComponentItem))
            self.assertEqual(ks.childNumber(), k)
            self.assertEqual(ks.node, kds[k])
            self.assertEqual(ks.parent.node, qdn)
            self.assertEqual(ks.node.nodeName(), "kid%s" % k)
            self.assertEqual(ks.parent, ci)
            for g in range(ngks[k]):
                self.assertTrue(isinstance(ks.child(g), ComponentItem))
                self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
                self.assertEqual(ks.child(g).childNumber(), g)
                self.assertEqual(ks.child(g).node, gkds[k][g])
                self.assertEqual(ks.child(g).parent.node, ks.node)
                self.assertEqual(ks.child(g).node.nodeName(), "grandkid%s" % g)
                self.assertEqual(ks.child(g).parent, ks)
            # out-of-range child indices must yield None
            self.assertEqual(ks.child(ngks[k]), None)
            self.assertEqual(ks.child(-1), None)
        self.assertEqual(ci.child(nkids), None)
        self.assertEqual(ci.child(-1), None)
    # constructor test
    # \brief Same as ..._with_dom_nested, but kids are visited in reverse
    #        index order (later children are requested before earlier ones)
    def test_constructor_with_dom_nested_reverse(self):
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        doc = QDomDocument()
        nname = "definition"
        qdn = doc.createElement(nname)
        doc.appendChild(qdn)
        nkids = self.__rnd.randint(1, 10)
        kds = []   # DOM kid elements
        gkds = []  # per-kid lists of DOM grandkid elements
        ngks = []  # number of grandkids of each kid
        for n in range(nkids):
            kds.append(doc.createElement("kid%s" % n))
            qdn.appendChild(kds[-1])
            ngkids = self.__rnd.randint(1, 10)
            gkds.append([])
            ngks.append(ngkids)
            for g in range(ngkids):
                gkds[n].append(doc.createElement("grandkid%s" % g))
                kds[-1].appendChild(gkds[n][-1])
        # print doc
        ci = ComponentItem(qdn)
        self.assertEqual(ci.parent, None)
        self.assertEqual(ci.node, qdn)
        self.assertEqual(ci.childNumber(), 0)
        self.assertEqual(ci.node.nodeName(), nname)
        for k in reversed(range(nkids)):
            ks = ci.child(k)
            self.assertTrue(isinstance(ks, ComponentItem))
            self.assertTrue(isinstance(ks.parent, ComponentItem))
            self.assertEqual(ks.childNumber(), k)
            self.assertEqual(ks.node, kds[k])
            self.assertEqual(ks.parent.node, qdn)
            self.assertEqual(ks.node.nodeName(), "kid%s" % k)
            self.assertEqual(ks.parent, ci)
            for g in range(ngks[k]):
                self.assertTrue(isinstance(ks.child(g), ComponentItem))
                self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
                self.assertEqual(ks.child(g).childNumber(), g)
                self.assertEqual(ks.child(g).node, gkds[k][g])
                self.assertEqual(ks.child(g).parent.node, ks.node)
                self.assertEqual(ks.child(g).node.nodeName(), "grandkid%s" % g)
                self.assertEqual(ks.child(g).parent, ks)
            # out-of-range child indices must yield None
            self.assertEqual(ks.child(ngks[k]), None)
            self.assertEqual(ks.child(-1), None)
        self.assertEqual(ci.child(nkids), None)
        self.assertEqual(ci.child(-1), None)
    # constructor test
    # \brief It tests removal of one randomly chosen kid
    def test_constructor_remove_kids(self):
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        doc = QDomDocument()
        nname = "definition"
        qdn = doc.createElement(nname)
        doc.appendChild(qdn)
        nkids = self.__rnd.randint(1, 20)
        kds = []   # DOM kid elements
        gkds = []  # per-kid lists of DOM grandkid elements
        ngks = []  # number of grandkids of each kid
        for n in range(nkids):
            kds.append(doc.createElement("kid%s" % n))
            qdn.appendChild(kds[-1])
            ngkids = self.__rnd.randint(1, 20)
            gkds.append([])
            ngks.append(ngkids)
            for g in range(ngkids):
                gkds[n].append(doc.createElement("grandkid%s" % g))
                kds[-1].appendChild(gkds[n][-1])
        # print doc
        ci = ComponentItem(qdn)
        self.assertEqual(ci.parent, None)
        self.assertEqual(ci.node, qdn)
        self.assertEqual(ci.childNumber(), 0)
        self.assertEqual(ci.node.nodeName(), nname)
        for k in range(nkids):
            ks = ci.child(k)
            self.assertTrue(isinstance(ks, ComponentItem))
            self.assertTrue(isinstance(ks.parent, ComponentItem))
            self.assertEqual(ks.childNumber(), k)
            self.assertEqual(ks.node, kds[k])
            self.assertEqual(ks.parent.node, qdn)
            self.assertEqual(ks.node.nodeName(), "kid%s" % k)
            self.assertEqual(ks.parent, ci)
            for g in range(ngks[k]):
                self.assertTrue(isinstance(ks.child(g), ComponentItem))
                self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
                self.assertEqual(ks.child(g).childNumber(), g)
                self.assertEqual(ks.child(g).node, gkds[k][g])
                self.assertEqual(ks.child(g).parent.node, ks.node)
                self.assertEqual(ks.child(g).node.nodeName(), "grandkid%s" % g)
                self.assertEqual(ks.child(g).parent, ks)
        # remove one random kid; out-of-range removals must be rejected
        rmvd = self.__rnd.randint(1, nkids) - 1
        kd = ci.child(rmvd)
        self.assertEqual(ci.removeChildren(rmvd, 1), True)
        self.assertEqual(ci.removeChildren(-1, 1), False)
        self.assertEqual(ci.removeChildren(nkids, 1), False)
        qdn.removeChild(kd.node)
        for k in range(nkids):
            if k == rmvd:
                continue
            # kk: child index after the removal shifted later kids down by one
            kk = k if k < rmvd else k - 1
            ks = ci.child(kk)
            self.assertTrue(isinstance(ks, ComponentItem))
            self.assertTrue(isinstance(ks.parent, ComponentItem))
            self.assertEqual(ks.childNumber(), kk)
            self.assertEqual(ks.node, kds[k])
            self.assertEqual(ks.parent.node, qdn)
            self.assertEqual(ks.node.nodeName(), "kid%s" % k)
            self.assertEqual(ks.parent, ci)
            for g in range(ngks[k]):
                self.assertTrue(isinstance(ks.child(g), ComponentItem))
                self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
                self.assertEqual(ks.child(g).childNumber(), g)
                self.assertEqual(ks.child(g).node, gkds[k][g])
                self.assertEqual(ks.child(g).parent.node, ks.node)
                self.assertEqual(ks.child(g).node.nodeName(), "grandkid%s" % g)
                self.assertEqual(ks.child(g).parent, ks)
# constructor test
# \brief It tests default settings
def test_constructor_remove_more_kids(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
doc = QDomDocument()
nname = "definition"
qdn = doc.createElement(nname)
doc.appendChild(qdn)
nkids = self.__rnd.randint(1, 20)
kds = []
gkds = []
ngks = []
for n in range(nkids):
kds.append(doc.createElement("kid%s" % n))
qdn.appendChild(kds[-1])
ngkids = self.__rnd.randint(1, 20)
gkds.append([])
ngks.append(ngkids)
for g in range(ngkids):
gkds[n].append(doc.createElement("grandkid%s" % g))
kds[-1].appendChild(gkds[n][-1])
# print doc
ci = ComponentItem(qdn)
self.assertEqual(ci.parent, None)
self.assertEqual(ci.node, qdn)
self.assertEqual(ci.childNumber(), 0)
self.assertEqual(ci.node.nodeName(), nname)
for k in range(nkids):
ks = ci.child(k)
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), k)
self.assertEqual(ks.node, kds[k])
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "kid%s" % k)
self.assertEqual(ks.parent, ci)
for g in range(ngks[k]):
self.assertTrue(isinstance(ks.child(g), ComponentItem))
self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
self.assertEqual(ks.child(g).childNumber(), g)
self.assertEqual(ks.child(g).node, gkds[k][g])
self.assertEqual(ks.child(g).parent.node, ks.node)
self.assertEqual(ks.child(g).node.nodeName(), "grandkid%s" % g)
self.assertEqual(ks.child(g).parent, ks)
rmvd = self.__rnd.randint(0, nkids - 1)
nrm = self.__rnd.randint(1, nkids-rmvd)
kd = []
for r in range(nrm):
kd.append(ci.child(rmvd+r))
self.assertEqual(ci.removeChildren(rmvd, nrm), True)
for r in range(nrm):
qdn.removeChild(kd[r].node)
for k in range(nkids):
if k >= rmvd and k <= rmvd + nrm:
continue
kk = k if k < rmvd else k - nrm
ks = ci.child(kk)
self.assertTrue(isinstance(ks, ComponentItem))
self.assertTrue(isinstance(ks.parent, ComponentItem))
self.assertEqual(ks.childNumber(), kk)
self.assertEqual(ks.node, kds[k])
self.assertEqual(ks.parent.node, qdn)
self.assertEqual(ks.node.nodeName(), "kid%s" % k)
self.assertEqual(ks.parent, ci)
for g in range(ngks[k]):
self.assertTrue(isinstance(ks.child(g), ComponentItem))
self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
self.assertEqual(ks.child(g).childNumber(), g)
self.assertEqual(ks.child(g).node, gkds[k][g])
self.assertEqual(ks.child(g).parent.node, ks.node)
self.assertEqual(ks.child(g).node.nodeName(), "grandkid%s" % g)
self.assertEqual(ks.child(g).parent, ks)
    # constructor test
    # \brief It tests insertion of a single kid at a random position
    def test_constructor_insert_kids(self):
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        doc = QDomDocument()
        nname = "definition"
        qdn = doc.createElement(nname)
        doc.appendChild(qdn)
        nkids = self.__rnd.randint(1, 5)
        kds = []   # DOM kid elements
        gkds = []  # per-kid lists of DOM grandkid elements
        ngks = []  # number of grandkids of each kid
        for n in range(nkids):
            kds.append(doc.createElement("kid%s" % n))
            qdn.appendChild(kds[-1])
            ngkids = self.__rnd.randint(1, 5)
            gkds.append([])
            ngks.append(ngkids)
            for g in range(ngkids):
                gkds[n].append(doc.createElement("grandkid%s" % g))
                kds[-1].appendChild(gkds[n][-1])
        # print doc
        ci = ComponentItem(qdn)
        self.assertEqual(ci.parent, None)
        self.assertEqual(ci.node, qdn)
        self.assertEqual(ci.childNumber(), 0)
        self.assertEqual(ci.node.nodeName(), nname)
        for k in range(nkids):
            ks = ci.child(k)
            self.assertTrue(isinstance(ks, ComponentItem))
            self.assertTrue(isinstance(ks.parent, ComponentItem))
            self.assertEqual(ks.childNumber(), k)
            self.assertEqual(ks.node, kds[k])
            self.assertEqual(ks.parent.node, qdn)
            self.assertEqual(ks.node.nodeName(), "kid%s" % k)
            self.assertEqual(ks.parent, ci)
            for g in range(ngks[k]):
                self.assertTrue(isinstance(ks.child(g), ComponentItem))
                self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
                self.assertEqual(ks.child(g).childNumber(), g)
                self.assertEqual(ks.child(g).node, gkds[k][g])
                self.assertEqual(ks.child(g).parent.node, ks.node)
                self.assertEqual(ks.child(g).node.nodeName(), "grandkid%s" % g)
                self.assertEqual(ks.child(g).parent, ks)
        # insert one new kid at a random index (nkids means "append")
        insd = self.__rnd.randint(0, nkids)
        inkd = doc.createElement("insertedkid")
        if insd == nkids:
            qdn.insertAfter(inkd, ci.child(nkids - 1).node)
        else:
            qdn.insertBefore(inkd, ci.child(insd).node)
        self.assertEqual(ci.insertChildren(insd, 1), True)
        self.assertEqual(ci.insertChildren(-1, 1), False)
        self.assertEqual(ci.insertChildren(nkids+2, 1), False)
        # NOTE(review): after the insertion there are nkids + 1 children,
        # but this loop only checks range(nkids), so the last child is
        # never verified -- TODO consider range(nkids + 1).
        for k in range(nkids):
            if k == insd:
                ks = ci.child(k)
                self.assertTrue(isinstance(ks, ComponentItem))
                self.assertTrue(isinstance(ks.parent, ComponentItem))
                self.assertEqual(ks.childNumber(), k)
                self.assertEqual(ks.node, inkd)
                self.assertEqual(ks.parent.node, qdn)
                self.assertEqual(ks.node.nodeName(), "insertedkid")
                self.assertEqual(ks.parent, ci)
                continue
            # kk: index into the original kds list before the insertion
            kk = k if k < insd else k-1
            ks = ci.child(k)
            self.assertTrue(isinstance(ks, ComponentItem))
            self.assertTrue(isinstance(ks.parent, ComponentItem))
            self.assertEqual(ks.childNumber(), k)
            self.assertEqual(ks.node, kds[kk])
            self.assertEqual(ks.parent.node, qdn)
            self.assertEqual(ks.node.nodeName(), "kid%s" % kk)
            self.assertEqual(ks.parent, ci)
            for g in range(ngks[kk]):
                self.assertTrue(isinstance(ks.child(g), ComponentItem))
                self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
                self.assertEqual(ks.child(g).childNumber(), g)
                self.assertEqual(ks.child(g).node, gkds[kk][g])
                self.assertEqual(ks.child(g).parent.node, ks.node)
                self.assertEqual(ks.child(g).node.nodeName(), "grandkid%s" % g)
                self.assertEqual(ks.child(g).parent, ks)
    # constructor test
    # \brief It tests insertion of several kids at a random position
    def test_constructor_insert_more_kids(self):
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        doc = QDomDocument()
        nname = "definition"
        qdn = doc.createElement(nname)
        doc.appendChild(qdn)
        nkids = self.__rnd.randint(1, 5)
        kds = []   # DOM kid elements
        gkds = []  # per-kid lists of DOM grandkid elements
        ngks = []  # number of grandkids of each kid
        for n in range(nkids):
            kds.append(doc.createElement("kid%s" % n))
            qdn.appendChild(kds[-1])
            ngkids = self.__rnd.randint(1, 5)
            gkds.append([])
            ngks.append(ngkids)
            for g in range(ngkids):
                gkds[n].append(doc.createElement("grandkid%s" % g))
                kds[-1].appendChild(gkds[n][-1])
        ci = ComponentItem(qdn)
        self.assertEqual(ci.parent, None)
        self.assertEqual(ci.node, qdn)
        self.assertEqual(ci.childNumber(), 0)
        self.assertEqual(ci.node.nodeName(), nname)
        for k in range(nkids):
            ks = ci.child(k)
            self.assertTrue(isinstance(ks, ComponentItem))
            self.assertTrue(isinstance(ks.parent, ComponentItem))
            self.assertEqual(ks.childNumber(), k)
            self.assertEqual(ks.node, kds[k])
            self.assertEqual(ks.parent.node, qdn)
            self.assertEqual(ks.node.nodeName(), "kid%s" % k)
            self.assertEqual(ks.parent, ci)
            for g in range(ngks[k]):
                self.assertTrue(isinstance(ks.child(g), ComponentItem))
                self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
                self.assertEqual(ks.child(g).childNumber(), g)
                self.assertEqual(ks.child(g).node, gkds[k][g])
                self.assertEqual(ks.child(g).parent.node, ks.node)
                self.assertEqual(
                    ks.child(g).node.nodeName(), "grandkid%s" % g)
                self.assertEqual(ks.child(g).parent, ks)
        # insert nin new kids starting at index insd (nkids means "append")
        insd = self.__rnd.randint(0, nkids)
        nin = self.__rnd.randint(1, 5)
        inkd = []
        for n in range(nin):
            inkd.append(doc.createElement("insertedkid%s" % n))
            # NOTE(review): `insd == nkids+n` only holds for insd == nkids
            # and n == 0; for n >= 1 the `else` branch calls
            # ci.child(insd) on a freshly grown DOM -- presumably
            # ComponentItem.child() re-reads the DOM; confirm.
            if insd == nkids+n:
                if n == 0:
                    qdn.insertAfter(inkd[n], ci.child(nkids - 1).node)
                else:
                    qdn.insertAfter(inkd[n], inkd[n - 1])
            else:
                qdn.insertBefore(inkd[n], ci.child(insd).node)
        self.assertEqual(ci.insertChildren(insd, nin), True)
        # NOTE(review): there are now nkids + nin children, but the loop only
        # checks range(nkids), leaving the last nin original kids unverified
        # -- TODO consider range(nkids + nin).
        for k in range(nkids):
            if k >= insd and k < insd + nin:
                # mnin: index within the inserted run
                mnin = k - insd
                ks = ci.child(k)
                self.assertTrue(isinstance(ks, ComponentItem))
                self.assertTrue(isinstance(ks.parent, ComponentItem))
                self.assertEqual(ks.childNumber(), k)
                self.assertEqual(ks.node, inkd[mnin])
                self.assertEqual(ks.parent.node, qdn)
                self.assertEqual(ks.node.nodeName(), "insertedkid%s" % mnin)
                self.assertEqual(ks.parent, ci)
                continue
            # kk: index into the original kds list before the insertion
            kk = k if k < insd else k-nin
            ks = ci.child(k)
            self.assertTrue(isinstance(ks, ComponentItem))
            self.assertTrue(isinstance(ks.parent, ComponentItem))
            self.assertEqual(ks.childNumber(), k)
            self.assertEqual(ks.node, kds[kk])
            self.assertEqual(ks.parent.node, qdn)
            self.assertEqual(ks.node.nodeName(), "kid%s" % kk)
            self.assertEqual(ks.parent, ci)
            for g in range(ngks[kk]):
                self.assertTrue(isinstance(ks.child(g), ComponentItem))
                self.assertTrue(isinstance(ks.child(g).parent, ComponentItem))
                self.assertEqual(ks.child(g).childNumber(), g)
                self.assertEqual(ks.child(g).node, gkds[kk][g])
                self.assertEqual(ks.child(g).parent.node, ks.node)
                self.assertEqual(
                    ks.child(g).node.nodeName(), "grandkid%s" % g)
                self.assertEqual(ks.child(g).parent, ks)
# allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
chienlieu2017/it_management | odoo/odoo/addons/base/res/res_font.py | 20 | 6052 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from reportlab.pdfbase import ttfonts
from odoo import api, fields, models
from odoo.report.render.rml2pdf import customfonts
"""This module allows the mapping of some system-available TTF fonts to
the reportlab engine.
This file could be customized per distro (although most Linux/Unix ones)
should have the same filenames, only need the code below).
Due to an awful configuration that ships with reportlab at many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
# Alternatives for the [broken] builtin PDF fonts. Default order chosen to match
# the pre-v8 mapping from odoo.report.render.rml2pdf.customfonts.CustomTTFonts.
# Within each entry the alternatives are tried left to right, first match wins.
# Format: [ (BuiltinFontFamily, mode, [AlternativeFontName, ...]), ...]
BUILTIN_ALTERNATIVES = [
    ('Helvetica', "normal", ["DejaVuSans", "LiberationSans"]),
    ('Helvetica', "bold", ["DejaVuSans-Bold", "LiberationSans-Bold"]),
    ('Helvetica', 'italic', ["DejaVuSans-Oblique", "LiberationSans-Italic"]),
    ('Helvetica', 'bolditalic', ["DejaVuSans-BoldOblique", "LiberationSans-BoldItalic"]),
    ('Times', 'normal', ["LiberationSerif", "DejaVuSerif"]),
    ('Times', 'bold', ["LiberationSerif-Bold", "DejaVuSerif-Bold"]),
    ('Times', 'italic', ["LiberationSerif-Italic", "DejaVuSerif-Italic"]),
    ('Times', 'bolditalic', ["LiberationSerif-BoldItalic", "DejaVuSerif-BoldItalic"]),
    ('Courier', 'normal', ["FreeMono", "DejaVuSansMono"]),
    ('Courier', 'bold', ["FreeMonoBold", "DejaVuSansMono-Bold"]),
    ('Courier', 'italic', ["FreeMonoOblique", "DejaVuSansMono-Oblique"]),
    ('Courier', 'bolditalic', ["FreeMonoBoldOblique", "DejaVuSansMono-BoldOblique"]),
]
class ResFont(models.Model):
    """Registry of TTF fonts found on the system, mirrored into the
    in-memory ``customfonts.CustomTTFonts`` list used by the RML engine."""
    _name = "res.font"
    _description = 'Fonts available'
    _order = 'family,name,id'
    _rec_name = 'family'

    family = fields.Char(string="Font family", required=True)
    name = fields.Char(string="Font Name", required=True)
    path = fields.Char(required=True)
    mode = fields.Char(required=True)

    _sql_constraints = [
        ('name_font_uniq', 'unique(family, name)', 'You can not register two fonts with the same name'),
    ]

    @api.model
    def font_scan(self, lazy=False):
        """Action of loading fonts.

        In lazy mode the filesystem is scanned only if there is no font in
        the database, and a sync is done if ``customfonts.CustomTTFonts``
        is empty.  In non-lazy mode the filesystem scan is always forced.

        :param lazy: when True, avoid the (slow) disk scan if possible
        :return: True
        """
        if lazy:
            # lazy loading, scan only if no fonts in db
            fonts = self.search([('path', '!=', '/dev/null')])
            if not fonts:
                # no scan yet or no font found on the system, scan the filesystem
                self._scan_disk()
            elif len(customfonts.CustomTTFonts) == 0:
                # fonts in db but the in-memory list is empty (e.g. new worker)
                self._sync()
        else:
            self._scan_disk()
        return True

    def _scan_disk(self):
        """Scan the file system and register the result in database.

        Newly found fonts are created, fonts that disappeared from the disk
        are unlinked, and the in-memory list is re-synced at the end.
        """
        found_fonts = []
        for font_path in customfonts.list_all_sysfonts():
            try:
                font = ttfonts.TTFontFile(font_path)
                _logger.debug("Found font %s at %s", font.name, font_path)
                found_fonts.append((font.familyName, font.name, font_path, font.styleName))
            # `except E, ex` is Python-2-only; `as` works on both 2.6+ and 3
            except Exception as ex:
                # TTFontFile raises on unreadable/corrupt files; skip them
                _logger.warning("Could not register Font %s: %s", font_path, ex)

        for family, name, path, mode in found_fonts:
            if not self.search([('family', '=', family), ('name', '=', name)]):
                self.create({'family': family, 'name': name, 'path': path, 'mode': mode})

        # remove fonts not present on the disk anymore
        existing_font_names = [name for (family, name, path, mode) in found_fonts]
        # Remove inexistent fonts
        self.search([('name', 'not in', existing_font_names), ('path', '!=', '/dev/null')]).unlink()

        self.pool.signal_caches_change()

        return self._sync()

    def _sync(self):
        """Set the customfonts.CustomTTFonts list to the content of the database"""
        customfonts.CustomTTFonts = []
        local_family_modes = set()
        local_font_paths = {}
        for font in self.search([('path', '!=', '/dev/null')]):
            local_family_modes.add((font.family, font.mode))
            local_font_paths[font.name] = font.path
            customfonts.CustomTTFonts.append((font.family, font.name, font.path, font.mode))

        # Attempt to remap the builtin fonts (Helvetica, Times, Courier) to better alternatives
        # if available, because they only support a very small subset of unicode
        # (missing 'č' for example)
        for builtin_font_family, mode, alts in BUILTIN_ALTERNATIVES:
            if (builtin_font_family, mode) not in local_family_modes:
                # No local font exists with that name, try alternatives
                for altern_font in alts:
                    if local_font_paths.get(altern_font):
                        altern_def = (builtin_font_family, altern_font,
                                      local_font_paths[altern_font], mode)
                        customfonts.CustomTTFonts.append(altern_def)
                        _logger.debug("Builtin remapping %r", altern_def)
                        break
                else:
                    # BUGFIX: added the missing space between the two
                    # concatenated literals ("mode).Consider" previously)
                    _logger.warning("No local alternative found for builtin font `%s` (%s mode). "
                                    "Consider installing the DejaVu fonts if you have problems "
                                    "with unicode characters in RML reports",
                                    builtin_font_family, mode)
        return True

    @classmethod
    def clear_caches(cls):
        """Force worker to resync at next report loading by setting an empty font list"""
        customfonts.CustomTTFonts = []
        return super(ResFont, cls).clear_caches()
| gpl-3.0 |
Therp/odoo | openerp/addons/base/tests/test_ir_filters.py | 285 | 11000 | # -*- coding: utf-8 -*-
import functools
from openerp import exceptions
from openerp.tests import common
def noid(d):
    """ Removes values that are not relevant for the test comparisons """
    # drop the database-generated keys in place, then hand the dict back
    for key in ('id', 'action_id'):
        d.pop(key, None)
    return d
class FiltersCase(common.TransactionCase):
    """Base case providing a helper to mass-create records for a model."""

    def build(self, model, *args):
        """Create one record of ``model`` (as admin) per values dict in ``args``."""
        Model = self.registry(model)
        # renamed local from `vars` to avoid shadowing the builtin
        for values in args:
            Model.create(self.cr, common.ADMIN_USER_ID, values, {})
class TestGetFilters(FiltersCase):
    """get_filters() must return the current user's own filters and the
    global (user-less) ones, but never filters owned by a third party."""

    def setUp(self):
        super(TestGetFilters, self).setUp()
        # (id, display_name) pair of the demo user, as returned by name_search
        self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
        self.USER_ID = self.USER[0]

    def test_own_filters(self):
        # all four filters belong to the demo user: all must be returned
        self.build(
            'ir.filters',
            dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='d', user_id=self.USER_ID, model_id='ir.filters'))

        filters = self.registry('ir.filters').get_filters(
            self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', is_default=False, user_id=self.USER, domain='[]', context='{}'),
            dict(name='b', is_default=False, user_id=self.USER, domain='[]', context='{}'),
            dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'),
            dict(name='d', is_default=False, user_id=self.USER, domain='[]', context='{}'),
        ])

    def test_global_filters(self):
        # user_id=False makes a filter global: visible to every user
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', user_id=False, model_id='ir.filters'),
            dict(name='c', user_id=False, model_id='ir.filters'),
            dict(name='d', user_id=False, model_id='ir.filters'),
        )

        filters = self.registry('ir.filters').get_filters(
            self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'),
            dict(name='b', is_default=False, user_id=False, domain='[]', context='{}'),
            dict(name='c', is_default=False, user_id=False, domain='[]', context='{}'),
            dict(name='d', is_default=False, user_id=False, domain='[]', context='{}'),
        ])

    def test_no_third_party_filters(self):
        # 'b' and 'd' belong to the admin: the demo user must only see the
        # global filter 'a' and its own filter 'c'
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', user_id=common.ADMIN_USER_ID, model_id='ir.filters'),
            dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='d', user_id=common.ADMIN_USER_ID, model_id='ir.filters') )

        filters = self.registry('ir.filters').get_filters(
            self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'),
            dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'),
        ])
class TestOwnDefaults(FiltersCase):
    def setUp(self):
        super(TestOwnDefaults, self).setUp()
        # (id, display_name) pair of the demo user, as returned by name_search
        self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
        self.USER_ID = self.USER[0]
def test_new_no_filter(self):
"""
When creating a @is_default filter with no existing filter, that new
filter gets the default flag
"""
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=True,
domain='[]', context='{}')
])
def test_new_filter_not_default(self):
"""
When creating a @is_default filter with existing non-default filters,
the new filter gets the flag
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'),
])
def test_new_filter_existing_default(self):
"""
When creating a @is_default filter where an existing filter is already
@is_default, the flag should be *moved* from the old to the new filter
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'),
])
def test_update_filter_set_default(self):
"""
When updating an existing filter to @is_default, if an other filter
already has the flag the flag should be moved
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=True, domain='[]', context='{}'),
dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),
])
class TestGlobalDefaults(FiltersCase):
    """Behaviour of the is_default flag for global (user_id=False) filters:
    unlike own filters, stealing the default from another global filter is
    an error rather than a silent move."""

    def setUp(self):
        super(TestGlobalDefaults, self).setUp()
        # Run everything as the demo user; name_search returns (id, name) pairs.
        self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
        self.USER_ID = self.USER[0]

    def test_new_filter_not_default(self):
        """
        When creating a @is_default filter with existing non-default filters,
        the new filter gets the flag
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', user_id=False, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        Filters.create_or_replace(self.cr, self.USER_ID, {
            'name': 'c',
            'model_id': 'ir.filters',
            'user_id': False,
            'is_default': True,
        })
        filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', user_id=False, is_default=False, domain='[]', context='{}'),
            dict(name='b', user_id=False, is_default=False, domain='[]', context='{}'),
            dict(name='c', user_id=False, is_default=True, domain='[]', context='{}'),
        ])

    def test_new_filter_existing_default(self):
        """
        When creating a @is_default filter where an existing filter is already
        @is_default, an error should be generated
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        with self.assertRaises(exceptions.Warning):
            Filters.create_or_replace(self.cr, self.USER_ID, {
                'name': 'c',
                'model_id': 'ir.filters',
                'user_id': False,
                'is_default': True,
            })

    def test_update_filter_set_default(self):
        """
        When updating an existing filter to @is_default, if an other filter
        already has the flag an error should be generated
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        with self.assertRaises(exceptions.Warning):
            Filters.create_or_replace(self.cr, self.USER_ID, {
                'name': 'a',
                'model_id': 'ir.filters',
                'user_id': False,
                'is_default': True,
            })

    def test_update_default_filter(self):
        """
        Replacing the current default global filter should not generate any error
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        context_value = "{'some_key': True}"
        # 'b' is already the default; replacing it while keeping is_default
        # must be accepted (no flag theft is taking place).
        Filters.create_or_replace(self.cr, self.USER_ID, {
            'name': 'b',
            'model_id': 'ir.filters',
            'user_id': False,
            'context': context_value,
            'is_default': True,
        })
        filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', user_id=False, is_default=False, domain='[]', context='{}'),
            dict(name='b', user_id=False, is_default=True, domain='[]', context=context_value),
        ])
| agpl-3.0 |
tecnovert/particl-core | test/functional/feature_ins_timestampindex.py | 2 | 2059 | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test timestampindex generation and fetching
#
import time
from test_framework.test_particl import ParticlTestFramework
from test_framework.util import assert_equal
class TimestampIndexTest(ParticlTestFramework):
    """Exercise -timestampindex: stake a few blocks with real-time gaps
    between them, then fetch their hashes back by timestamp range via
    the getblockhashes RPC on an indexing node."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 4
        self.extra_args = [
            # Nodes 0/1 are "wallet" nodes
            ['-debug', ],
            ['-debug', '-timestampindex'],
            # Nodes 2/3 are used for testing
            ['-debug', ],
            ['-debug', '-timestampindex'], ]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def setup_network(self):
        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
        self.start_nodes()
        # Star topology centred on node 0.
        self.connect_nodes(0, 1)
        self.connect_nodes(0, 2)
        self.connect_nodes(0, 3)
        self.sync_all()

    def run_test(self):
        nodes = self.nodes

        # Stop staking so block production is fully under test control.
        for i in range(len(nodes)):
            nodes[i].reservebalance(True, 10000000)

        self.import_genesis_coins_a(nodes[0])

        blockhashes = []
        # Stake three blocks, sleeping between them so each block carries
        # a distinct timestamp for the index to discriminate on.
        self.stakeToHeight(1, False)
        blockhashes.append(nodes[0].getblockhash(1))
        time.sleep(3)
        self.stakeToHeight(2, False)
        blockhashes.append(nodes[0].getblockhash(2))
        time.sleep(3)
        self.stakeToHeight(3, False)
        blockhashes.append(nodes[0].getblockhash(3))

        self.sync_all()

        # Query a window starting at block 1's time; 76 seconds comfortably
        # covers the three staked blocks.  NOTE(review): getblockhashes is
        # called here as (high, low) — confirm against the RPC signature.
        low = self.nodes[1].getblock(blockhashes[0])['time']
        high = low + 76

        print('Checking timestamp index...')
        hashes = self.nodes[1].getblockhashes(high, low)

        assert_equal(len(hashes), len(blockhashes))
        assert_equal(hashes, blockhashes)
        print('Passed\n')


if __name__ == '__main__':
    TimestampIndexTest().main()
| mit |
timm/timmnix | pypy3-v5.5.0-linux64/lib-python/3/test/test_importlib/builtin/test_finder.py | 2 | 1297 | from importlib import machinery
from .. import abc
from .. import util
from . import util as builtin_util
import sys
import unittest
class FinderTests(abc.FinderTests):

    """Test find_module() for built-in modules."""

    def test_module(self):
        # Common case.
        with util.uncache(builtin_util.NAME):
            found = machinery.BuiltinImporter.find_module(builtin_util.NAME)
            self.assertTrue(found)

    # Built-in modules cannot be a package.
    test_package = test_package_in_package = test_package_over_module = None

    # Built-in modules cannot be in a package.
    test_module_in_package = None

    def test_failure(self):
        # A module that is not built-in must produce no loader.
        assert 'importlib' not in sys.builtin_module_names
        loader = machinery.BuiltinImporter.find_module('importlib')
        self.assertIsNone(loader)

    def test_ignore_path(self):
        # The value for 'path' should always trigger a failed import.
        with util.uncache(builtin_util.NAME):
            loader = machinery.BuiltinImporter.find_module(builtin_util.NAME,
                                                            ['pkg'])
            self.assertIsNone(loader)
def test_main():
    # Entry point for running this module's tests directly.
    from test.support import run_unittest
    run_unittest(FinderTests)


if __name__ == '__main__':
    test_main()
| mit |
thesoftwarejedi/bitcoin | qa/rpc-tests/invalidblockrequest.py | 4 | 4002 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework import ComparisonTestFramework
from util import *
from comptool import TestManager, TestInstance
from mininode import *
from blocktools import *
import logging
import copy
import time
'''
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidBlockRequestTest(ComparisonTestFramework):

    ''' Can either run this test as 1 node with expected answers, or two and compare them.
        Change the "outcome" variable from each TestInstance object to only do the comparison. '''

    def __init__(self):
        self.num_nodes = 1

    def run_test(self):
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        self.tip = None
        self.block_time = None
        NetworkThread().start() # Start up network handling in another thread
        test.run()

    def get_tests(self):
        # Generator of TestInstances consumed by the TestManager.
        if self.tip is None:
            # Python 2 int() accepts the trailing "L" long suffix.
            self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.block_time = int(time.time())+1

        '''
        Create a new block with an anyone-can-spend coinbase
        '''
        block = create_block(self.tip, create_coinbase(), self.block_time)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        '''
        Now we need that block to mature so we can spend the coinbase.
        '''
        test = TestInstance(sync_every_block=False)
        for i in xrange(100):
            block = create_block(self.tip, create_coinbase(), self.block_time)
            block.solve()
            self.tip = block.sha256
            self.block_time += 1
            test.blocks_and_transactions.append([block, True])
        yield test

        '''
        Now we use merkle-root malleability to generate an invalid block with
        same blockheader.
        Manufacture a block with 3 transactions (coinbase, spend of prior
        coinbase, spend of that spend).  Duplicate the 3rd transaction to
        leave merkle root and blockheader unchanged but invalidate the block.
        '''
        block2 = create_block(self.tip, create_coinbase(), self.block_time)
        self.block_time += 1

        # chr(81) is OP_TRUE
        tx1 = create_transaction(self.block1.vtx[0], 0, chr(81), 50*100000000)
        tx2 = create_transaction(tx1, 0, chr(81), 50*100000000)

        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)

        # Mutate block 2: duplicating the last tx keeps the merkle root
        # (pair-duplication quirk of the merkle algorithm) but makes the
        # block invalid, so the valid original should be re-requested.
        block2.vtx.append(tx2)
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert(block2_orig.vtx != block2.vtx)

        self.tip = block2.sha256
        yield TestInstance([[block2, False], [block2_orig, True]])

        '''
        Make sure that a totally screwed up block is not valid.
        '''
        block3 = create_block(self.tip, create_coinbase(), self.block_time)
        self.block_time += 1
        block3.vtx[0].vout[0].nValue = 100*100000000 # Too high!
        block3.vtx[0].sha256=None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()

        yield TestInstance([[block3, False]])

if __name__ == '__main__':
    InvalidBlockRequestTest().main()
| mit |
woiddei/pattern | examples/05-vector/05-nb.py | 21 | 2468 | import os, sys; sys.path.insert(0, os.path.join("..", ".."))
from pattern.vector import Document, Model, NB
from pattern.db import Datasheet
# Naive Bayes is one of the oldest classifiers,
# but it is still popular because it is fast for models
# that have many documents and many features.
# It is outperformed by KNN and SVM, but useful as a baseline for tests.

# We'll test it with a corpus of spam e-mail messages,
# included in the test suite, stored as a CSV-file.
# The corpus contains mostly technical e-mail from developer mailing lists.
data = os.path.join(os.path.dirname(__file__), "..","..","test","corpora","spam-apache.csv")
data = Datasheet.load(data)

documents = []
# Each CSV row is (score, message); a positive score marks a real message.
for score, message in data:
    document = Document(message, type=int(score) > 0)
    documents.append(document)
m = Model(documents)

print "number of documents:", len(m)
print "number of words:", len(m.vector)
print "number of words (average):", sum(len(d.features) for d in m.documents) / float(len(m))
print

# Train Naive Bayes on all documents.
# Each document has a type: True for actual e-mail, False for spam.
# This results in a "binary" classifier that either answers True or False
# for unknown documents.
classifier = NB()
for document in m:
    classifier.train(document)

# We can now ask it questions about unknown e-mails:

print classifier.classify("win money") # False: most likely spam.
print classifier.classify("fix bug")   # True: most likely a real message.
print

print classifier.classify("customer")  # False: people don't talk like this on developer lists...
print classifier.classify("guys")      # True: because most likely everyone knows everyone.
print

# To test the accuracy of a classifier,
# we typically use 10-fold cross validation.
# This means that 10 individual tests are performed,
# each with 90% of the corpus as training data and 10% as testing data.
from pattern.vector import k_fold_cv
print k_fold_cv(NB, documents=m, folds=10)

# This yields 5 scores: (Accuracy, Precision, Recall, F-score, standard deviation).
# Accuracy in itself is not very useful,
# since some spam may have been regarded as real messages (false positives),
# and some real messages may have been regarded as spam (false negatives).
# Precision = how accurately false positives are discarded,
# Recall = how accurately false negatives are discarded.
# stdev = folds' variation from average F-score. | bsd-3-clause |
yetsky/extra | packages/my-application/python-all/files/usr/lib/python2.7/distutils/emxccompiler.py | 250 | 11931 | """distutils.emxccompiler
Provides the EMXCCompiler class, a subclass of UnixCCompiler that
handles the EMX port of the GNU C compiler to OS/2.
"""
# issues:
#
# * OS/2 insists that DLLs can have names no longer than 8 characters
# We put export_symbols in a def-file, as though the DLL can have
# an arbitrary length name, but truncate the output filename.
#
# * only use OMF objects and use LINK386 as the linker (-Zomf)
#
# * always build for multithreading (-Zmt) as the accompanying OS/2 port
# of Python is only distributed with threads enabled.
#
# tested configurations:
#
# * EMX gcc 2.81/EMX 0.9d fix03
__revision__ = "$Id$"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
class EMXCCompiler (UnixCCompiler):
    """CCompiler implementation for the EMX port of GCC on OS/2.

    Inherits the UnixCCompiler machinery and overrides the pieces where
    EMX/OS/2 differ: OMF object format, '.rc' resource compilation,
    def-file based symbol export, and library file naming/search rules.
    """

    compiler_type = 'emx'
    obj_extension = ".obj"
    static_lib_extension = ".lib"
    shared_lib_extension = ".dll"
    static_lib_format = "%s%s"
    shared_lib_format = "%s%s"
    res_extension = ".res"      # compiled resource file
    exe_extension = ".exe"

    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):

        UnixCCompiler.__init__ (self, verbose, dry_run, force)

        # Warn early if pyconfig.h doesn't look GCC-compatible.
        (status, details) = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. " +
                ("Reason: %s." % details) +
                "Compiling may fail because of undefined preprocessor macros.")

        (self.gcc_version, self.ld_version) = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s\n" %
                         (self.gcc_version,
                          self.ld_version) )

        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
                             compiler_so='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
                             linker_exe='gcc -Zomf -Zmt -Zcrtdll',
                             linker_so='gcc -Zomf -Zmt -Zcrtdll -Zdll')

        # want the gcc library statically linked (so that we don't have
        # to distribute a version dependent on the compiler we have)
        self.dll_libraries=["gcc"]

    # __init__ ()

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile one source file: '.rc' resources go through the 'rc'
        tool, everything else through the configured C compiler."""
        if ext == '.rc':
            # gcc requires '.rc' compiled to binary ('.res') files !!!
            try:
                self.spawn(["rc", "-r", src])
            except DistutilsExecError, msg:
                raise CompileError, msg
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError, msg:
                raise CompileError, msg

    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        """Link objects into a DLL or executable, generating an OS/2
        .def file for exported symbols when building a shared library."""

        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE)):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)

            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")

            # Generate .def file
            contents = [
                "LIBRARY %s INITINSTANCE TERMINSTANCE" % \
                os.path.splitext(os.path.basename(output_filename))[0],
                "DATA MULTIPLE NONSHARED",
                "EXPORTS"]
            for sym in export_symbols:
                contents.append('  "%s"' % sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)

            # next add options for def-file and to creating import libraries
            # for gcc/ld the def-file is specified as any other object files
            objects.append(def_file)

        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):

        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")

        UnixCCompiler.link(self,
                           target_desc,
                           objects,
                           output_filename,
                           output_dir,
                           libraries,
                           library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug,
                           extra_preargs,
                           extra_postargs,
                           build_temp,
                           target_lang)

    # link ()

    # -- Miscellaneous methods -----------------------------------------

    # override the object_filenames method from CCompiler to
    # support rc and res-files
    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        """Map source filenames to object filenames, compiling '.rc'
        resources to '.res' and everything else to '.obj'."""
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc']):
                raise UnknownFileError, \
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + self.res_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()

    # override the find_library_file method from UnixCCompiler
    # to deal with file naming/searching differences
    def find_library_file(self, dirs, lib, debug=0):
        """Search 'dirs' plus EMX's LIBRARY_PATH for 'lib', trying both
        the short ('foo.lib') and rare long ('libfoo.lib') forms."""
        shortlib = '%s.lib' % lib
        longlib = 'lib%s.lib' % lib    # this form very rare

        # get EMX's default library directory search path
        try:
            emx_dirs = os.environ['LIBRARY_PATH'].split(';')
        except KeyError:
            emx_dirs = []

        for dir in dirs + emx_dirs:
            shortlibp = os.path.join(dir, shortlib)
            longlibp = os.path.join(dir, longlib)
            if os.path.exists(shortlibp):
                return shortlibp
            elif os.path.exists(longlibp):
                return longlibp

        # Oops, didn't find it in *any* of 'dirs'
        return None

# class EMXCCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.

# Tri-state result codes returned by check_config_h().
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation (specifically, pyconfig.h)
    appears amenable to building extensions with GCC.  Returns a tuple
    (status, details), where 'status' is one of the following constants:

    CONFIG_H_OK
      all is well, go ahead and compile
    CONFIG_H_NOTOK
      doesn't look good
    CONFIG_H_UNCERTAIN
      not sure -- unable to read pyconfig.h

    'details' is a human-readable string explaining the situation.

    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """

    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...

    from distutils import sysconfig
    import string
    # if sys.version contains GCC then python was compiled with
    # GCC, and the pyconfig.h file should be OK
    if string.find(sys.version,"GCC") >= 0:
        return (CONFIG_H_OK, "sys.version mentions 'GCC'")

    fn = sysconfig.get_config_h_filename()
    try:
        # It would probably better to read single lines to search.
        # But we do this only once, and it is fast enough
        f = open(fn)
        try:
            s = f.read()
        finally:
            f.close()

    except IOError, exc:
        # if we can't read this file, we cannot say it is wrong
        # the compiler will complain later about this file as missing
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))

    else:
        # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
        if string.find(s,"__GNUC__") >= 0:
            return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
        else:
            return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
    """ Try to find out the versions of gcc and ld.
        If not possible it returns None for it.
    """
    from distutils.version import StrictVersion
    from distutils.spawn import find_executable
    import re

    gcc_exe = find_executable('gcc')
    if gcc_exe:
        # -dumpversion prints just the version string, e.g. "2.8.1".
        out = os.popen(gcc_exe + ' -dumpversion','r')
        try:
            out_string = out.read()
        finally:
            out.close()
        result = re.search('(\d+\.\d+\.\d+)',out_string)
        if result:
            gcc_version = StrictVersion(result.group(1))
        else:
            gcc_version = None
    else:
        gcc_version = None
    # EMX ld has no way of reporting version number, and we use GCC
    # anyway - so we can link OMF DLLs
    ld_version = None
    return (gcc_version, ld_version)
zhuwenping/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_tuple_params.py | 203 | 5565 | """Fixer for function definitions with tuple parameters.
def func(((a, b), c), d):
...
->
def func(x, d):
((a, b), c) = x
...
It will also support lambdas:
lambda (x, y): x + y -> lambda t: t[0] + t[1]
# The parens are a syntax error in Python 3
lambda (x): x + y -> lambda x: x + y
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms
def is_docstring(stmt):
    """Return True if *stmt* is a tree Node whose first child is a
    string literal (i.e. the statement is a docstring)."""
    if not isinstance(stmt, pytree.Node):
        return False
    return stmt.children[0].type == token.STRING
class FixTupleParams(fixer_base.BaseFix):
    """Rewrite tuple parameters in 'def' and 'lambda' to Python 3 form:
    a synthetic single parameter plus unpacking assignments (for defs)
    or subscript accesses (for lambdas)."""

    run_order = 4 #use a lower order since lambda is part of other
                  #patterns
    BM_compatible = True

    PATTERN = """
              funcdef< 'def' any parameters< '(' args=any ')' >
                       ['->' any] ':' suite=any+ >
              |
              lambda=
              lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
                       ':' body=any
              >
              """

    def transform(self, node, results):
        # Lambdas take a different rewrite path (no suite to prepend to).
        if "lambda" in results:
            return self.transform_lambda(node, results)

        new_lines = []
        suite = results["suite"]
        args = results["args"]
        # This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
        # TODO(cwinter): suite-cleanup
        if suite[0].children[1].type == token.INDENT:
            start = 2
            indent = suite[0].children[1].value
            end = Newline()
        else:
            # One-line def body: statements are joined with "; ".
            start = 0
            indent = "; "
            end = pytree.Leaf(token.INDENT, "")

        # We need access to self for new_name(), and making this a method
        #  doesn't feel right. Closing over self and new_lines makes the
        #  code below cleaner.
        def handle_tuple(tuple_arg, add_prefix=False):
            # Replace the tuple parameter by a fresh name and queue an
            # unpacking assignment for insertion at the top of the suite.
            n = Name(self.new_name())
            arg = tuple_arg.clone()
            arg.prefix = ""
            stmt = Assign(arg, n.clone())
            if add_prefix:
                n.prefix = " "
            tuple_arg.replace(n)
            new_lines.append(pytree.Node(syms.simple_stmt,
                                         [stmt, end.clone()]))

        if args.type == syms.tfpdef:
            handle_tuple(args)
        elif args.type == syms.typedargslist:
            for i, arg in enumerate(args.children):
                if arg.type == syms.tfpdef:
                    # Without add_prefix, the emitted code is correct,
                    # just ugly.
                    handle_tuple(arg, add_prefix=(i > 0))

        if not new_lines:
            return

        # This isn't strictly necessary, but it plays nicely with other fixers.
        # TODO(cwinter) get rid of this when children becomes a smart list
        for line in new_lines:
            line.parent = suite[0]

        # TODO(cwinter) suite-cleanup
        after = start
        if start == 0:
            new_lines[0].prefix = " "
        elif is_docstring(suite[0].children[start]):
            # Keep the docstring first; insert assignments after it.
            new_lines[0].prefix = indent
            after = start + 1

        for line in new_lines:
            line.parent = suite[0]
        suite[0].children[after:after] = new_lines
        for i in range(after+1, after+len(new_lines)+1):
            suite[0].children[i].prefix = indent
        suite[0].changed()

    def transform_lambda(self, node, results):
        # Rewrite "lambda (a, b): expr" as "lambda a_b: expr" with the body
        # references replaced by subscripts into the tuple argument.
        args = results["args"]
        body = results["body"]
        inner = simplify_args(results["inner"])

        # Replace lambda ((((x)))): x with lambda x: x
        if inner.type == token.NAME:
            inner = inner.clone()
            inner.prefix = " "
            args.replace(inner)
            return

        params = find_params(args)
        to_index = map_to_index(params)
        tup_name = self.new_name(tuple_name(params))

        new_param = Name(tup_name, prefix=" ")
        args.replace(new_param.clone())
        for n in body.post_order():
            if n.type == token.NAME and n.value in to_index:
                subscripts = [c.clone() for c in to_index[n.value]]
                new = pytree.Node(syms.power,
                                  [new_param.clone()] + subscripts)
                new.prefix = n.prefix
                n.replace(new)
### Helper functions for transform_lambda()
def simplify_args(node):
    """Strip redundant parentheses from a lambda parameter node.

    ``vfpdef`` nodes have the shape ``'(' x ')'``; peel them off until
    the bare NAME or vfplist underneath is reached.
    """
    kind = node.type
    if kind in (syms.vfplist, token.NAME):
        return node
    if kind == syms.vfpdef:
        inner = node
        while inner.type == syms.vfpdef:
            inner = inner.children[1]
        return inner
    raise RuntimeError("Received unexpected node %s" % node)
def find_params(node):
    """Return the parameter names under *node* as a (possibly nested)
    list of strings mirroring the tuple structure."""
    if node.type == token.NAME:
        return node.value
    if node.type == syms.vfpdef:
        # Parenthesized parameter: recurse into the wrapped child.
        return find_params(node.children[1])
    names = []
    for child in node.children:
        if child.type != token.COMMA:
            names.append(find_params(child))
    return names
def map_to_index(param_list, prefix=None, d=None):
    """Map each parameter name in *param_list* (nested lists of names, as
    produced by find_params) to the list of Subscript trailers that index
    into the replacement tuple argument.

    'prefix' accumulates the subscripts of enclosing tuples during
    recursion; 'd' is the shared result dict.
    """
    # None sentinels instead of mutable defaults: a shared []/{} default
    # would leak state between independent top-level calls.
    if prefix is None:
        prefix = []
    if d is None:
        d = {}
    for i, obj in enumerate(param_list):
        trailer = [Subscript(Number(str(i)))]
        if isinstance(obj, list):
            # Bug fix: pass the full accumulated path.  The old code passed
            # only 'trailer', dropping 'prefix' and producing wrong indices
            # for parameters nested more than two tuples deep.
            map_to_index(obj, prefix + trailer, d=d)
        else:
            d[obj] = prefix + trailer
    return d
def tuple_name(param_list):
    """Build a synthetic parameter name by joining all (possibly nested)
    parameter names with underscores, e.g. [['a', 'b'], 'c'] -> 'a_b_c'."""
    parts = []
    for item in param_list:
        parts.append(tuple_name(item) if isinstance(item, list) else item)
    return "_".join(parts)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.