repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
owaiskhan/Retransmission-Combining | gnuradio-core/src/lib/filter/generate_gr_interp_fir_filter_XXX.py | 17 | 1397 | #!/bin/env python
# -*- python -*-
#
# Copyright 2003,2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import re
from generate_utils import *
roots = ['gr_interp_fir_filter_XXX']
def expand_h_cc_i (root, code3):
    """Expand the .h, .cc and .i templates for one root/type-code pair."""
    subst = init_dict (root, code3)
    for ext in ('.h.t', '.cc.t', '.i.t'):
        expand_template (subst, root + ext)
def init_dict (root, code3):
    """Build the template-substitution dictionary for one expanded name."""
    # replace the run of X placeholders with the concrete type code
    expanded = re.sub ('X+', code3, root)
    subst = standard_dict (expanded, code3)
    subst['FIR_TYPE'] = 'gr_fir_' + code3
    return subst
def generate ():
    """Expand every root template for every FIR type signature."""
    for root in roots:
        for sig in fir_signatures:
            expand_h_cc_i (root, sig)

if __name__ == '__main__':
    generate ()
| gpl-3.0 |
xavierwu/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets

# Build a noisy 1-D regression problem; coef=True returns the true slope
# so it can be compared against the fitted estimators below.
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
                                      n_informative=1, noise=10,
                                      coef=True, random_state=0)
# Add outlier data: overwrite the first n_outliers samples with points
# drawn far from the true line.
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data (ordinary least squares, outliers included)
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm; the inlier mask marks
# the samples RANSAC kept for its final fit.
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models over a fixed x-range for plotting
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
# Plot inliers, outliers and both fitted lines.
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
boretom/pyload-apkg | source/py-mods-prebuilt-x86-64/site-packages/Crypto/SelfTest/Cipher/test_Blowfish.py | 119 | 5832 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/test_Blowfish.py: Self-test for the Blowfish cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.Blowfish"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (plaintext, ciphertext, key) tuples.
test_data = [
# Test vectors from http://www.schneier.com/code/vectors.txt
('0000000000000000', '4ef997456198dd78', '0000000000000000'),
('ffffffffffffffff', '51866fd5b85ecb8a', 'ffffffffffffffff'),
('1000000000000001', '7d856f9a613063f2', '3000000000000000'),
('1111111111111111', '2466dd878b963c9d', '1111111111111111'),
('1111111111111111', '61f9c3802281b096', '0123456789abcdef'),
('0123456789abcdef', '7d0cc630afda1ec7', '1111111111111111'),
('0000000000000000', '4ef997456198dd78', '0000000000000000'),
('0123456789abcdef', '0aceab0fc6a0a28d', 'fedcba9876543210'),
('01a1d6d039776742', '59c68245eb05282b', '7ca110454a1a6e57'),
('5cd54ca83def57da', 'b1b8cc0b250f09a0', '0131d9619dc1376e'),
('0248d43806f67172', '1730e5778bea1da4', '07a1133e4a0b2686'),
('51454b582ddf440a', 'a25e7856cf2651eb', '3849674c2602319e'),
('42fd443059577fa2', '353882b109ce8f1a', '04b915ba43feb5b6'),
('059b5e0851cf143a', '48f4d0884c379918', '0113b970fd34f2ce'),
('0756d8e0774761d2', '432193b78951fc98', '0170f175468fb5e6'),
('762514b829bf486a', '13f04154d69d1ae5', '43297fad38e373fe'),
('3bdd119049372802', '2eedda93ffd39c79', '07a7137045da2a16'),
('26955f6835af609a', 'd887e0393c2da6e3', '04689104c2fd3b2f'),
('164d5e404f275232', '5f99d04f5b163969', '37d06bb516cb7546'),
('6b056e18759f5cca', '4a057a3b24d3977b', '1f08260d1ac2465e'),
('004bd6ef09176062', '452031c1e4fada8e', '584023641aba6176'),
('480d39006ee762f2', '7555ae39f59b87bd', '025816164629b007'),
('437540c8698f3cfa', '53c55f9cb49fc019', '49793ebc79b3258f'),
('072d43a077075292', '7a8e7bfa937e89a3', '4fb05e1515ab73a7'),
('02fe55778117f12a', 'cf9c5d7a4986adb5', '49e95d6d4ca229bf'),
('1d9d5c5018f728c2', 'd1abb290658bc778', '018310dc409b26d6'),
('305532286d6f295a', '55cb3774d13ef201', '1c587f1c13924fef'),
('0123456789abcdef', 'fa34ec4847b268b2', '0101010101010101'),
('0123456789abcdef', 'a790795108ea3cae', '1f1f1f1f0e0e0e0e'),
('0123456789abcdef', 'c39e072d9fac631d', 'e0fee0fef1fef1fe'),
('ffffffffffffffff', '014933e0cdaff6e4', '0000000000000000'),
('0000000000000000', 'f21e9a77b71c49bc', 'ffffffffffffffff'),
('0000000000000000', '245946885754369a', '0123456789abcdef'),
('ffffffffffffffff', '6b5c5a9c5d9e0a5a', 'fedcba9876543210'),
('fedcba9876543210', 'f9ad597c49db005e', 'f0'),
('fedcba9876543210', 'e91d21c1d961a6d6', 'f0e1'),
('fedcba9876543210', 'e9c2b70a1bc65cf3', 'f0e1d2'),
('fedcba9876543210', 'be1e639408640f05', 'f0e1d2c3'),
('fedcba9876543210', 'b39e44481bdb1e6e', 'f0e1d2c3b4'),
('fedcba9876543210', '9457aa83b1928c0d', 'f0e1d2c3b4a5'),
('fedcba9876543210', '8bb77032f960629d', 'f0e1d2c3b4a596'),
('fedcba9876543210', 'e87a244e2cc85e82', 'f0e1d2c3b4a59687'),
('fedcba9876543210', '15750e7a4f4ec577', 'f0e1d2c3b4a5968778'),
('fedcba9876543210', '122ba70b3ab64ae0', 'f0e1d2c3b4a596877869'),
('fedcba9876543210', '3a833c9affc537f6', 'f0e1d2c3b4a5968778695a'),
('fedcba9876543210', '9409da87a90f6bf2', 'f0e1d2c3b4a5968778695a4b'),
('fedcba9876543210', '884f80625060b8b4', 'f0e1d2c3b4a5968778695a4b3c'),
('fedcba9876543210', '1f85031c19e11968', 'f0e1d2c3b4a5968778695a4b3c2d'),
('fedcba9876543210', '79d9373a714ca34f', 'f0e1d2c3b4a5968778695a4b3c2d1e'),
('fedcba9876543210', '93142887ee3be15c',
'f0e1d2c3b4a5968778695a4b3c2d1e0f'),
('fedcba9876543210', '03429e838ce2d14b',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00'),
('fedcba9876543210', 'a4299e27469ff67b',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011'),
('fedcba9876543210', 'afd5aed1c1bc96a8',
'f0e1d2c3b4a5968778695a4b3c2d1e0f001122'),
('fedcba9876543210', '10851c0e3858da9f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233'),
('fedcba9876543210', 'e6f51ed79b9db21f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344'),
('fedcba9876543210', '64a6e14afd36b46f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455'),
('fedcba9876543210', '80c7d7d45a5479ad',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233445566'),
('fedcba9876543210', '05044b62fa52d080',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344556677'),
]
def get_tests(config={}):
    """Assemble the Blowfish self-test cases from the shared test vectors."""
    # NOTE: `config` is accepted for API symmetry with the other self-test
    # modules; it is not consulted here.
    from common import make_block_tests
    from Crypto.Cipher import Blowfish
    return make_block_tests(Blowfish, "Blowfish", test_data)
if __name__ == '__main__':
    import unittest

    def suite():
        # unittest.main resolves this callable by name via defaultTest
        return unittest.TestSuite(get_tests())

    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-3.0 |
hustodemon/spacewalk | backend/server/rhnServer/server_hardware.py | 1 | 34703 | #
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# This file contains all the logic necessary to manipulate Hardware
# items - load, reload, instanciate and save
#
import string
import sys
from rhn.UserDictCase import UserDictCase
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.rhnTB import Traceback
from spacewalk.server import rhnSQL
def kudzu_mapping(dict=None):
    """ Return the field mapping used to translate one kudzu hardware entry.

        The generic mapping (always present) renames 'desc' to
        'description'.  When *dict* is a dictionary carrying a 'bus' key,
        the bus-specific property translations are merged in on top.
        Mapping values name the rhnDevice column the key is stored in;
        a value of None means the key is recognized but dropped.
        Anything that is not a usable dictionary, or that lacks a 'bus'
        key, yields just the generic mapping.
    """
    # This is the generic mapping we need
    mapping = {
        'desc': 'description',
    }
    # error handling if we get passed weird stuff.
    if not dict:
        return mapping
    if not type(dict) == type({}) and not isinstance(dict, UserDictCase):
        return mapping
    hw_bus = dict.get("bus")
    # we need to have a bus type to be able to continue
    if not hw_bus:
        return mapping
    # str.lower() replaces the long-deprecated string.lower()
    hw_bus = hw_bus.lower()
    # "isa" and "isapnp" share one translation table.
    isa_extra = {
        "pdeviceid": "prop1",
        "deviceid": "prop2",
        "compat": "prop3",
        "native": None,
        "active": None,
        "cardnum": None,  # XXX: fix me
        "logdev": "prop4",
        "io": "prop2",
        "irq": "prop1",
        "dma": "prop3",
        "mem": "prop4",
    }
    # Per-bus extensions; unknown bus names fall back to the generic mapping.
    per_bus = {
        "ddc": {
            "id": None,
            "horizsyncmin": "prop1",
            "horizsyncmax": "prop2",
            "vertrefreshmin": "prop3",
            "vertrefreshmax": "prop4",
            "modes": None,
            "mem": None,
        },
        "ide": {
            "physical": "prop1",
            "logical": "prop2",
        },
        "isapnp": isa_extra,
        "isa": isa_extra,
        "keyboard": {},
        "psaux": {},
        "parallel": {
            'pnpmfr': 'prop1',
            'pnpdesc': 'prop2',
            'pnpmodel': 'prop3',
            'pnpmodes': 'prop4',
            'pinfo': None,
            'pinfo.xres': None,
            'pinfo.yres': None,
            'pinfo.color': None,
            'pinfo.ascii': None,
        },
        "pci": {
            'vendorid': 'prop1',
            'deviceid': 'prop2',
            'subvendorid': 'prop3',
            'subdeviceid': 'prop4',
            'network.hwaddr': None,
            'pcibus': None,
            'pcidev': None,
            'pcifn': None,
            'pcidom': None,
        },
        "sbus": {
            "monitor": "prop1",
            "width": "prop2",
            "height": "prop3",
            "freq": "prop4",
        },
        "scsi": {
            'host': 'prop1',
            'id': 'prop2',
            'channel': 'prop3',
            'lun': 'prop4',
            'generic': None,
        },
        "serial": {
            'pnpmfr': 'prop1',
            'pnpdesc': 'prop2',
            'pnpmodel': 'prop3',
            'pnpcompat': "prop4",
        },
        "usb": {
            "vendorid": "prop1",
            "deviceid": "prop2",
            "usbclass": "prop3",
            "usbbus": "prop4",
            "usblevel": "pciType",
            "usbdev": None,
            "usbprod": None,
            "usbsubclass": None,
            "usbprotocol": None,
            "usbport": None,
            "usbmfr": None,
            "productname": None,
            "productrevision": None,
            'network.hwaddr': None,
        },
        "firewire": {
            'vendorid': 'prop1',
            'deviceid': 'prop2',
            'subvendorid': 'prop3',
            'subdeviceid': 'prop4',
        },
        "pcmcia": {
            'vendorid': 'prop1',
            'deviceid': 'prop2',
            'function': 'prop3',
            'slot': 'prop4',
            'network.hwaddr': None,
        },
    }
    mapping.update(per_bus.get(hw_bus, {}))
    return mapping
def cleanse_ip_addr(ip_addr):
    """ Normalize a dotted-quad string: drop leading zeros per octet
        (e.g. 127.00.00.01 -> 127.0.0.1).  None passes through; any
        other value is stringified first.
    """
    if ip_addr is None:
        return None
    ip_addr = str(ip_addr)
    if not ip_addr:
        # empty input stays the empty string
        return ''
    octets = []
    for octet in ip_addr.split('.'):
        stripped = octet.lstrip('0')
        # an all-zero octet must stay "0", not become empty
        octets.append(stripped if stripped else '0')
    return '.'.join(octets)
class GenericDevice:
    """ A generic device class.

        Tracks one row of a device table plus a tiny state machine in
        self.status: 1 = newly added, 0 = loaded/saved (unchanged),
        2 = marked for deletion.  Subclasses override `table` and the
        id sequence.
    """
    table = "override-GenericDevice"

    def __init__(self):
        self.id = 0          # 0 means "no database id allocated yet"
        self.status = 1      # just added
        self.data = {}       # column name -> value for this row
        # default to the hardware seq...
        self.sequence = "rhn_hw_dev_id_seq"
        # columns that must be NULLed instead of stored as ''
        self._autonull = ("description", "board")

    def getid(self):
        """Return the device id, allocating one from the sequence on first use."""
        if self.id == 0:
            self.id = rhnSQL.Sequence(self.sequence)()
        return self.id

    def must_save(self):
        """Return 1 if save() has any work to do, 0 otherwise."""
        if self.id == 0 and self.status == 2:  # deleted new item
            return 0
        if self.status == 0:  # original item, unchanged
            return 0
        return 1

    def save(self, sysid):
        """ save data in the rhnDevice table """
        log_debug(4, self.table, self.status, self.data)
        if not self.must_save():
            return 0
        t = rhnSQL.Table(self.table, "id")
        # check if we have to delete
        if self.status == 2 and self.id:
            # delete the entry
            del t[self.id]
            return 0
        # set description to null if empty
        self._null_columns([self.data], self._autonull)
        # make sure we have a device id
        devid = self.getid()
        # drop None-valued columns so they are not written at all
        for k in self.data.keys():
            if self.data[k] is None:
                del self.data[k]
        self.data["server_id"] = sysid
        t[devid] = self.data
        self.status = 0  # now it is saved
        return 0

    def reload(self, devid):
        """ reload from rhnDevice table based on devid; returns -1 if devid is falsy """
        if not devid:
            return -1
        t = rhnSQL.Table(self.table, "id")
        self.data = t[devid]
        # clean up fields we don't want
        if self.data:
            for k in ["created", "modified"]:
                if self.data.has_key(k):
                    del self.data[k]
        self.id = devid
        self.status = 0
        return 0

    def _null_columns(self, params, names=()):
        """ Method searches for empty string in params dict with names
            defined in names list and replaces them with None value which
            is translated to NULL in SQL.

            We do not allow empty strings in database for compatibility
            reasons between Oracle and PostgreSQL.
        """
        # list of dicts
        for param in params:
            for name in names:
                if name in param and param[name] == '':
                    param[name] = None
class Device(GenericDevice):
    """ This is the base Device class that supports instantiation from a
        dictionarry. the __init__ takes the dictionary as its argument,
        together with a list of valid fields to recognize and with a mapping
        for dictionary keys into valid field names for self.data

        The fields are required to know what fields we have in the
        table. The mapping allows transformation from whatever comes in to
        valid fields in the table Looks complicated but it isn't -- gafton
    """

    def __init__(self, fields, dict=None, mapping=None):
        GenericDevice.__init__(self)
        # start with every known column present but None
        x = {}
        for k in fields:
            x[k] = None
        self.data = UserDictCase(x)
        if not dict:
            return
        # make sure we get a UserDictCase to work with
        if type(dict) == type({}):
            dict = UserDictCase(dict)
        if mapping is None or type(mapping) == type({}):
            mapping = UserDictCase(mapping)
        if not isinstance(dict, UserDictCase) or \
           not isinstance(mapping, UserDictCase):
            log_error("Argument passed is not a dictionary", dict, mapping)
            raise TypeError("Argument passed is not a dictionary",
                            dict, mapping)
        # copy each incoming key either directly (known field) or
        # through the mapping; unknown keys are logged and ignored
        for k in dict.keys():
            if dict[k] == '':
                dict[k] = None
            if self.data.has_key(k):
                self.data[k] = dict[k]
                continue
            if mapping.has_key(k):
                # the mapping dict might tell us to lose some fields
                if mapping[k] is not None:
                    self.data[mapping[k]] = dict[k]
            else:
                log_error("Unknown HW key =`%s'" % k,
                          dict.dict(), mapping.dict())
                # The try-except is added just so that we can send e-mails
                try:
                    raise KeyError("Don't know how to parse key `%s''" % k,
                                   dict.dict())
                except:
                    Traceback(mail=1)
                # Ignore this key
                continue
        # clean up this data: strip whitespace and surrounding double
        # quotes from every non-empty string value
        try:
            for k in self.data.keys():
                if type(self.data[k]) == type("") and len(self.data[k]):
                    self.data[k] = string.strip(self.data[k])
                    if not len(self.data[k]):
                        continue
                    if self.data[k][0] == '"' and self.data[k][-1] == '"':
                        self.data[k] = self.data[k][1:-1]
        except IndexError:
            # re-raise with context, preserving the original traceback
            raise IndexError, "Can not process data = %s, key = %s" % (
                repr(self.data), k), sys.exc_info()[2]
class HardwareDevice(Device):
    """ A more specific device based on the Device class """
    table = "rhnDevice"

    def __init__(self, dict=None):
        """Build a generic hardware row from a kudzu-style dictionary."""
        columns = ['class', 'bus', 'device', 'driver', 'detached',
                   'description', 'pcitype', 'prop1', 'prop2',
                   'prop3', 'prop4']
        # kudzu_mapping() translates the incoming kudzu keys into the
        # rhnDevice columns listed above
        Device.__init__(self, columns, dict, kudzu_mapping(dict))
        # use the hardware id sequencer
        self.sequence = "rhn_hw_dev_id_seq"
class CPUDevice(Device):
    """ A class for handling CPU - mirrors the rhnCPU structure """
    table = "rhnCPU"

    def __init__(self, dict=None):
        fields = ['cpu_arch_id', 'architecture', 'bogomips', 'cache',
                  'family', 'mhz', 'stepping', 'flags', 'model',
                  'version', 'vendor', 'nrcpu', 'acpiVersion',
                  'apic', 'apmVersion', 'chipset', 'nrsocket']
        # client-side key -> rhnCPU column; None drops the key
        mapping = {
            "bogomips": "bogomips",
            "cache": "cache",
            "model": "model",
            "platform": "architecture",
            "type": "vendor",
            "model_rev": "stepping",
            "model_number": "family",
            "model_ver": "version",
            "model_version": "version",
            "speed": "mhz",
            "count": "nrcpu",
            "socket_count": "nrsocket",
            "other": "flags",
            "desc": None,
            'class': None,
        }
        # now instantiate this class
        Device.__init__(self, fields, dict, mapping)
        self.sequence = "rhn_cpu_id_seq"
        if not dict:
            return
        if self.data.get("cpu_arch_id") is not None:
            return  # all fine, we have the arch
        # if we don't have an architecture, guess it
        if not self.data.has_key("architecture"):
            log_error("hash does not have a platform member: %s" % dict)
            raise AttributeError, "Expected a hash value for member `platform'"
        # now extract the arch field, which has to come out of rhnCpuArch
        arch = self.data["architecture"]
        row = rhnSQL.Table("rhnCpuArch", "label")[arch]
        if row is None or not row.has_key("id"):
            log_error("Can not find arch %s in rhnCpuArch" % arch)
            raise AttributeError, "Invalid architecture for CPU: `%s'" % arch
        self.data["cpu_arch_id"] = row["id"]
        del self.data["architecture"]
        if self.data.has_key("nrcpu"):  # make sure this is a number
            try:
                self.data["nrcpu"] = int(self.data["nrcpu"])
            except:
                # unparseable CPU counts default to a single CPU
                self.data["nrcpu"] = 1
            if self.data["nrcpu"] == 0:
                self.data["nrcpu"] = 1
class NetworkInformation(Device):
    """ This is a wrapper class for the Network Information (rhnServerNetwork) """
    table = "rhnServerNetwork"

    def __init__(self, dict=None):
        """Wrap the per-server hostname / IPv4 / IPv6 record."""
        Device.__init__(self,
                        ["hostname", "ipaddr", "ip6addr"],
                        dict,
                        {'class': None})
        # empty addresses are stored as NULL, never as ''
        self._autonull = ('ipaddr', 'ip6addr')
        # use our own sequence
        self.sequence = "rhn_server_net_id_seq"
        # bugzilla: 129840 kudzu (rhpl) will sometimes pad octets
        # with leading zeros, causing confusion; clean those up
        self.data['ipaddr'] = cleanse_ip_addr(self.data['ipaddr'])
class NetIfaceInformation(Device):
    """ Network interfaces (rhnServerNetInterface) for one server.

        Unlike the other Device subclasses this one manages a whole
        collection of rows: self.ifaces holds the client-uploaded
        interfaces keyed by name, self.db_ifaces the rows currently in
        the database, and save() reconciles the two with explicit
        insert / update / delete lists.
    """
    # uploaded key -> rhnServerNetInterface column
    key_mapping = {
        'hwaddr': 'hw_addr',
        'module': 'module',
    }

    def __init__(self, dict=None):
        log_debug(4, dict)
        self.ifaces = {}      # name -> uploaded interface values
        self.db_ifaces = []   # rows loaded from the database by reload()
        # parameters which are not allowed to be empty and set to NULL
        self._autonull = ('hw_addr', 'module')
        if not dict:
            return
        for name, info in dict.items():
            if name == 'class':
                # Ignore it
                continue
            if not isinstance(info, type({})):
                raise rhnFault(53, "Unexpected format for interface %s" %
                               name)
            vdict = {}
            for key, mapping in self.key_mapping.items():
                # Look at the mapping first; if not found, look for the key
                if info.has_key(mapping):
                    k = mapping
                else:
                    k = key
                if not info.has_key(k):
                    raise rhnFault(53, "Unable to find required field %s"
                                   % key)
                val = info[k]
                vdict[mapping] = val
            # attach address collections when the upload carries them
            if 'ipaddr' in info and info['ipaddr']:
                vdict['ipv4'] = NetIfaceAddress4(
                    [{'ipaddr': info['ipaddr'], 'broadcast': info['broadcast'], 'netmask': info['netmask']}])
            if 'ipv6' in info and info['ipv6']:
                vdict['ipv6'] = NetIfaceAddress6(info["ipv6"])
            self.ifaces[name] = vdict

    def __str__(self):
        # debugging representation showing both uploaded and DB state
        return "<%s Class at %d: %s>\n" % (
            self.__class__.__name__,
            id(self), {
                "self.ifaces": self.ifaces,
                "self.db_ifaces": self.db_ifaces,
            })
    __repr__ = __str__

    def save(self, server_id):
        """Reconcile the uploaded interfaces with the rows in the database."""
        log_debug(4, self.ifaces)
        self.reload(server_id)
        log_debug(4, "Interfaces in DB", self.db_ifaces)
        # Compute updates, deletes and inserts
        inserts = []
        updates = []
        deletes = []
        ifaces = self.ifaces.copy()
        for iface in self.db_ifaces:
            name = iface['name']
            if not self.ifaces.has_key(name):
                # To be deleted
                deletes.append({'server_id': server_id, 'name': name})
                continue
            uploaded_iface = ifaces[name].copy()
            del ifaces[name]
            if _hash_eq(uploaded_iface, iface):
                # Same value
                continue
            uploaded_iface.update({'name': name, 'server_id': server_id})
            # the address objects are saved separately below, not as columns
            if 'ipv4' in uploaded_iface:
                del(uploaded_iface['ipv4'])
            if 'ipv6' in uploaded_iface:
                del(uploaded_iface['ipv6'])
            updates.append(uploaded_iface)
        # Everything else in self.ifaces has to be inserted
        for name, info in ifaces.items():
            iface = {}
            iface['name'] = name
            iface['server_id'] = server_id
            iface['hw_addr'] = info['hw_addr']
            iface['module'] = info['module']
            inserts.append(iface)
        log_debug(4, "Deletes", deletes)
        log_debug(4, "Updates", updates)
        log_debug(4, "Inserts", inserts)
        self._update(updates)
        self._insert(inserts)
        # save the address rows for every uploaded interface; an empty
        # address object still runs so stale DB addresses get removed
        ifaces = self.ifaces.copy()
        for name, info in ifaces.items():
            if not 'ipv6' in info:
                info['ipv6'] = NetIfaceAddress6()
            info['ipv6'].save(self.get_server_id(server_id, name))
            if not 'ipv4' in info:
                info['ipv4'] = NetIfaceAddress4()
            info['ipv4'].save(self.get_server_id(server_id, name))
        # delete address (if any) of deleted interaces
        for d in deletes:
            interface = NetIfaceAddress6()
            interface.save(self.get_server_id(server_id, d['name']))
            interface = NetIfaceAddress4()
            interface.save(self.get_server_id(server_id, d['name']))
        self._delete(deletes)
        return 0

    def get_server_id(self, server_id, name):
        """ retrieve id for given server_id and name """
        h = rhnSQL.prepare("select id from rhnServerNetInterface where server_id=:server_id and name=:name")
        h.execute(server_id=server_id, name=name)
        row = h.fetchone_dict()
        if row:
            return row['id']
        else:
            return None

    def _insert(self, params):
        # build the INSERT with sorted, bind-variable columns
        q = """insert into rhnServerNetInterface
               (%s) values (%s)"""
        self._null_columns(params, self._autonull)
        columns = self.key_mapping.values() + ['server_id', 'name']
        columns.sort()
        bind_params = string.join(map(lambda x: ':' + x, columns), ", ")
        h = rhnSQL.prepare(q % (string.join(columns, ", "), bind_params))
        return _dml(h, params)

    def _delete(self, params):
        # rows are identified by (server_id, name)
        q = """delete from rhnServerNetInterface
               where %s"""
        columns = ['server_id', 'name']
        wheres = map(lambda x: '%s = :%s' % (x, x), columns)
        h = rhnSQL.prepare(q % string.join(wheres, " and "))
        return _dml(h, params)

    def _update(self, params):
        q = """update rhnServerNetInterface
               set %s
               where %s"""
        self._null_columns(params, self._autonull)
        wheres = ['server_id', 'name']
        wheres = map(lambda x: '%s = :%s' % (x, x), wheres)
        wheres = string.join(wheres, " and ")
        updates = self.key_mapping.values()
        updates.sort()
        updates = map(lambda x: '%s = :%s' % (x, x), updates)
        updates = string.join(updates, ", ")
        h = rhnSQL.prepare(q % (updates, wheres))
        return _dml(h, params)

    def reload(self, server_id):
        """Load this server's interface rows (and their addresses) into db_ifaces."""
        h = rhnSQL.prepare("""
            select *
            from rhnServerNetInterface
            where server_id = :server_id
        """)
        h.execute(server_id=server_id)
        self.db_ifaces = []
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            hval = {'primary_id': row['id'], 'name': row['name'], 'server_id': server_id}
            for key in self.key_mapping.values():
                hval[key] = row[key]
            hval['ipv4'] = NetIfaceAddress4()
            hval['ipv4'].reload(hval['primary_id'])
            hval['ipv6'] = NetIfaceAddress6()
            hval['ipv6'].reload(hval['primary_id'])
            self.db_ifaces.append(hval)
        self.status = 0
        return 0
class NetIfaceAddress(Device):
    """ Base class for the per-interface address rows.

        self.ifaces maps address -> uploaded values, self.db_ifaces
        holds the rows currently in the database; save() reconciles
        the two.  Children (NetIfaceAddress4/6) override `table`,
        `unique` and `key_mapping`.
    """
    key_mapping = {
        'netmask': 'netmask',
        'address': 'address',
    }
    unique = ['address']  # to be overriden by child
    table = 'rhnServerNetAddress'  # to be overriden by child

    def __init__(self, list_ifaces=None):
        log_debug(4, list_ifaces)
        self.ifaces = {}      # address -> uploaded address values
        self.db_ifaces = []   # rows loaded from the database by reload()
        # parameters which are not allowed to be empty and set to NULL
        self._autonull = ('address', 'netmask')
        self.sequence = "rhn_srv_net_iface_id_seq"
        if not list_ifaces:
            return
        for info in list_ifaces:
            if not isinstance(info, type({})):
                raise rhnFault(53, "Unexpected format for interface %s" %
                               info)
            vdict = {}
            for key, mapping in self.key_mapping.items():
                # Look at the mapping first; if not found, look for the key
                if info.has_key(mapping):
                    k = mapping
                else:
                    k = key
                if not info.has_key(k):
                    raise rhnFault(53, "Unable to find required field %s"
                                   % (key))
                val = info[k]
                if mapping in ['ip_addr', 'netmask', 'broadcast', 'address']:
                    # bugzilla: 129840 kudzu (rhpl) will sometimes pad octets
                    # with leading zeros, causing confusion; clean those up
                    val = self.cleanse_ip_addr(val)
                vdict[mapping] = val
            self.ifaces[vdict['address']] = vdict

    def __str__(self):
        # debugging representation showing both uploaded and DB state
        return "<%s Class at %d: %s>\n" % (
            self.__class__.__name__,
            id(self), {
                "self.ifaces": self.ifaces,
                "self.db_ifaces": self.db_ifaces,
            })
    __repr__ = __str__

    def cleanse_ip_addr(self, val):
        """ to be overriden by child """
        return val

    def save(self, interface_id):
        """Reconcile the uploaded addresses with the rows in the database."""
        log_debug(4, self.ifaces)
        self.reload(interface_id)
        log_debug(4, "Net addresses in DB", self.db_ifaces)
        # Compute updates, deletes and inserts
        inserts = []
        updates = []
        deletes = []
        ifaces = self.ifaces.copy()
        for iface in self.db_ifaces:
            address = iface['address']
            if not self.ifaces.has_key(iface['address']):
                # To be deleted
                # filter out params, which are not used in query
                iface = dict((column, iface[column]) for column in self.unique)
                deletes.append(iface)
                continue
            uploaded_iface = ifaces[address]
            del ifaces[address]
            # FIXME this is inefficient for IPv4 as it row is present it will be always update
            if _hash_eq(uploaded_iface, iface):
                # Same value
                continue
            uploaded_iface.update({'interface_id': interface_id})
            updates.append(uploaded_iface)
        # Everything else in self.ifaces has to be inserted
        for name, iface in ifaces.items():
            iface['address'] = iface['address']
            iface['interface_id'] = interface_id
            inserts.append(iface)
        log_debug(4, "Deletes", deletes)
        log_debug(4, "Updates", updates)
        log_debug(4, "Inserts", inserts)
        self._delete(deletes)
        self._update(updates)
        self._insert(inserts)

    def _insert(self, params):
        # build the INSERT with sorted, bind-variable columns
        q = """insert into %s
               (%s) values (%s)"""
        self._null_columns(params, self._autonull)
        columns = self.key_mapping.values() + ['interface_id']
        columns.sort()
        bind_params = string.join(map(lambda x: ':' + x, columns), ", ")
        h = rhnSQL.prepare(q % (self.table, string.join(columns, ", "), bind_params))
        return _dml(h, params)

    def _delete(self, params):
        # rows are identified by the child-specific `unique` columns
        q = """delete from %s
               where %s"""
        columns = self.unique
        wheres = map(lambda x: '%s = :%s' % (x, x), columns)
        h = rhnSQL.prepare(q % (self.table, string.join(wheres, " and ")))
        return _dml(h, params)

    def _update(self, params):
        q = """update %s
               set %s
               where %s"""
        self._null_columns(params, self._autonull)
        wheres = self.unique
        wheres = map(lambda x: '%s = :%s' % (x, x), wheres)
        wheres = string.join(wheres, " and ")
        updates = self.key_mapping.values()
        updates.sort()
        updates = map(lambda x: '%s = :%s' % (x, x), updates)
        updates = string.join(updates, ", ")
        h = rhnSQL.prepare(q % (self.table, updates, wheres))
        return _dml(h, params)

    def reload(self, interface_id):
        """Load the address rows belonging to one interface into db_ifaces."""
        h = rhnSQL.prepare("""
            select *
            from %s
            where interface_id = :interface_id
            order by interface_id
        """ % self.table)
        h.execute(interface_id=interface_id)
        self.db_ifaces = []
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            hval = {'interface_id': row['interface_id']}
            for key in self.key_mapping.values():
                hval[key] = row[key]
            self.db_ifaces.append(hval)
        self.status = 0
        return 0
class NetIfaceAddress6(NetIfaceAddress):
    """ IPv6 Network interface """
    # uploaded key -> rhnServerNetAddress6 column
    key_mapping = {
        'netmask': 'netmask',
        'addr': 'address',
        'scope': 'scope',
    }
    table = 'rhnServerNetAddress6'
    # a row is identified by interface + address + scope
    unique = ['interface_id', 'address', 'scope']

    def __init__(self, addr_dict=None):
        NetIfaceAddress.__init__(self, addr_dict)
        # none of these columns may be stored as an empty string
        self._autonull = ('address', 'netmask', 'scope')
class NetIfaceAddress4(NetIfaceAddress):
    """ IPv4 Network interface """
    # uploaded key -> rhnServerNetAddress4 column
    key_mapping = {
        'netmask': 'netmask',
        'ipaddr': 'address',
        'broadcast': 'broadcast',
    }
    table = 'rhnServerNetAddress4'
    # at most one IPv4 row per interface
    unique = ['interface_id']

    def __init__(self, addr_dict=None):
        NetIfaceAddress.__init__(self, addr_dict)
        # none of these columns may be stored as an empty string
        self._autonull = ('address', 'netmask', 'broadcast')

    def cleanse_ip_addr(self, val):
        # IPv4 octets may carry leading zeros; normalize via module helper
        return cleanse_ip_addr(val)
def _hash_eq(h1, h2):
    """ Compares two hashes and return 1 if the first is a subset of the second """
    log_debug(5, h1, h2)
    for k, v in h1.items():
        # `in` replaces the deprecated dict.has_key(); a missing key or a
        # differing value means h1 is not a subset of h2
        if k not in h2 or h2[k] != v:
            return 0
    return 1
def _dml(statement, params):
    """Execute *statement* once per row in *params*; return the row count."""
    log_debug(5, params)
    if not params:
        return 0
    # executemany() wants one array per column, not one hash per row
    columnwise = _transpose(params)
    rowcount = statement.executemany(**columnwise)
    log_debug(5, "Affected rows", rowcount)
    return rowcount
def _transpose(hasharr):
""" Transpose the array of hashes into a hash of arrays """
if not hasharr:
return {}
keys = hasharr[0].keys()
result = {}
for k in keys:
result[k] = []
for hval in hasharr:
for k in keys:
if hval.has_key(k):
result[k].append(hval[k])
else:
result[k].append(None)
return result
class MemoryInformation(Device):
    """ Memory information """
    table = "rhnRAM"

    def __init__(self, dict=None):
        """Normalize the reported ram/swap sizes into plain numeric strings."""
        fields = ["ram", "swap"]
        Device.__init__(self, fields, dict, {"class": None})
        # use our own sequence
        self.sequence = "rhn_ram_id_seq"
        if not dict:
            return
        # Sometimes we get sent a NNNNL number and we need to strip the L
        for field in fields:
            if field not in self.data:
                continue
            value = self.data[field]
            # missing/unknown sizes are stored as -1
            if value in [None, "None", ""]:
                value = -1
            value = str(value)
            if value[-1] == 'L':
                value = value[:-1]
            self.data[field] = value
class DMIInformation(Device):
    """ DMI information """
    table = "rhnServerDMI"

    def __init__(self, dict=None):
        """Capture the DMI/BIOS identification strings for a server."""
        fields = ["vendor", "system", "product", "asset", "board",
                  "bios_vendor", "bios_version", "bios_release"]
        Device.__init__(self, fields, dict, {"class": None})
        # use our own sequence
        self.sequence = "rhn_server_dmi_id_seq"
        # every DMI column is NULLed rather than stored as ''
        self._autonull = tuple(fields)
        if not dict:
            return
        # deal with hardware with insanely long dmi strings...
        for key, value in self.data.items():
            # Some of the values may be None
            if value and isinstance(value, type("")):
                self.data[key] = value[:256]
class InstallInformation(Device):
    """ Install information """
    table = "rhnServerInstallInfo"

    def __init__(self, dict=None):
        """Record how the system was installed (method, ISO status, media sum)."""
        key_map = {
            'class': None,
            'installmethod': 'install_method',
            'isostatus': 'iso_status',
            'mediasum': 'mediasum',
        }
        Device.__init__(self,
                        ['install_method', 'iso_status', 'mediasum'],
                        dict, key_map)
        self.sequence = 'rhn_server_install_info_id_seq'
class Hardware:
    """ Support for the hardware items.

    Keeps an in-memory map of device class -> list of device instances for
    one server, and knows how to load/save that map from/to the database.
    NOTE: this is Python 2 code (raise-with-comma syntax, string.lower,
    list-returning filter); do not run it unmodified under Python 3.
    """
    def __init__(self):
        # device class -> list of device instances for this server
        self.__hardware = {}
        # 1 once reload_hardware_byid() has populated self.__hardware
        self.__loaded = 0
        # 1 while there are unsaved modifications (add/delete)
        self.__changed = 0
    def hardware_by_class(self, device_class):
        # Return the list of devices of the given class; raises KeyError if
        # that class was never loaded or added.
        return self.__hardware[device_class]
    def add_hardware(self, hardware):
        """ add new hardware

        Validates the incoming hash, picks the Device subclass matching its
        "class" entry and appends a new instance to the in-memory map.
        Returns 0 on success, -1 on empty input or missing "class", and
        None (after mailing a traceback) on an unknown class name.
        """
        log_debug(4, hardware)
        if not hardware:
            return -1
        # plain dicts are promoted to a case-insensitive dict wrapper
        if type(hardware) == type({}):
            hardware = UserDictCase(hardware)
        if not isinstance(hardware, UserDictCase):
            log_error("argument type is not hash: %s" % hardware)
            raise TypeError, "This function requires a hash as an argument"
        # validation is important
        hw_class = hardware.get("class")
        if hw_class is None:
            return -1
        hw_class = string.lower(hw_class)
        class_type = None
        # all the generic PCI-style device kinds share HardwareDevice
        if hw_class in ["video", "audio", "audio_hd", "usb", "other", "hd", "floppy",
                        "mouse", "modem", "network", "cdrom", "scsi",
                        "unspec", "scanner", "tape", "capture", "raid",
                        "socket", "keyboard", "printer", "firewire", "ide"]:
            class_type = HardwareDevice
        elif hw_class == "cpu":
            class_type = CPUDevice
        elif hw_class == "netinfo":
            class_type = NetworkInformation
        elif hw_class == "memory":
            class_type = MemoryInformation
        elif hw_class == "dmi":
            class_type = DMIInformation
        elif hw_class == "installinfo":
            class_type = InstallInformation
        elif hw_class == "netinterfaces":
            class_type = NetIfaceInformation
        else:
            log_error("UNKNOWN CLASS TYPE `%s'" % hw_class)
            # Same trick: try-except and raise the exception so that Traceback
            # can send the e-mail
            try:
                raise KeyError, "Unknwon class type `%s' for hardware '%s'" % (
                    hw_class, hardware)
            except:
                Traceback(mail=1)
            return
        # create the new device
        new_dev = class_type(hardware)
        if self.__hardware.has_key(class_type):
            _l = self.__hardware[class_type]
        else:
            _l = self.__hardware[class_type] = []
        _l.append(new_dev)
        self.__changed = 1
        return 0
    def delete_hardware(self, sysid=None):
        """ This function deletes all hardware.

        Devices are only marked deleted here; the DB rows go away on the
        next save_hardware_byid().  Always returns 0.
        """
        log_debug(4, sysid)
        if not self.__loaded:
            self.reload_hardware_byid(sysid)
        hardware = self.__hardware
        if hardware == {}:
            # nothing to delete
            return 0
        self.__changed = 1
        for device_type in hardware.keys():
            for hw in hardware[device_type]:
                hw.status = 2  # deleted
            # filter out the hardware that was just added and then
            # deleted before saving (id == 0 means never persisted)
            hardware[device_type] = filter(lambda a:
                not (a.status == 2 and hasattr(a, "id") and a.id == 0),
                hardware[device_type])
        return 0
    def save_hardware_byid(self, sysid):
        """Save the hardware list.

        Persists every device for the given server id; a no-op (returning 0)
        when nothing was loaded or nothing changed.
        """
        log_debug(3, sysid, "changed = %s" % self.__changed)
        hardware = self.__hardware
        if hardware == {}:  # nothing loaded
            return 0
        if not self.__changed:
            return 0
        for device_type, hw_list in hardware.items():
            for hw in hw_list:
                hw.save(sysid)
        self.__changed = 0
        return 0
    def __load_from_db(self, DevClass, sysid):
        """ Load a certain hardware class from the database """
        if not self.__hardware.has_key(DevClass):
            self.__hardware[DevClass] = []
        # fetch the row ids of this device type for the server ...
        h = rhnSQL.prepare("select id from %s where server_id = :sysid" % DevClass.table)
        h.execute(sysid=sysid)
        rows = h.fetchall_dict() or []
        # ... and let each device re-load itself from its own row
        for device in rows:
            dev_id = device['id']
            dev = DevClass()
            dev.reload(dev_id)
            self.__hardware[DevClass].append(dev)
    def reload_hardware_byid(self, sysid):
        """ load all hardware devices for a server

        Returns 0 on success, -1 when no server id is given.
        """
        log_debug(4, sysid)
        if not sysid:
            return -1
        self.__hardware = {}  # discard what was already loaded
        # load from all hardware databases
        self.__load_from_db(HardwareDevice, sysid)
        self.__load_from_db(CPUDevice, sysid)
        self.__load_from_db(DMIInformation, sysid)
        self.__load_from_db(NetworkInformation, sysid)
        self.__load_from_db(MemoryInformation, sysid)
        self.__load_from_db(InstallInformation, sysid)
        # network interfaces use a single aggregate object, not one per row
        net_iface_info = NetIfaceInformation()
        net_iface_info.reload(sysid)
        self.__hardware[NetIfaceInformation] = [net_iface_info]
        # now set the flags
        self.__changed = 0
        self.__loaded = 1
        return 0
| gpl-2.0 |
pomegranited/edx-platform | openedx/core/lib/api/authentication.py | 36 | 3458 | """ Common Authentication Handlers used across projects. """
from rest_framework.authentication import SessionAuthentication
from rest_framework_oauth.authentication import OAuth2Authentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_oauth.compat import oauth2_provider, provider_now
class SessionAuthenticationAllowInactiveUser(SessionAuthentication):
    """Session-based authentication that does not require an active account.

    Behaves exactly like ``SessionAuthentication`` except that the
    ``user.is_active`` check is dropped: a user who has created an account
    but not yet activated it may still authenticate.  Use this ONLY for
    endpoints that are safe for un-activated users to hit — for example,
    letting users update their own enrollments before activation.
    """

    def authenticate(self, request):
        """Return ``(user, None)`` for a logged-in user, enforcing CSRF.

        Identical to ``SessionAuthentication.authenticate`` minus the
        ``is_active`` requirement.

        Args:
            request (HttpRequest)

        Returns:
            Tuple of ``(user, token)``, or ``None`` when unauthenticated.

        Raises:
            PermissionDenied: The CSRF token check failed.
        """
        # Work on the underlying HttpRequest object.
        http_request = request._request  # pylint: disable=protected-access
        session_user = getattr(http_request, 'user', None)

        # Reject missing/anonymous users outright; unlike the parent class,
        # an inactive (un-activated) account is accepted here, and CSRF
        # validation is skipped for the unauthenticated case.
        if not session_user or session_user.is_anonymous():
            return None

        # CSRF must still pass for session authentication.
        self.enforce_csrf(http_request)

        # CSRF passed with an authenticated (possibly inactive) user.
        return (session_user, None)
class OAuth2AuthenticationAllowInactiveUser(OAuth2Authentication):
    """OAuth2 authentication that accepts users whose account is inactive.

    Temporary workaround while the user's ``is_active`` field is coupled to
    email-address verification.  Once those are decoupled this override can
    go away.  Until then, this class authenticates a logged-in user without
    requiring the account to be "active", so OAuth2-accessible endpoints
    (e.g. mobile APIs) can be used before email verification.
    """

    def authenticate_credentials(self, request, access_token):
        """Look up ``access_token`` and return ``(user, token)``.

        Overrides the base implementation so that an inactive user is not
        rejected.

        Raises:
            AuthenticationFailed: no unexpired token matches.
        """
        access_token_model = oauth2_provider.oauth2.models.AccessToken
        try:
            # select_related avoids a second query for the token's user.
            # provider_now() switches to a timezone-aware datetime when the
            # installed oauth2_provider version supports it.
            token = access_token_model.objects.select_related('user').get(
                token=access_token, expires__gt=provider_now())
        except access_token_model.DoesNotExist:
            raise AuthenticationFailed('Invalid token')
        return token.user, token
| agpl-3.0 |
direvus/ansible | lib/ansible/modules/windows/win_get_url.py | 21 | 5031 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Paul Durivage <paul.durivage@rackspace.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_get_url
version_added: "1.7"
short_description: Downloads file from HTTP, HTTPS, or FTP to node
description:
- Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
server I(must) have direct access to the remote resource.
- For non-Windows targets, use the M(get_url) module instead.
author:
- Paul Durivage (@angstwad)
- Takeshi Kuramochi (@tksarah)
options:
url:
description:
- The full URL of a file to download.
required: yes
type: str
dest:
description:
- The location to save the file at the URL.
- Be sure to include a filename and extension as appropriate.
required: yes
type: path
force:
description:
- If C(yes), will always download the file. If C(no), will only
download the file if it does not exist or the remote file has been
modified more recently than the local file.
- This works by sending an http HEAD request to retrieve last modified
time of the requested resource, so for this to work, the remote web
server must support HEAD requests.
type: bool
default: 'yes'
version_added: "2.0"
headers:
description:
- Add custom HTTP headers to a request (as a dictionary).
type: dict
version_added: '2.4'
url_username:
description:
- Basic authentication username.
type: str
aliases: [ username ]
url_password:
description:
- Basic authentication password.
type: str
aliases: [ password ]
force_basic_auth:
description:
- If C(yes), will add a Basic authentication header on the initial request.
- If C(no), will use Microsoft's WebClient to handle authentication.
type: bool
default: 'no'
version_added: "2.5"
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
- If C(skip_certificate_validation) was set, it overrides this option.
type: bool
default: 'yes'
version_added: '2.4'
proxy_url:
description:
- The full URL of the proxy server to download through.
type: str
version_added: "2.0"
proxy_username:
description:
- Proxy authentication username.
type: str
version_added: "2.0"
proxy_password:
description:
- Proxy authentication password.
type: str
version_added: "2.0"
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in an environment
variable on the target hosts.
type: bool
default: 'yes'
version_added: '2.4'
timeout:
description:
- Timeout in seconds for URL request.
type: int
default: 10
version_added : '2.4'
notes:
- If your URL includes an escaped slash character (%2F) this module will convert it to a real slash.
This is a result of the behaviour of the System.Uri class as described in
L(the documentation,https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/network/schemesettings-element-uri-settings#remarks).
'''
EXAMPLES = r'''
- name: Download earthrise.jpg to specified path
win_get_url:
url: http://www.example.com/earthrise.jpg
dest: C:\Users\RandomUser\earthrise.jpg
- name: Download earthrise.jpg to specified path only if modified
win_get_url:
url: http://www.example.com/earthrise.jpg
dest: C:\Users\RandomUser\earthrise.jpg
force: no
- name: Download earthrise.jpg to specified path through a proxy server.
win_get_url:
url: http://www.example.com/earthrise.jpg
dest: C:\Users\RandomUser\earthrise.jpg
proxy_url: http://10.0.0.1:8080
proxy_username: username
proxy_password: password
- name: Download file from FTP with authentication
win_get_url:
url: ftp://server/file.txt
dest: '%TEMP%\ftp-file.txt'
url_username: ftp-user
url_password: ftp-password
'''
RETURN = r'''
dest:
description: destination file/path
returned: always
type: string
sample: C:\Users\RandomUser\earthrise.jpg
elapsed:
description: The elapsed seconds between the start of poll and the end of the module.
returned: always
type: float
sample: 2.1406487
url:
description: requested url
returned: always
type: string
sample: http://www.example.com/earthrise.jpg
msg:
description: Error message, or HTTP status message from web-server
returned: always
type: string
sample: OK
status_code:
description: HTTP status code
returned: always
type: int
sample: 200
'''
| gpl-3.0 |
apllicationCOM/youtube-dl-api-server | youtube_dl_server/youtube_dl/extractor/__init__.py | 2 | 20811 | from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import (
AdobeTVIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE, ARDMediathekIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
DailymotionCloudIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dramafever import (
DramaFeverIE,
DramaFeverSeriesIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import ImgurIE
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .letv import (
LetvIE,
LetvTvIE,
LetvPlaylistIE
)
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .megavideoz import MegaVideozIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
)
from .ndr import (
NDRIE,
NJoyIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .nerdist import NerdistIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
)
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLIE,
NHLNewsIE,
NHLVideocenterIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowtv import NowTVIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
TegenlichtVproIE,
)
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKTVIE,
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
)
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .openfilm import OpenFilmIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .pinkbike import PinkbikeIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .primesharetv import PrimeShareTVIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
)
from .quickvid import QuickVidIE
from .r7 import R7IE
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlNlIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .sandia import SandiaIE
from .safari import (
SafariIE,
SafariCourseIE,
)
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snagfilms import (
SnagFilmsIE,
SnagFilmsEmbedIE,
)
from .snotr import SnotrIE
from .sohu import SohuIE
from .soompi import (
SoompiIE,
SoompiShowIE,
)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .space import SpaceIE
from .spankbang import SpankBangIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import (
SportBoxIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
from .srf import SrfIE
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svt import (
SVTIE,
SVTPlayIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .thesixtyone import TheSixtyOneIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
)
from .tv4 import TV4IE
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentytwotracks import (
TwentyTwoTracksIE,
TwentyTwoTracksGenreIE
)
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchBookmarksIE,
TwitchStreamIE,
)
from .twitter import TwitterCardIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ultimedia import UltimediaIE
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vessel import VesselIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .webofstories import WebOfStoriesIE
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
# Collect every extractor class imported above: anything in this module's
# namespace whose name ends in 'IE'.  GenericIE is deliberately excluded
# from the comprehension and appended last, because extractors are tried
# in order and GenericIE matches almost any URL — it must be the fallback.
_ALL_CLASSES = [
    klass
    for name, klass in globals().items()
    if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
    """Return one fresh instance of every supported extractor.

    The resulting order is significant: when matching a URL, the first
    extractor that claims it handles the download (GenericIE comes last).
    """
    return [extractor_class() for extractor_class in _ALL_CLASSES]
def list_extractors(age_limit):
    """
    Return a list of extractors that are suitable for the given age,
    sorted by extractor ID (case-insensitive IE_NAME).
    """
    # A generator expression reads better than filter() + lambda and
    # behaves identically on both Python 2 and Python 3.
    suitable = (ie for ie in gen_extractors() if ie.is_suitable(age_limit))
    return sorted(suitable, key=lambda ie: ie.IE_NAME.lower())
def get_info_extractor(ie_name):
    """Return the info extractor class registered under ``ie_name``.

    ``ie_name`` is the bare name without the ``IE`` suffix, e.g. 'Youtube'.
    Raises KeyError if no such extractor exists in this module.
    """
    return globals()['%sIE' % ie_name]
| unlicense |
hgl888/chromium-crosswalk-efl | third_party/closure_linter/closure_linter/requireprovidesorter.py | 84 | 11383 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains logic for sorting goog.provide and goog.require statements.
Closurized JavaScript files use goog.provide and goog.require statements at the
top of the file to manage dependencies. These statements should be sorted
alphabetically, however, it is common for them to be accompanied by inline
comments or suppression annotations. In order to sort these statements without
disrupting their comments and annotations, the association between statements
and comments/annotations must be maintained while sorting.
RequireProvideSorter: Handles checking/fixing of provide/require statements.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class RequireProvideSorter(object):
  """Checks for and fixes alphabetization of provide and require statements.

  When alphabetizing, comments on the same line or comments directly above a
  goog.provide or goog.require statement are associated with that statement and
  stay with the statement as it gets sorted.
  """

  def CheckProvides(self, token):
    """Checks alphabetization of goog.provide statements.

    Iterates over tokens in given token stream, identifies goog.provide tokens,
    and checks that they occur in alphabetical order by the object being
    provided.

    Args:
      token: A token in the token stream before any goog.provide tokens.

    Returns:
      The first provide token in the token stream.
      None is returned if all goog.provide statements are already sorted.
    """
    provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
    provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
    sorted_provide_strings = sorted(provide_strings)
    if provide_strings != sorted_provide_strings:
      # Out of order: report the first provide token; callers flag/fix from it.
      return provide_tokens[0]
    return None

  def CheckRequires(self, token):
    """Checks alphabetization of goog.require statements.

    Iterates over tokens in given token stream, identifies goog.require tokens,
    and checks that they occur in alphabetical order by the dependency being
    required.

    Args:
      token: A token in the token stream before any goog.require tokens.

    Returns:
      The first require token in the token stream.
      None is returned if all goog.require statements are already sorted.
    """
    require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
    require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
    sorted_require_strings = sorted(require_strings)
    if require_strings != sorted_require_strings:
      return require_tokens[0]
    return None

  def FixProvides(self, token):
    """Sorts goog.provide statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.provide'))

  def FixRequires(self, token):
    """Sorts goog.require statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.require'))

  def _FixProvidesOrRequires(self, tokens):
    """Sorts goog.provide or goog.require statements in place.

    Args:
      tokens: A list of goog.provide or goog.require tokens in the order they
              appear in the token stream. i.e. the first token in this list must
              be the first goog.provide or goog.require token.
    """
    strings = self._GetRequireOrProvideTokenStrings(tokens)
    sorted_strings = sorted(strings)
    # Make a separate pass to remove any blank lines between goog.require/
    # goog.provide tokens.
    first_token = tokens[0]
    last_token = tokens[-1]
    i = last_token
    while i != first_token and i is not None:
      if i.type is Type.BLANK_LINE:
        tokenutil.DeleteToken(i)
      i = i.previous
    # A map from required/provided object name to tokens that make up the line
    # it was on, including any comments immediately before it or after it on the
    # same line.
    tokens_map = self._GetTokensMap(tokens)
    # Iterate over the map removing all tokens.
    for name in tokens_map:
      tokens_to_delete = tokens_map[name]
      for i in tokens_to_delete:
        tokenutil.DeleteToken(i)
    # Save token to rest of file. Sorted token will be inserted before this.
    rest_of_file = tokens_map[strings[-1]][-1].next
    # Re-add all tokens in the map in alphabetical order.
    insert_after = tokens[0].previous
    for string in sorted_strings:
      for i in tokens_map[string]:
        if rest_of_file:
          tokenutil.InsertTokenBefore(i, rest_of_file)
        else:
          # No token follows the statements (end of file): append after the
          # previously inserted token instead.
          tokenutil.InsertTokenAfter(i, insert_after)
          insert_after = i

  def _GetRequireOrProvideTokens(self, token, token_string):
    """Gets all goog.provide or goog.require tokens in the given token stream.

    Args:
      token: The first token in the token stream.
      token_string: One of 'goog.provide' or 'goog.require' to indicate which
                    tokens to find.

    Returns:
      A list of goog.provide or goog.require tokens in the order they appear in
      the token stream.
    """
    tokens = []
    while token:
      if token.type == Type.IDENTIFIER:
        if token.string == token_string:
          tokens.append(token)
        elif token.string not in [
            'goog.provide', 'goog.require', 'goog.setTestOnly']:
          # These 3 identifiers are at the top of the file. So if any other
          # identifier is encountered, return.
          break
      token = token.next
    return tokens

  def _GetRequireOrProvideTokenStrings(self, tokens):
    """Gets a list of strings corresponding to the given list of tokens.

    The string will be the next string in the token stream after each token in
    tokens. This is used to find the object being provided/required by a given
    goog.provide or goog.require token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A list of object names that are being provided or required by the given
      list of tokens. For example:
      ['object.a', 'object.c', 'object.b']
    """
    token_strings = []
    for token in tokens:
      if not token.is_deleted:
        name = tokenutil.GetStringAfterToken(token)
        token_strings.append(name)
    return token_strings

  def _GetTokensMap(self, tokens):
    """Gets a map from object name to tokens associated with that object.

    Starting from the goog.provide/goog.require token, searches backwards in the
    token stream for any lines that start with a comment. These lines are
    associated with the goog.provide/goog.require token. Also associates any
    tokens on the same line as the goog.provide/goog.require token with that
    token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A dictionary that maps object names to the tokens associated with the
      goog.provide or goog.require of that object name. For example:
      {
        'object.a': [JavaScriptToken, JavaScriptToken, ...],
        'object.b': [...]
      }
      The list of tokens includes any comment lines above the goog.provide or
      goog.require statement and everything after the statement on the same
      line. For example, all of the following would be associated with
      'object.a':
      /** @suppress {extraRequire} */
      goog.require('object.a'); // Some comment.
    """
    tokens_map = {}
    for token in tokens:
      object_name = tokenutil.GetStringAfterToken(token)
      # If the previous line starts with a comment, presume that the comment
      # relates to the goog.require or goog.provide and keep them together when
      # sorting.
      first_token = token
      previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
      while (previous_first_token and
             previous_first_token.IsAnyType(Type.COMMENT_TYPES)):
        first_token = previous_first_token
        previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
            first_token)
      # Find the last token on the line.
      last_token = tokenutil.GetLastTokenInSameLine(token)
      all_tokens = self._GetTokenList(first_token, last_token)
      tokens_map[object_name] = all_tokens
    return tokens_map

  def _GetTokenList(self, first_token, last_token):
    """Gets a list of all tokens from first_token to last_token, inclusive.

    Args:
      first_token: The first token to get.
      last_token: The last token to get.

    Returns:
      A list of all tokens between first_token and last_token, including both
      first_token and last_token.

    Raises:
      Exception: If the token stream ends before last_token is reached.
    """
    token_list = []
    token = first_token
    while token != last_token:
      if not token:
        raise Exception('ran out of tokens')
      token_list.append(token)
      token = token.next
    token_list.append(last_token)
    return token_list

  def GetFixedRequireString(self, token):
    """Get fixed/sorted order of goog.require statements.

    Args:
      token: The first token in the token stream.

    Returns:
      A string for correct sorted order of goog.require.
    """
    return self._GetFixedRequireOrProvideString(
        self._GetRequireOrProvideTokens(token, 'goog.require'))

  def GetFixedProvideString(self, token):
    """Get fixed/sorted order of goog.provide statements.

    Args:
      token: The first token in the token stream.

    Returns:
      A string for correct sorted order of goog.provide.
    """
    return self._GetFixedRequireOrProvideString(
        self._GetRequireOrProvideTokens(token, 'goog.provide'))

  def _GetFixedRequireOrProvideString(self, tokens):
    """Renders goog.provide or goog.require statements in sorted order.

    Args:
      tokens: A list of goog.provide or goog.require tokens in the order they
              appear in the token stream. i.e. the first token in this list must
              be the first goog.provide or goog.require token.

    Returns:
      A string for sorted goog.require or goog.provide statements
    """
    # A map from required/provided object name to tokens that make up the line
    # it was on, including any comments immediately before it or after it on the
    # same line.
    tokens_map = self._GetTokensMap(tokens)
    sorted_strings = sorted(tokens_map.keys())
    new_order = ''
    for string in sorted_strings:
      for i in tokens_map[string]:
        new_order += i.string
        if i.IsLastInLine():
          new_order += '\n'
    return new_order
| bsd-3-clause |
AgostonSzepessy/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/performance/concatenation.py | 451 | 1145 | from __future__ import absolute_import, division, unicode_literals
def f1():
    """Benchmark body: concatenation via augmented assignment (x += y + z)."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x += y + z
def f2():
    """Benchmark body: concatenation via plain rebinding (x = x + y + z)."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = x + y + z
def f3():
    """Benchmark body: concatenation via str.join over a tuple."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = "".join((x, y, z))
def f4():
    """Benchmark body: concatenation via %-style string formatting."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = "%s%s%s" % (x, y, z)
import timeit
# Time each concatenation strategy (f1-f4) one million times per repeat,
# three repeats each, and print all timings plus the best (minimum) one.
for x in range(4):
    statement = "f%s" % (x + 1)
    t = timeit.Timer(statement, "from __main__ import " + statement)
    r = t.repeat(3, 1000000)
    print(r, min(r))
| mpl-2.0 |
terbolous/SickRage | lib/hachoir_parser/archive/ace.py | 95 | 9944 | """
ACE parser
From wotsit.org and the SDK header (bitflags)
Partial study of a new block type (5) I've called "new_recovery", as its
syntax is very close to the former one (of type 2).
Status: can only read totally file and header blocks.
Author: Christophe Gisquet <christophe.gisquet@free.fr>
Creation date: 19 january 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (StaticFieldSet, FieldSet,
Bit, Bits, NullBits, RawBytes, Enum,
UInt8, UInt16, UInt32,
PascalString8, PascalString16, String,
TimeDateMSDOS32)
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.common.msdos import MSDOSFileAttr32
MAGIC = "**ACE**"
# Host OS codes; only these two select MS-DOS style file attributes
# in fileHeader() below.
OS_MSDOS = 0
OS_WIN32 = 2
# Values of the "host_os" field of the archive header.
HOST_OS = {
    0: "MS-DOS",
    1: "OS/2",
    2: "Win32",
    3: "Unix",
    4: "MAC-OS",
    5: "Win NT",
    6: "Primos",
    7: "APPLE GS",
    8: "ATARI",
    9: "VAX VMS",
    10: "AMIGA",
    11: "NEXT",
}
# Values of the "compression_type" field of file blocks.
COMPRESSION_TYPE = {
    0: "Store",
    1: "Lempel-Ziv 77",
    2: "ACE v2.0",
}
# Values of the "compression_mode" field of file blocks.
COMPRESSION_MODE = {
    0: "fastest",
    1: "fast",
    2: "normal",
    3: "good",
    4: "best",
}
# TODO: Computing the CRC16 would also prove useful
#def markerValidate(self):
# return not self["extend"].value and self["signature"].value == MAGIC and \
# self["host_os"].value<12
class MarkerFlags(StaticFieldSet):
    # Bit layout of the archive (marker) header flags.
    format = (
        (Bit, "extend", "Whether the header is extended"),
        (Bit, "has_comment", "Whether the archive has a comment"),
        (NullBits, "unused", 7, "Reserved bits"),
        (Bit, "sfx", "SFX"),
        (Bit, "limited_dict", "Junior SFX with 256K dictionary"),
        (Bit, "multi_volume", "Part of a set of ACE archives"),
        (Bit, "has_av_string", "This header holds an AV-string"),
        (Bit, "recovery_record", "Recovery record preset"),
        (Bit, "locked", "Archive is locked"),
        (Bit, "solid", "Archive uses solid compression")
    )
def markerFlags(self):
    # Flags parser used for the archive header (block type 0).
    yield MarkerFlags(self, "flags", "Marker flags")
def markerHeader(self):
    """Yield the type-specific fields of the archive (marker) header, type 0."""
    yield String(self, "signature", 7, "Signature")
    yield UInt8(self, "ver_extract", "Version needed to extract archive")
    yield UInt8(self, "ver_created", "Version used to create archive")
    yield Enum(UInt8(self, "host_os", "OS where the files were compressed"), HOST_OS)
    yield UInt8(self, "vol_num", "Volume number")
    yield TimeDateMSDOS32(self, "time", "Date and time (MS DOS format)")
    yield Bits(self, "reserved", 64, "Reserved size for future extensions")
    # Optional trailers, gated by the flag bits parsed by markerFlags().
    flags = self["flags"]
    if flags["has_av_string"].value:
        yield PascalString8(self, "av_string", "AV String")
    if flags["has_comment"].value:
        size = filesizeHandler(UInt16(self, "comment_size", "Comment size"))
        yield size
        if size.value > 0:
            yield RawBytes(self, "compressed_comment", size.value, \
                "Compressed comment")
class FileFlags(StaticFieldSet):
    # Bit layout of the per-file header flags.
    format = (
        (Bit, "extend", "Whether the header is extended"),
        (Bit, "has_comment", "Presence of file comment"),
        (Bits, "unused", 10, "Unused bit flags"),
        (Bit, "encrypted", "File encrypted with password"),
        (Bit, "previous", "File continued from previous volume"),
        (Bit, "next", "File continues on the next volume"),
        (Bit, "solid", "File compressed using previously archived files")
    )
def fileFlags(self):
    # Flags parser used for file blocks (block type 1).
    yield FileFlags(self, "flags", "File flags")
def fileHeader(self):
    """Yield the type-specific header fields of a file block (type 1)."""
    yield filesizeHandler(UInt32(self, "compressed_size", "Size of the compressed file"))
    yield filesizeHandler(UInt32(self, "uncompressed_size", "Uncompressed file size"))
    yield TimeDateMSDOS32(self, "ftime", "Date and time (MS DOS format)")
    # Attribute layout depends on the archive-wide host OS from the marker
    # header; only MS-DOS/Win32 attributes are decoded bit by bit.
    if self["/header/host_os"].value in (OS_MSDOS, OS_WIN32):
        yield MSDOSFileAttr32(self, "file_attr", "File attributes")
    else:
        yield textHandler(UInt32(self, "file_attr", "File attributes"), hexadecimal)
    yield textHandler(UInt32(self, "file_crc32", "CRC32 checksum over the compressed file)"), hexadecimal)
    yield Enum(UInt8(self, "compression_type", "Type of compression"), COMPRESSION_TYPE)
    yield Enum(UInt8(self, "compression_mode", "Quality of compression"), COMPRESSION_MODE)
    yield textHandler(UInt16(self, "parameters", "Compression parameters"), hexadecimal)
    yield textHandler(UInt16(self, "reserved", "Reserved data"), hexadecimal)
    # Filename
    yield PascalString16(self, "filename", "Filename")
    # Optional comment, gated by the flag bits parsed by fileFlags().
    if self["flags/has_comment"].value:
        yield filesizeHandler(UInt16(self, "comment_size", "Size of the compressed comment"))
        if self["comment_size"].value > 0:
            yield RawBytes(self, "comment_data", self["comment_size"].value, "Comment data")
def fileBody(self):
    """Yield the compressed payload of a file block (may be empty)."""
    size = self["compressed_size"].value
    if size > 0:
        yield RawBytes(self, "compressed_data", size, "Compressed data")
def fileDesc(self):
    """Describe a file block as 'File entry: <name> (<compressed size>)'."""
    filename = self["filename"].value
    size_text = self["compressed_size"].display
    return "File entry: {0} ({1})".format(filename, size_text)
def recoveryHeader(self):
    """Yield the fields of a recovery-record block (type 2)."""
    yield filesizeHandler(UInt32(self, "rec_blk_size", "Size of recovery data"))
    # NOTE(review): .size is the bit size of the UInt32 field itself, not the
    # parsed value -- presumably .value was intended here; confirm.
    self.body_size = self["rec_blk_size"].size
    yield String(self, "signature", 7, "Signature, normally '**ACE**'")
    yield textHandler(UInt32(self, "relative_start",
        "Relative start (to this block) of the data this block is mode of"),
        hexadecimal)
    yield UInt32(self, "num_blocks", "Number of blocks the data is split into")
    yield UInt32(self, "size_blocks", "Size of these blocks")
    yield UInt16(self, "crc16_blocks", "CRC16 over recovery data")
    # size_blocks blocks of size size_blocks follow
    # The ultimate data is the xor data of all those blocks
    size = self["size_blocks"].value
    for index in xrange(self["num_blocks"].value):
        yield RawBytes(self, "data[]", size, "Recovery block %i" % index)
    yield RawBytes(self, "xor_data", size, "The XOR value of the above data blocks")
def recoveryDesc(self):
    """Describe a recovery block using the size of its recovery data.

    Fix: the original read a non-existent "body_size" field (recoveryHeader
    yields "rec_blk_size") and applied %u to a .display string, both of
    which raise at runtime; use the real field's display string instead.
    """
    return "Recovery block, size=%s" % self["rec_blk_size"].display
def newRecoveryHeader(self):
    """Yield the fields of a type-5 ("new recovery") block.

    This header is described nowhere; the layout below was reverse-engineered
    (see the module docstring) and mirrors the former type-2 syntax.
    """
    if self["flags/extend"].value:
        yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
        self.body_size = self["body_size"].value
    yield textHandler(UInt32(self, "unknown[]", "Unknown field, probably 0"),
        hexadecimal)
    yield String(self, "signature", 7, "Signature, normally '**ACE**'")
    yield textHandler(UInt32(self, "relative_start",
        "Offset (=crc16's) of this block in the file"), hexadecimal)
    yield textHandler(UInt32(self, "unknown[]",
        "Unknown field, probably 0"), hexadecimal)
class BaseFlags(StaticFieldSet):
    # Minimal 16-bit flag layout used for unknown block types.
    format = (
        (Bit, "extend", "Whether the header is extended"),
        (NullBits, "unused", 15, "Unused bit flags")
    )
def parseFlags(self):
    # Generic fallback flags parser for block types not listed in Block.TAG_INFO.
    yield BaseFlags(self, "flags", "Unknown flags")
def parseHeader(self):
    # Generic fallback header parser: extended blocks carry an explicit
    # body size, remembered for parseBody() below.
    if self["flags/extend"].value:
        yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
        self.body_size = self["body_size"].value
def parseBody(self):
    # Generic fallback body parser: keep the payload as raw, unparsed bytes.
    if self.body_size > 0:
        yield RawBytes(self, "body_data", self.body_size, "Body data, unhandled")
class Block(FieldSet):
    """One ACE block: a common header (crc16, size, type, flags) followed by
    a type-specific header and body.

    Per-type parsing is dispatched through TAG_INFO; unknown block types fall
    back to the generic parseFlags/parseHeader/parseBody helpers above.
    """

    # block type -> (field name, description (str or callable), flags parser,
    #                header parser, body parser); None entries mean "use the
    #                generic fallback".
    TAG_INFO = {
        0: ("header", "Archiver header", markerFlags, markerHeader, None),
        1: ("file[]", fileDesc, fileFlags, fileHeader, fileBody),
        2: ("recovery[]", recoveryDesc, recoveryHeader, None, None),
        5: ("new_recovery[]", None, None, newRecoveryHeader, None)
    }

    def __init__(self, parent, name, description=None):
        FieldSet.__init__(self, parent, name, description)
        self.body_size = 0
        self.desc_func = None
        # Fix: initialize the parser slots so the "if not self.parseX" fallback
        # below does not raise AttributeError for unknown block types.
        self.parseFlags = None
        self.parseHeader = None
        self.parseBody = None
        type = self["block_type"].value
        if type in self.TAG_INFO:
            self._name, desc, self.parseFlags, self.parseHeader, self.parseBody = self.TAG_INFO[type]
            if desc:
                if isinstance(desc, str):
                    self._description = desc
                else:
                    self.desc_func = desc
        else:
            self.warning("Processing as unknown block block of type %u" % type)
        # Fill any unset parser with its generic fallback.
        if not self.parseFlags:
            self.parseFlags = parseFlags
        if not self.parseHeader:
            self.parseHeader = parseHeader
        if not self.parseBody:
            self.parseBody = parseBody

    def createFields(self):
        # Common block header, shared by every block type.
        yield textHandler(UInt16(self, "crc16", "Archive CRC16 (from byte 4 on)"), hexadecimal)
        yield filesizeHandler(UInt16(self, "head_size", "Block size (from byte 4 on)"))
        yield UInt8(self, "block_type", "Block type")
        # Flags
        for flag in self.parseFlags(self):
            yield flag
        # Rest of the header
        for field in self.parseHeader(self):
            yield field
        # head_size counts from byte 4; current_size is in bits, hence //8,
        # and the crc16+head_size fields themselves account for the 2+2 bytes.
        size = self["head_size"].value - (self.current_size//8) + (2+2)
        if size > 0:
            yield RawBytes(self, "extra_data", size, "Extra header data, unhandled")
        # Body in itself
        for field in self.parseBody(self):
            yield field

    def createDescription(self):
        if self.desc_func:
            return self.desc_func(self)
        else:
            # Fix: the field is named "block_type" (see createFields);
            # there is no "type" field, so self["type"] raised an error.
            return "Block: %s" % self["block_type"].display
class AceFile(Parser):
    endian = LITTLE_ENDIAN
    PARSER_TAGS = {
        "id": "ace",
        "category": "archive",
        "file_ext": ("ace",),
        "mime": (u"application/x-ace-compressed",),
        "min_size": 50*8,
        "description": "ACE archive"
    }

    def validate(self):
        """Check the ACE magic; return True or an error message string."""
        # The "**ACE**" magic lives at byte offset 7 (after the first block's
        # crc16, head_size, block_type and flags), hence the 7*8 bit offset.
        if self.stream.readBytes(7*8, len(MAGIC)) != MAGIC:
            return "Invalid magic"
        return True

    def createFields(self):
        # An ACE archive is a flat sequence of blocks until end of stream.
        while not self.eof:
            yield Block(self, "block[]")
| gpl-3.0 |
janicduplessis/buck | third-party/py/pex/pex/archiver.py | 55 | 1560 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import contextlib
import os
import tarfile
import zipfile
from .common import safe_mkdtemp
class Archiver(object):
  """Unpacks tar/zip source archives into a directory.

  Dispatches on the filename extension to the matching stdlib archive
  reader and wraps that library's failures in Archiver-specific errors.
  """

  class Error(Exception): pass
  class UnpackError(Error): pass
  class InvalidArchive(Error): pass

  # extension -> (opener callable, error type raised by that opener)
  EXTENSIONS = {
    '.tar': (tarfile.TarFile.open, tarfile.ReadError),
    '.tar.gz': (tarfile.TarFile.open, tarfile.ReadError),
    '.tar.bz2': (tarfile.TarFile.open, tarfile.ReadError),
    '.tgz': (tarfile.TarFile.open, tarfile.ReadError),
    '.zip': (zipfile.ZipFile, zipfile.BadZipfile)
  }

  @classmethod
  def first_nontrivial_dir(cls, path):
    """Descend past directories that contain exactly one subdirectory and
    return the first directory holding real content."""
    current = path
    while True:
      entries = os.listdir(current)
      if len(entries) != 1:
        return current
      candidate = os.path.join(current, entries[0])
      if not os.path.isdir(candidate):
        return current
      current = candidate

  @classmethod
  def get_extension(cls, filename):
    """Return the registered archive extension of ``filename``, or None."""
    for extension in cls.EXTENSIONS:
      if filename.endswith(extension):
        return extension
    return None

  @classmethod
  def unpack(cls, filename, location=None):
    """Extract ``filename`` into ``location`` (or a fresh temp dir).

    Returns the first non-trivial directory of the extracted tree.

    Raises:
      InvalidArchive: if the extension is not a known archive format.
      UnpackError: if the archive library fails to extract the file.
    """
    target = location or safe_mkdtemp()
    extension = cls.get_extension(filename)
    if extension is None:
      raise cls.InvalidArchive('Unknown archive format: %s' % filename)
    opener, error_type = cls.EXTENSIONS[extension]
    try:
      with contextlib.closing(opener(filename)) as archive:
        archive.extractall(path=target)
    except error_type:
      raise cls.UnpackError('Could not extract %s' % filename)
    return cls.first_nontrivial_dir(target)
| apache-2.0 |
ThirdProject/android_external_chromium_org | third_party/python_gflags/gflags2man.py | 407 | 18864 | #!/usr/bin/env python
# Copyright (c) 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gflags2man runs a Google flags base program and generates a man page.
Run the program, parse the output, and then format that into a man
page.
Usage:
gflags2man <program> [program] ...
"""
# TODO(csilvers): work with windows paths (\) as well as unix (/)
# This may seem a bit of an end run, but it: doesn't bloat flags, can
# support python/java/C++, supports older executables, and can be
# extended to other document formats.
# Inspired by help2man.
import os
import re
import sys
import stat
import time
import gflags
_VERSION = '0.1'
def _GetDefaultDestDir():
home = os.environ.get('HOME', '')
homeman = os.path.join(home, 'man', 'man1')
if home and os.path.exists(homeman):
return homeman
else:
return os.environ.get('TMPDIR', '/tmp')
FLAGS = gflags.FLAGS
gflags.DEFINE_string('dest_dir', _GetDefaultDestDir(),
'Directory to write resulting manpage to.'
' Specify \'-\' for stdout')
gflags.DEFINE_string('help_flag', '--help',
'Option to pass to target program in to get help')
gflags.DEFINE_integer('v', 0, 'verbosity level to use for output')
_MIN_VALID_USAGE_MSG = 9 # if fewer lines than this, help is suspect
class Logging:
  """A super-simple logging class (NOTE: Python 2 print-statement syntax)."""
  def error(self, msg): print >>sys.stderr, "ERROR: ", msg
  def warn(self, msg): print >>sys.stderr, "WARNING: ", msg
  def info(self, msg): print msg
  def debug(self, msg): self.vlog(1, msg)
  def vlog(self, level, msg):
    # Verbosity-gated output: only printed when --v >= level.
    if FLAGS.v >= level: print msg
# Module-level singleton; intentionally shadows the stdlib 'logging' name.
logging = Logging()
class App:
  """Minimal application harness exposing usage() and run()."""
  def usage(self, shorthelp=0):
    # shorthelp is accepted for interface compatibility but not used here.
    print >>sys.stderr, __doc__
    print >>sys.stderr, "flags:"
    print >>sys.stderr, str(FLAGS)
  def run(self):
    main(sys.argv)
# Module-level singleton used by the __main__ entry point.
app = App()
def GetRealPath(filename):
  """Resolve an executable name to an absolute path.

  Args:
    filename: An executable filename: absolute, relative (./ or ../),
        or a bare name searched on $PATH.

  Returns:
    The absolute path of the executable, or None if it could not be found
    locally, absolutely, or in $PATH.
  """
  if os.path.isabs(filename):
    return filename
  if filename.startswith(('./', '../')):
    return os.path.abspath(filename)
  # Search each $PATH entry; results under relative entries are made absolute.
  for directory in os.getenv('PATH', '').split(':'):
    candidate = os.path.join(directory, filename)
    if os.path.exists(candidate):
      return candidate if os.path.isabs(directory) else os.path.abspath(candidate)
  if os.path.exists(filename):
    return os.path.abspath(filename)
  return None
class Flag(object):
  """Holds the parsed information about a single command-line flag."""

  def __init__(self, flag_desc, help):
    """Initialize the flag record.

    Args:
      flag_desc: The command-line forms this flag can take (string).
      help: The help text (string).
    """
    # Fields from the --help line itself, then fields filled in later
    # by the flag parsers (ParsePythonFlags and friends).
    self.desc = flag_desc
    self.help = help
    self.default = ''
    self.tips = ''
class ProgramInfo(object):
  """All the information gleaned from running a program with --help."""
  # Match a module block start, for python scripts --help, e.g.
  # "goopy.logging:"
  module_py_re = re.compile(r'(\S.+):$')
  # Match the start of a flag listing, e.g.
  # "-v,--verbosity: Logging verbosity"
  flag_py_re = re.compile(r'\s+(-\S+):\s+(.*)$')
  # Match "(default: '0')"
  flag_default_py_re = re.compile(r'\s+\(default:\s+\'(.*)\'\)$')
  # Match "(an integer)"
  flag_tips_py_re = re.compile(r'\s+\((.*)\)$')
  # Match a module block start, for c++ programs --help, e.g.
  # "Flags from google/base/commandlineflags:"
  module_c_re = re.compile(r'\s+Flags from (\S.+):$')
  # Match the start of a flag listing
  flag_c_re = re.compile(r'\s+(-\S+)\s+(.*)$')
  # Match a module block start, for java programs --help, e.g.
  # "Flags for com.google.common.flags:"
  module_java_re = re.compile(r'\s+Flags for (\S.+):$')
  # Match the start of a flag listing
  flag_java_re = re.compile(r'\s+(-\S+)\s+(.*)$')
  def __init__(self, executable):
    """Create object with executable.
    Args:
      executable  Program to execute (string)
    """
    self.long_name = executable
    self.name = os.path.basename(executable)  # basename only
    # Get name without extension (PAR files)
    (self.short_name, self.ext) = os.path.splitext(self.name)
    self.executable = GetRealPath(executable)  # absolute path, or None
    self.output = []  # output from the program. List of lines.
    self.desc = []  # top level description. List of lines
    self.modules = {}  # { section_name(string), [ flags ] }
    self.module_list = []  # list of module names in their original order
    self.date = time.localtime(time.time())  # default date info
  def Run(self):
    """Run it and collect output.
    Returns:
      1 (true)  If everything went well.
      0 (false) If there were problems.
    """
    if not self.executable:
      logging.error('Could not locate "%s"' % self.long_name)
      return 0
    # Use the executable's mtime as the man page date.
    finfo = os.stat(self.executable)
    self.date = time.localtime(finfo[stat.ST_MTIME])
    logging.info('Running: %s %s </dev/null 2>&1'
                 % (self.executable, FLAGS.help_flag))
    # --help output is often routed to stderr, so we combine with stdout.
    # Re-direct stdin to /dev/null to encourage programs that
    # don't understand --help to exit.
    # NOTE(review): os.popen4 is Python 2 only (removed in Python 3);
    # subprocess would be the modern replacement.
    (child_stdin, child_stdout_and_stderr) = os.popen4(
        [self.executable, FLAGS.help_flag])
    child_stdin.close()  # '</dev/null'
    self.output = child_stdout_and_stderr.readlines()
    child_stdout_and_stderr.close()
    if len(self.output) < _MIN_VALID_USAGE_MSG:
      logging.error('Error: "%s %s" returned only %d lines: %s'
                    % (self.name, FLAGS.help_flag,
                       len(self.output), self.output))
      return 0
    return 1
  def Parse(self):
    """Parse program output: detect the language, then parse its flags."""
    (start_line, lang) = self.ParseDesc()
    if start_line < 0:
      return
    if 'python' == lang:
      self.ParsePythonFlags(start_line)
    elif 'c' == lang:
      self.ParseCFlags(start_line)
    elif 'java' == lang:
      self.ParseJavaFlags(start_line)
  def ParseDesc(self, start_line=0):
    """Parse the initial description.
    This could be Python or C++.
    Returns:
      (start_line, lang_type)
        start_line  Line to start parsing flags on (int)
        lang_type   Either 'python' or 'c'
      (-1, '')  if the flags start could not be found
    """
    exec_mod_start = self.executable + ':'
    after_blank = 0
    start_line = 0  # ignore the passed-in arg for now (?)
    for start_line in range(start_line, len(self.output)):  # collect top description
      line = self.output[start_line].rstrip()
      # Python flags start with 'flags:\n'
      if ('flags:' == line
          and len(self.output) > start_line+1
          and '' == self.output[start_line+1].rstrip()):
        start_line += 2
        logging.debug('Flags start (python): %s' % line)
        return (start_line, 'python')
      # SWIG flags just have the module name followed by colon.
      if exec_mod_start == line:
        logging.debug('Flags start (swig): %s' % line)
        return (start_line, 'python')
      # C++ flags begin after a blank line and with a constant string
      if after_blank and line.startswith(' Flags from '):
        logging.debug('Flags start (c): %s' % line)
        return (start_line, 'c')
      # java flags begin with a constant string
      if line == 'where flags are':
        logging.debug('Flags start (java): %s' % line)
        start_line += 2  # skip "Standard flags:"
        return (start_line, 'java')
      logging.debug('Desc: %s' % line)
      self.desc.append(line)
      after_blank = (line == '')
    else:
      # for/else: ran through all output without finding a flags section.
      logging.warn('Never found the start of the flags section for "%s"!'
                   % self.long_name)
      return (-1, '')
  def ParsePythonFlags(self, start_line=0):
    """Parse python/swig style flags."""
    modname = None  # name of current module
    modlist = []
    flag = None
    for line_num in range(start_line, len(self.output)):  # collect flags
      line = self.output[line_num].rstrip()
      if not line:  # blank
        continue
      mobj = self.module_py_re.match(line)
      if mobj:  # start of a new module
        modname = mobj.group(1)
        logging.debug('Module: %s' % line)
        if flag:
          modlist.append(flag)
        self.module_list.append(modname)
        self.modules.setdefault(modname, [])
        modlist = self.modules[modname]
        flag = None
        continue
      mobj = self.flag_py_re.match(line)
      if mobj:  # start of a new flag
        if flag:
          modlist.append(flag)
        logging.debug('Flag: %s' % line)
        flag = Flag(mobj.group(1), mobj.group(2))
        continue
      # Anything else is a continuation line of the current flag.
      if not flag:  # continuation of a flag
        logging.error('Flag info, but no current flag "%s"' % line)
      mobj = self.flag_default_py_re.match(line)
      if mobj:  # (default: '...')
        flag.default = mobj.group(1)
        logging.debug('Fdef: %s' % line)
        continue
      mobj = self.flag_tips_py_re.match(line)
      if mobj:  # (tips)
        flag.tips = mobj.group(1)
        logging.debug('Ftip: %s' % line)
        continue
      if flag and flag.help:
        flag.help += line  # multiflags tack on an extra line
      else:
        logging.info('Extra: %s' % line)
    if flag:
      # Flush the last flag collected.
      modlist.append(flag)
  def ParseCFlags(self, start_line=0):
    """Parse C style flags."""
    modname = None  # name of current module
    modlist = []
    flag = None
    for line_num in range(start_line, len(self.output)):  # collect flags
      line = self.output[line_num].rstrip()
      if not line:  # blank lines terminate flags
        if flag:  # save last flag
          modlist.append(flag)
          flag = None
        continue
      mobj = self.module_c_re.match(line)
      if mobj:  # start of a new module
        modname = mobj.group(1)
        logging.debug('Module: %s' % line)
        if flag:
          modlist.append(flag)
        self.module_list.append(modname)
        self.modules.setdefault(modname, [])
        modlist = self.modules[modname]
        flag = None
        continue
      mobj = self.flag_c_re.match(line)
      if mobj:  # start of a new flag
        if flag:  # save last flag
          modlist.append(flag)
        logging.debug('Flag: %s' % line)
        flag = Flag(mobj.group(1), mobj.group(2))
        continue
      # append to flag help. type and default are part of the main text
      if flag:
        flag.help += ' ' + line.strip()
      else:
        logging.info('Extra: %s' % line)
    if flag:
      modlist.append(flag)
  def ParseJavaFlags(self, start_line=0):
    """Parse Java style flags (com.google.common.flags)."""
    # The java flags prints starts with a "Standard flags" "module"
    # that doesn't follow the standard module syntax.
    modname = 'Standard flags'  # name of current module
    self.module_list.append(modname)
    self.modules.setdefault(modname, [])
    modlist = self.modules[modname]
    flag = None
    for line_num in range(start_line, len(self.output)):  # collect flags
      line = self.output[line_num].rstrip()
      logging.vlog(2, 'Line: "%s"' % line)
      if not line:  # blank lines terminate module
        if flag:  # save last flag
          modlist.append(flag)
          flag = None
        continue
      mobj = self.module_java_re.match(line)
      if mobj:  # start of a new module
        modname = mobj.group(1)
        logging.debug('Module: %s' % line)
        if flag:
          modlist.append(flag)
        self.module_list.append(modname)
        self.modules.setdefault(modname, [])
        modlist = self.modules[modname]
        flag = None
        continue
      mobj = self.flag_java_re.match(line)
      if mobj:  # start of a new flag
        if flag:  # save last flag
          modlist.append(flag)
        logging.debug('Flag: %s' % line)
        flag = Flag(mobj.group(1), mobj.group(2))
        continue
      # append to flag help. type and default are part of the main text
      if flag:
        flag.help += ' ' + line.strip()
      else:
        logging.info('Extra: %s' % line)
    if flag:
      modlist.append(flag)
  def Filter(self):
    """Filter parsed data to create derived fields (short_desc)."""
    if not self.desc:
      self.short_desc = ''
      return
    for i in range(len(self.desc)):  # replace full path with name
      if self.desc[i].find(self.executable) >= 0:
        self.desc[i] = self.desc[i].replace(self.executable, self.name)
    self.short_desc = self.desc[0]
    word_list = self.short_desc.split(' ')
    all_names = [ self.name, self.short_name, ]
    # Since the short_desc is always listed right after the name,
    # trim it from the short_desc
    while word_list and (word_list[0] in all_names
                         or word_list[0].lower() in all_names):
      del word_list[0]
      self.short_desc = ''  # signal need to reconstruct
    if not self.short_desc and word_list:
      self.short_desc = ' '.join(word_list)
class GenerateDoc(object):
  """Abstract base class for emitting documentation about a program's flags.

  Concrete generators implement Open/Header/Body/Footer; Output() drives
  those four hooks in order.
  """

  def __init__(self, proginfo, directory='.'):
    """Create base object.

    Args:
      proginfo   A ProgramInfo object
      directory  Directory to write output into
    """
    self.info = proginfo
    self.dirname = directory

  def Output(self):
    """Output all sections of the page, in order."""
    for section in (self.Open, self.Header, self.Body, self.Footer):
      section()

  # Hooks that concrete subclasses must override.
  def Open(self):
    raise NotImplementedError

  def Header(self):
    raise NotImplementedError

  def Body(self):
    raise NotImplementedError

  def Footer(self):
    raise NotImplementedError
class GenerateMan(GenerateDoc):
  """Output a man page (roff, section 1) describing a program's flags."""

  def __init__(self, proginfo, directory='.'):
    """Create base object.

    Args:
      proginfo   A ProgramInfo object
      directory  Directory to write output into
    """
    GenerateDoc.__init__(self, proginfo, directory)

  def Open(self):
    # '-' is a conventional alias for stdout.
    if self.dirname == '-':
      logging.info('Writing to stdout')
      self.fp = sys.stdout
    else:
      # Section-1 man pages take a '.1' suffix.
      self.file_path = '%s.1' % os.path.join(self.dirname, self.info.name)
      logging.info('Writing: %s' % self.file_path)
      self.fp = open(self.file_path, 'w')

  def Header(self):
    # .TH title line; %x formats the executable's modification date.
    self.fp.write(
        '.\\" DO NOT MODIFY THIS FILE! It was generated by gflags2man %s\n'
        % _VERSION)
    self.fp.write(
        '.TH %s "1" "%s" "%s" "User Commands"\n'
        % (self.info.name, time.strftime('%x', self.info.date), self.info.name))
    self.fp.write(
        '.SH NAME\n%s \\- %s\n' % (self.info.name, self.info.short_desc))
    self.fp.write(
        '.SH SYNOPSIS\n.B %s\n[\\fIFLAGS\\fR]...\n' % self.info.name)

  def Body(self):
    self.fp.write(
        '.SH DESCRIPTION\n.\\" Add any additional description here\n.PP\n')
    for ln in self.info.desc:
      self.fp.write('%s\n' % ln)
    self.fp.write(
        '.SH OPTIONS\n')
    # This shows flags in the original order
    for modname in self.info.module_list:
      # Display the program name instead of the full executable path.
      if modname.find(self.info.executable) >= 0:
        mod = modname.replace(self.info.executable, self.info.name)
      else:
        mod = modname
      self.fp.write('\n.P\n.I %s\n' % mod)
      for flag in self.info.modules[modname]:
        help_string = flag.help
        # Default value and tips go on their own (broken) line.
        if flag.default or flag.tips:
          help_string += '\n.br\n'
        if flag.default:
          help_string += ' (default: \'%s\')' % flag.default
        if flag.tips:
          help_string += ' (%s)' % flag.tips
        self.fp.write(
            '.TP\n%s\n%s\n' % (flag.desc, help_string))

  def Footer(self):
    # \(co is the roff copyright symbol.
    self.fp.write(
        '.SH COPYRIGHT\nCopyright \(co %s Google.\n'
        % time.strftime('%Y', self.info.date))
    self.fp.write('Gflags2man created this page from "%s %s" output.\n'
                  % (self.info.name, FLAGS.help_flag))
    self.fp.write('\nGflags2man was written by Dan Christian. '
                  ' Note that the date on this'
                  ' page is the modification date of %s.\n' % self.info.name)
def main(argv):
  """Generate a man page for every program named on the command line."""
  argv = FLAGS(argv)  # handles help as well
  if len(argv) <= 1:
    app.usage(shorthelp=1)
    return 1
  for prog_path in argv[1:]:
    info = ProgramInfo(prog_path)
    if not info.Run():
      continue
    info.Parse()
    info.Filter()
    GenerateMan(info, FLAGS.dest_dir).Output()
  return 0
if __name__ == '__main__':
  # NOTE(review): assumes a google-style app.run() that parses flags and
  # then dispatches to main() -- confirm against the module's imports.
  app.run()
| bsd-3-clause |
joone/chromium-crosswalk | tools/grit/grit/format/resource_map_unittest.py | 16 | 10682 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.resource_map'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import StringIO
import unittest
from grit import grd_reader
from grit import util
from grit.format import resource_map
class FormatResourceMapUnittest(unittest.TestCase):
  """Exercises the resource_map formatters against small in-memory .grd files.

  NOTE(review): the internal whitespace of the expected-output string
  literals may have been lost in transit; confirm against upstream grit
  sources before relying on exact-match assertions.
  """

  def testFormatResourceMap(self):
    # Header, name->id source, and file->id source for a mix of
    # structures and conditional includes.
    grd = grd_reader.Parse(StringIO.StringIO(
      '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en" current_release="3"
base_dir=".">
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header"
filename="the_resource_map_header.h" />
</outputs>
<release seq="3">
<structures first_id="300">
<structure type="menu" name="IDC_KLONKMENU"
file="grit\\testdata\\klonk.rc" encoding="utf-16" />
</structures>
<includes first_id="10000">
<include type="foo" file="abc" name="IDS_FIRSTPRESENT" />
<if expr="False">
<include type="foo" file="def" name="IDS_MISSING" />
</if>
<if expr="lang != 'es'">
<include type="foo" file="ghi" name="IDS_LANGUAGESPECIFIC" />
</if>
<if expr="lang == 'es'">
<include type="foo" file="jkl" name="IDS_LANGUAGESPECIFIC" />
</if>
<include type="foo" file="mno" name="IDS_THIRDPRESENT" />
</includes>
</release>
</grit>'''), util.PathFromRoot('.'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDC_KLONKMENU", IDC_KLONKMENU},
{"IDS_FIRSTPRESENT", IDS_FIRSTPRESENT},
{"IDS_MISSING", IDS_MISSING},
{"IDS_LANGUAGESPECIFIC", IDS_LANGUAGESPECIFIC},
{"IDS_THIRDPRESENT", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_file_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"grit/testdata/klonk.rc", IDC_KLONKMENU},
{"abc", IDS_FIRSTPRESENT},
{"def", IDS_MISSING},
{"ghi", IDS_LANGUAGESPECIFIC},
{"jkl", IDS_LANGUAGESPECIFIC},
{"mno", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)

  def testFormatResourceMapWithOutputAllEqualsFalseForStructures(self):
    # With output_all_resource_defines="false", structures behind a false
    # <if> must not appear in the map.
    grd = grd_reader.Parse(StringIO.StringIO(
      '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en" current_release="3"
base_dir="." output_all_resource_defines="false">
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header"
filename="the_resource_map_header.h" />
<output type="resource_map_source"
filename="the_resource_map_header.cc" />
</outputs>
<release seq="3">
<structures first_id="300">
<structure type="chrome_scaled_image" name="IDR_KLONKMENU"
file="foo.png" />
<if expr="False">
<structure type="chrome_scaled_image" name="IDR_MISSING"
file="bar.png" />
</if>
</structures>
</release>
</grit>'''), util.PathFromRoot('.'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDR_KLONKMENU", IDR_KLONKMENU},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
    # NOTE(review): this re-runs the 'resource_map_source' formatter with
    # exactly the same expectation as above; presumably
    # 'resource_file_map_source' was intended -- confirm against upstream
    # history before changing.
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDR_KLONKMENU", IDR_KLONKMENU},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)

  def testFormatResourceMapWithOutputAllEqualsFalseForIncludes(self):
    # Same as above but for <include> elements.
    grd = grd_reader.Parse(StringIO.StringIO(
      '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en" current_release="3"
base_dir="." output_all_resource_defines="false">
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header"
filename="the_resource_map_header.h" />
</outputs>
<release seq="3">
<structures first_id="300">
<structure type="menu" name="IDC_KLONKMENU"
file="grit\\testdata\\klonk.rc" encoding="utf-16" />
</structures>
<includes first_id="10000">
<include type="foo" file="abc" name="IDS_FIRSTPRESENT" />
<if expr="False">
<include type="foo" file="def" name="IDS_MISSING" />
</if>
<include type="foo" file="mno" name="IDS_THIRDPRESENT" />
</includes>
</release>
</grit>'''), util.PathFromRoot('.'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDC_KLONKMENU", IDC_KLONKMENU},
{"IDS_FIRSTPRESENT", IDS_FIRSTPRESENT},
{"IDS_THIRDPRESENT", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_file_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_resource_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"grit/testdata/klonk.rc", IDC_KLONKMENU},
{"abc", IDS_FIRSTPRESENT},
{"mno", IDS_THIRDPRESENT},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)

  def testFormatStringResourceMap(self):
    # Message (string) resources: only messages behind true conditions
    # make it into the map.
    grd = grd_reader.Parse(StringIO.StringIO(
      '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en" current_release="3"
base_dir=".">
<outputs>
<output type="rc_header" filename="the_rc_header.h" />
<output type="resource_map_header" filename="the_rc_map_header.h" />
<output type="resource_map_source" filename="the_rc_map_source.cc" />
</outputs>
<release seq="1" allow_pseudo="false">
<messages fallback_to_english="true">
<message name="IDS_PRODUCT_NAME" desc="The application name">
Application
</message>
<if expr="True">
<message name="IDS_DEFAULT_TAB_TITLE_TITLE_CASE"
desc="In Title Case: The default title in a tab.">
New Tab
</message>
</if>
<if expr="False">
<message name="IDS_DEFAULT_TAB_TITLE"
desc="The default title in a tab.">
New tab
</message>
</if>
</messages>
</release>
</grit>'''), util.PathFromRoot('.'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_header')(grd, 'en', '.')))
    self.assertEqual('''\
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap kTheRcHeader[];
extern const size_t kTheRcHeaderSize;''', output)
    output = util.StripBlankLinesAndComments(''.join(
        resource_map.GetFormatter('resource_map_source')(grd, 'en', '.')))
    self.assertEqual('''\
#include "the_rc_map_header.h"
#include <stddef.h>
#include "base/macros.h"
#include "the_rc_header.h"
const GritResourceMap kTheRcHeader[] = {
{"IDS_PRODUCT_NAME", IDS_PRODUCT_NAME},
{"IDS_DEFAULT_TAB_TITLE_TITLE_CASE", IDS_DEFAULT_TAB_TITLE_TITLE_CASE},
};
const size_t kTheRcHeaderSize = arraysize(kTheRcHeader);''', output)
if __name__ == '__main__':
  unittest.main()  # run all tests in this module
| bsd-3-clause |
vrjuggler/maestro | maestro/gui/EnsembleModel.py | 2 | 6008 | # Maestro is Copyright (C) 2006-2008 by Infiscape Corporation
#
# Original Author: Aron Bierbaum
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from PyQt4 import QtCore, QtGui
import maestro.core
const = maestro.core.const
class EnsembleModel(QtCore.QAbstractListModel):
   """Qt list model exposing the nodes of an Ensemble, with drag/drop
   support for reordering nodes inside the ensemble.
   """
   # Custom MIME type used to carry node row indices during drag and drop.
   ensemble_mime_type = 'application/maestro-cluster-nodes'

   def __init__(self, ensemble, parent=None):
      QtCore.QAbstractListModel.__init__(self, parent)
      # Set the new ensemble configuration.
      self.mEnsemble = ensemble
      # Connect the new ensemble.
      self.connect(self.mEnsemble, QtCore.SIGNAL("ensembleChanged"), self.onEnsembleChanged)
      self.connect(self.mEnsemble, QtCore.SIGNAL("nodeChanged"), self.onNodeChanged)

   def onNodeChanged(self, node):
      """ Slot that is called when a node's state changes. If the currently
          selected node changes, we need to update the target list and the
          current default target.

          @param node: The node whose state changed.
      """
      if node in self.mEnsemble.mNodes:
         node_index = self.mEnsemble.mNodes.index(node)
         changed_index = self.index(node_index)
         # Notify attached views that this single row changed.
         self.emit(QtCore.SIGNAL("dataChanged(QModelIndex,QModelIndex)"),
            changed_index, changed_index)

   def onEnsembleChanged(self):
      """ Slot that is called when the ensemble has changed. This will
          force all views to be updated.
      """
      self.emit(QtCore.SIGNAL("modelReset()"))

   def flags(self, index):
      """Mark valid items editable and draggable; allow drops everywhere."""
      default_flags = QtCore.QAbstractListModel.flags(self, index)
      default_flags |= QtCore.Qt.ItemIsEditable
      if index.isValid():
         return QtCore.Qt.ItemIsDragEnabled | QtCore.Qt.ItemIsDropEnabled | default_flags
      else:
         return QtCore.Qt.ItemIsDropEnabled | default_flags

   def data(self, index, role=QtCore.Qt.DisplayRole):
      """ Returns the data representation of each node in the cluster.
      """
      if not index.isValid():
         return QtCore.QVariant()
      # Get the cluster node we want data for.
      cluster_node = self.mEnsemble.getNode(index.row())
      if role == QtCore.Qt.UserRole:
         return cluster_node
      if cluster_node is not None:
         # Return an icon representing the operating system.
         if role == QtCore.Qt.DecorationRole:
            if const.mOsIcons.has_key(cluster_node.getState()):
               return QtCore.QVariant(const.mOsIcons[cluster_node.getState()])
            else:
               return QtCore.QVariant()
         # Return the name of the node.
         elif role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
            return QtCore.QVariant(str(cluster_node.getName()))
         elif role == QtCore.Qt.UserRole:
            # NOTE(review): unreachable -- the UserRole case already
            # returned above.
            return cluster_node
      return QtCore.QVariant()

   def setData(self, index, value, role):
      """ Set the name of the cluster node at the given index. """
      if not index.isValid():
         return False
      if role == QtCore.Qt.EditRole and index.row() < self.rowCount():
         cluster_node = self.mEnsemble.getNode(index.row())
         if cluster_node is not None:
            new_name = str(value.toString())
            cluster_node.setName(new_name)
            self.emit(QtCore.SIGNAL("dataChanged(QModelIndex,QModelIndex)"), index, index)
            self.emit(QtCore.SIGNAL("dataChanged(int)"), index.row())
            return True
      return False

   def supportedDropActions(self):
      # Hold shift when copying to change drag modes.
      return (QtCore.Qt.CopyAction | QtCore.Qt.MoveAction)

   def mimeTypes(self):
      """ List of types we can represent. """
      types = QtCore.QStringList()
      types.append(EnsembleModel.ensemble_mime_type)
      return types

   def mimeData(self, indexes):
      """Serialize the dragged rows as 'maestro-node-ids:<row,row,...>'."""
      node_list_str = ''
      for index in indexes:
         if index.isValid():
            node_list_str += str(index.row()) + ','
      node_list_str = node_list_str.rstrip(',')
      mime_data = QtCore.QMimeData()
      text = "maestro-node-ids:%s" % node_list_str
      mime_data.setData(EnsembleModel.ensemble_mime_type, text)
      return mime_data

   def dropMimeData(self, mimeData, action, row, column, parent):
      """ Called when we drop a node.

          if row and col == (-1,-1) then just need to parent the node.
          Otherwise, the row is saying which child number we would like to be.
      """
      if not parent.isValid():
         return False
      if not mimeData.hasFormat(EnsembleModel.ensemble_mime_type):
         return False
      if action == QtCore.Qt.IgnoreAction:
         return True
      if column > 0:
         return False
      # Get node index list out of mime data.
      data = str(mimeData.data(EnsembleModel.ensemble_mime_type))
      (data_type, node_rows) = data.split(":")
      # Move each dragged node to the drop position.
      for row_str in node_rows.split(','):
         row = int(row_str)
         node = self.mEnsemble.getNode(row)
         new_index = parent.row()
         self.mEnsemble.moveNode(node, new_index)
      return True

   def rowCount(self, parent=QtCore.QModelIndex()):
      """ Returns the number of nodes in the current cluster configuration.
      """
      # If the parent is not valid, then we have no children.
      if parent.isValid():
         return 0
      else:
         return self.mEnsemble.getNumNodes()
beacloudgenius/edx-platform | common/djangoapps/config_models/admin.py | 83 | 2777 | """
Admin site models for managing :class:`.ConfigurationModel` subclasses
"""
from django.forms import models
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
# pylint: disable=protected-access
class ConfigurationModelAdmin(admin.ModelAdmin):
    """
    :class:`~django.contrib.admin.ModelAdmin` for :class:`.ConfigurationModel` subclasses
    """
    date_hierarchy = 'change_date'

    def get_actions(self, request):
        # Replace the default action set (which includes delete_selected)
        # with a single 'revert' action.
        return {
            'revert': (ConfigurationModelAdmin.revert, 'revert', 'Revert to the selected configuration')
        }

    def get_list_display(self, request):
        # Show every model field as a list column.
        return self.model._meta.get_all_field_names()

    # Don't allow deletion of configuration
    def has_delete_permission(self, request, obj=None):
        return False

    # Make all fields read-only when editing an object
    def get_readonly_fields(self, request, obj=None):
        if obj:  # editing an existing object
            return self.model._meta.get_all_field_names()
        return self.readonly_fields

    def add_view(self, request, form_url='', extra_context=None):
        # Prepopulate new configuration entries with the value of the current config
        get = request.GET.copy()
        get.update(models.model_to_dict(self.model.current()))
        request.GET = get
        return super(ConfigurationModelAdmin, self).add_view(request, form_url, extra_context)

    # Hide the save buttons in the change view
    def change_view(self, request, object_id, form_url='', extra_context=None):
        extra_context = extra_context or {}
        extra_context['readonly'] = True
        return super(ConfigurationModelAdmin, self).change_view(
            request,
            object_id,
            form_url,
            extra_context=extra_context
        )

    def save_model(self, request, obj, form, change):
        # Record which admin user made the change.
        obj.changed_by = request.user
        super(ConfigurationModelAdmin, self).save_model(request, obj, form, change)

    def revert(self, request, queryset):
        """
        Admin action to revert a configuration back to the selected value
        """
        if queryset.count() != 1:
            self.message_user(request, "Please select a single configuration to revert to.")
            return
        target = queryset[0]
        # Clearing the pk causes save_model to INSERT a new row, so the
        # configuration history is preserved rather than rewritten.
        target.id = None
        self.save_model(request, target, None, False)
        self.message_user(request, "Reverted configuration.")
        return HttpResponseRedirect(
            reverse(
                'admin:{}_{}_change'.format(
                    self.model._meta.app_label,
                    self.model._meta.module_name,
                ),
                args=(target.id,),
            )
        )
| agpl-3.0 |
skearnes/pylearn2 | pylearn2/gui/patch_viewer.py | 4 | 16076 | """
Functionality for display and saving images of collections of images patches.
"""
import numpy as np
from pylearn2.datasets.dense_design_matrix import DefaultViewConverter
from pylearn2.utils.image import Image, ensure_Image
from pylearn2.utils.image import show
from pylearn2.utils import py_integer_types
import warnings
def make_viewer(mat, grid_shape=None, patch_shape=None,
                activation=None, pad=None, is_color = False, rescale = True):
    """
    Given filters in rows, guesses dimensions of patches
    and nice dimensions for the PatchViewer and returns a PatchViewer
    containing visualizations of the filters.

    Parameters
    ----------
    mat : ndarray
        Values should lie in [-1, 1] if `rescale` is False.
        0. always indicates medium gray, with negative values drawn as
        blacker and positive values drawn as whiter.
        A matrix with each row being a different image patch, OR
        a 4D tensor in ('b', 0, 1, 'c') format.
        If matrix, we assume it was flattened using the same procedure as a
        ('b', 0, 1, 'c') DefaultViewConverter uses.
    grid_shape : tuple, optional
        A tuple of two ints specifying the shape of the grid in the
        PatchViewer, in (rows, cols) format. If not specified, this
        function does its best to choose an aesthetically pleasing
        value.
    patch_shape : tuple, optional
        A tuple of two ints specifying the shape of the patch.
        If `mat` is 4D, this function gets the patch shape from the shape of
        `mat`. If `mat` is 2D and patch_shape is not specified, this function
        assumes the patches are perfectly square.
    activation : iterable
        An iterable collection describing some kind of activation value
        associated with each patch. This is indicated with a border around the
        patch whose color intensity increases with activation value.
        The individual activation values may be single floats to draw one
        border or iterable collections of floats to draw multiple borders with
        differing intensities around the patch.
    pad : int, optional
        The amount of padding to add between patches in the displayed image.
    is_color : int
        If True, assume the images are in color.
        Note needed if `mat` is in ('b', 0, 1, 'c') format since we can just
        look at its shape[-1].
    rescale : bool
        If True, rescale each patch so that its highest magnitude pixel
        reaches a value of either 0 or 1 depending on the sign of that pixel.

    Returns
    -------
    patch_viewer : PatchViewer
        A PatchViewer containing the patches stored in `mat`.
    """
    num_channels = 1
    if is_color:
        num_channels = 3
    if grid_shape is None:
        grid_shape = PatchViewer.pick_shape(mat.shape[0] )
    if mat.ndim > 2:
        # Topological ('b', 0, 1, 'c') input: read shapes directly.
        patch_shape = mat.shape[1:3]
        topo_view = mat
        num_channels = mat.shape[3]
        is_color = num_channels > 1
    else:
        # Flattened design matrix: infer patch shape, then unflatten.
        if patch_shape is None:
            assert mat.shape[1] % num_channels == 0
            patch_shape = PatchViewer.pick_shape(mat.shape[1] / num_channels,
                                                 exact = True)
        assert mat.shape[1] == (patch_shape[0] *
                                patch_shape[1] *
                                num_channels)
        topo_shape = (patch_shape[0], patch_shape[1], num_channels)
        view_converter = DefaultViewConverter(topo_shape)
        topo_view = view_converter.design_mat_to_topo_view(mat)
    rval = PatchViewer(grid_shape, patch_shape, pad=pad, is_color = is_color)
    for i in xrange(mat.shape[0]):
        if activation is not None:
            # Support one activation value (or tuple of values) per patch.
            if hasattr(activation[0], '__iter__'):
                act = [a[i] for a in activation]
            else:
                act = activation[i]
        else:
            act = None
        patch = topo_view[i, :]
        rval.add_patch(patch, rescale=rescale,
                       activation=act)
    return rval
class PatchViewer(object):
"""
A class for viewing collections of image patches arranged in a grid.
Parameters
----------
grid_shape : tuple
A tuple in format (rows, cols), in units of patches. This determines
the size of the display. e.g. if you want to display 100 patches at
a time you might use (10, 10).
patch_shape : tuple
A tuple in format (rows, cols), in units of pixels. The patches must
be at most this large. It will be possible to display smaller patches
in this `PatchViewer`, but each patch will appear in the center of a
rectangle of this size.
is_color : bool
Whether the PatchViewer should be color (True) or grayscale (False)
pad : tuple
Tuple of ints in the form (pad vertical, pad horizontal). Number of
pixels to put between each patch in each direction.
background : float or 3-tuple
The color of the background of the display. Either a float in [0, 1]
if `is_color` is `False` or a 3-tuple/3d ndarray array of floats in
[0, 1] for RGB color if `is_color` is `True`.
"""
def __init__(self, grid_shape, patch_shape, is_color=False, pad = None,
background = None ):
if background is None:
if is_color:
background = np.zeros((3,))
else:
background = 0.
self.background = background
assert len(grid_shape) == 2
assert len(patch_shape) == 2
for shape in [grid_shape, patch_shape]:
for elem in shape:
if not isinstance(elem, py_integer_types):
raise ValueError("Expected grid_shape and patch_shape to"
"be pairs of ints, but they are %s and "
"%s respectively." % (str(grid_shape),
str(patch_shape)))
self.is_color = is_color
if pad is None:
self.pad = (5, 5)
else:
self.pad = pad
# these are the colors of the activation shells
self.colors = [np.asarray([1, 1, 0]),
np.asarray([1, 0, 1]),
np.asarray([0, 1, 0])]
height = (self.pad[0] * (1 + grid_shape[0]) + grid_shape[0] *
patch_shape[0])
width = (self.pad[1] * (1 + grid_shape[1]) + grid_shape[1] *
patch_shape[1])
self.patch_shape = patch_shape
self.grid_shape = grid_shape
image_shape = (height, width, 3)
self.image = np.zeros(image_shape)
assert self.image.shape[1] == (self.pad[1] *
(1 + self.grid_shape[1]) +
self.grid_shape[1] *
self.patch_shape[1])
self.cur_pos = (0, 0)
#needed to render in the background color
self.clear()
def clear(self):
"""
.. todo::
WRITEME
"""
if self.is_color:
for i in xrange(3):
self.image[:, :, i] = self.background[i] * .5 + .5
else:
self.image[:] = self.background * .5 + .5
self.cur_pos = (0, 0)
#0 is perfect gray. If not rescale, assumes images are in [-1,1]
def add_patch(self, patch, rescale=True, recenter=True, activation=None,
warn_blank_patch = True):
"""
Adds an image patch to the `PatchViewer`.
Patches are added left to right, top to bottom. If this method is
called when the `PatchViewer` is already full, it will clear the
viewer and start adding patches at the upper left again.
Parameters
----------
patch : ndarray
If this `PatchViewer` is in color (controlled by the `is_color`
parameter of the constructor) `patch` should be a 3D ndarray, with
the first axis being the rows of the image, the second axis
being the columsn of the image, and the third being RGB color
channels.
If this `PatchViewer` is grayscale, `patch` should be either a
3D ndarray with the third axis having length 1, or a 2D ndarray.
The values of the ndarray should be floating point. 0 is displayed
as gray. Negative numbers are displayed as blacker. Positive
numbers are displayed as whiter. See the `rescale` parameter for
more detail. This color convention was chosen because it is useful
for displaying weight matrices.
rescale : bool
If True, the maximum absolute value of a pixel in `patch` sets the
scale, so that abs(patch).max() is absolute white and
-abs(patch).max() is absolute black.
If False, `patch` should lie in [-1, 1].
recenter : bool
If True (default), if `patch` has smaller dimensions than were
specified to the constructor's `patch_shape` argument, we will
display the patch in the center of the area allocated to it in
the display grid.
If False, we will raise an exception if `patch` is not exactly
the specified shape.
activation : WRITEME
WRITEME
warn_blank_patch : WRITEME
WRITEME
"""
if warn_blank_patch and \
(patch.min() == patch.max()) and \
(rescale or patch.min() == 0.0):
warnings.warn("displaying totally blank patch")
if self.is_color:
assert patch.ndim == 3
if not (patch.shape[-1] == 3):
raise ValueError("Expected color image to have shape[-1]=3, "
"but shape[-1] is " + str(patch.shape[-1]))
else:
assert patch.ndim in [2, 3]
if patch.ndim == 3:
if patch.shape[-1] != 1:
raise ValueError("Expected 2D patch or 3D patch with 1 "
"channel, but got patch with shape " + \
str(patch.shape))
if recenter:
assert patch.shape[0] <= self.patch_shape[0]
if patch.shape[1] > self.patch_shape[1]:
raise ValueError("Given patch of width %d but only patches up"
" to width %d fit" \
% (patch.shape[1], self.patch_shape[1]))
rs_pad = (self.patch_shape[0] - patch.shape[0]) / 2
re_pad = self.patch_shape[0] - rs_pad - patch.shape[0]
cs_pad = (self.patch_shape[1] - patch.shape[1]) / 2
ce_pad = self.patch_shape[1] - cs_pad - patch.shape[1]
else:
if patch.shape[0:2] != self.patch_shape:
raise ValueError('Expected patch with shape %s, got %s' %
(str(self.patch_shape), str(patch.shape)))
rs_pad = 0
re_pad = 0
cs_pad = 0
ce_pad = 0
temp = patch.copy()
assert (not np.any(np.isnan(temp))) and (not np.any(np.isinf(temp)))
if rescale:
scale = np.abs(temp).max()
if scale > 0:
temp /= scale
else:
if temp.min() < -1.0 or temp.max() > 1.0:
raise ValueError('When rescale is set to False, pixel values '
'must lie in [-1,1]. Got [%f, %f].'
% (temp.min(), temp.max()))
temp *= 0.5
temp += 0.5
assert temp.min() >= 0.0
assert temp.max() <= 1.0
if self.cur_pos == (0, 0):
self.clear()
rs = self.pad[0] + (self.cur_pos[0] *
(self.patch_shape[0] + self.pad[0]))
re = rs + self.patch_shape[0]
assert self.cur_pos[1] <= self.grid_shape[1]
cs = self.pad[1] + (self.cur_pos[1] *
(self.patch_shape[1] + self.pad[1]))
ce = cs + self.patch_shape[1]
assert ce <= self.image.shape[1], (ce, self.image.shape[1])
temp *= (temp > 0)
if len(temp.shape) == 2:
temp = temp[:, :, np.newaxis]
assert ce-ce_pad <= self.image.shape[1]
self.image[rs + rs_pad:re - re_pad, cs + cs_pad:ce - ce_pad, :] = temp
if activation is not None:
if (not isinstance(activation, tuple) and
not isinstance(activation, list)):
activation = (activation,)
for shell, amt in enumerate(activation):
assert 2 * shell + 2 < self.pad[0]
assert 2 * shell + 2 < self.pad[1]
if amt >= 0:
act = amt * np.asarray(self.colors[shell])
self.image[rs + rs_pad - shell - 1,
cs + cs_pad - shell - 1:
ce - ce_pad + 1 + shell,
:] = act
self.image[re - re_pad + shell,
cs + cs_pad - 1 - shell:
ce - ce_pad + 1 + shell,
:] = act
self.image[rs + rs_pad - 1 - shell:
re - re_pad + 1 + shell,
cs + cs_pad - 1 - shell,
:] = act
self.image[rs + rs_pad - shell - 1:
re - re_pad + shell + 1,
ce - ce_pad + shell,
:] = act
self.cur_pos = (self.cur_pos[0], self.cur_pos[1] + 1)
if self.cur_pos[1] == self.grid_shape[1]:
self.cur_pos = (self.cur_pos[0] + 1, 0)
if self.cur_pos[0] == self.grid_shape[0]:
self.cur_pos = (0, 0)
def addVid(self, vid, rescale=False, subtract_mean=False, recenter=False):
myvid = vid.copy()
"""
.. todo::
WRITEME
"""
if subtract_mean:
myvid -= vid.mean()
if rescale:
scale = np.abs(myvid).max()
if scale == 0:
scale = 1
myvid /= scale
for i in xrange(vid.shape[2]):
self.add_patch(myvid[:, :, i], rescale=False, recenter=recenter)
def show(self):
"""
.. todo::
WRITEME
"""
#image.imview_async(self.image)
show(self.image)
def get_img(self):
"""
.. todo::
WRITEME
"""
#print 'image range '+str((self.image.min(), self.image.max()))
x = np.cast['uint8'](self.image * 255.0)
if x.shape[2] == 1:
x = x[:, :, 0]
ensure_Image()
img = Image.fromarray(x)
return img
def save(self, path):
"""
.. todo::
WRITEME
"""
self.get_img().save(path)
    def pick_shape(n, exact = False):
        """
        Return a (rows, cols) grid shape that can hold ``n`` elements.

        Parameters
        ----------
        n : int
            Number of elements the grid must accommodate.
        exact : bool
            If True, return a factor pair (rows * cols == n) whose aspect
            ratio is as close to square as possible. If False, return a
            near-square shape with rows * cols >= n.

        Returns
        -------
        tuple of (int, int)
            The chosen (rows, cols) shape.
        """
        if not isinstance(n, py_integer_types):
            raise TypeError("n must be an integer, but is "+str(type(n)))
        if exact:
            # Scan divisors up to sqrt(n); track the pair with the best
            # (most square) aspect ratio. r=1 always divides, so a valid
            # pair is always found (prime n yields (1, n)).
            best_r = -1
            best_c = -1
            best_ratio = 0
            for r in xrange(1,int(np.sqrt(n))+1):
                if n % r != 0:
                    continue
                # NOTE(review): relies on Python-2 integer division here
                # (n / r); under Python 3 this would produce a float —
                # confirm before porting.
                c = n / r
                ratio = min( float(r)/float(c), float(c)/float(r) )
                if ratio > best_ratio:
                    best_ratio = ratio
                    best_r = r
                    best_c = c
            return (best_r, best_c)
        # Approximate mode: start from floor(sqrt(n)) x floor(sqrt(n)) and
        # widen one column at a time until everything fits.
        sqrt = np.sqrt(n)
        r = c = int(np.floor(sqrt))
        while r * c < n:
            c += 1
        return (r, c)
    pick_shape = staticmethod(pick_shape)
| bsd-3-clause |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.4/django/middleware/http.py | 105 | 1643 | from django.utils.http import http_date, parse_http_date_safe
class ConditionalGetMiddleware(object):
    """
    Handles conditional GET operations. If the response has a ETag or
    Last-Modified header, and the request has If-None-Match or
    If-Modified-Since, the response is replaced by an HttpNotModified.
    Also sets the Date and Content-Length response-headers.
    """
    def process_response(self, request, response):
        # Always stamp the Date header; fill in Content-Length if absent.
        response['Date'] = http_date()
        if not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))

        # ETag / If-None-Match handling.
        if response.has_header('ETag'):
            client_etag = request.META.get('HTTP_IF_NONE_MATCH')
            if client_etag == response['ETag']:
                # Setting the status is enough here. The response handling
                # path automatically removes content for this status code
                # (in http.conditional_content_removal()).
                response.status_code = 304

        # Last-Modified / If-Modified-Since handling.
        if response.has_header('Last-Modified'):
            since = request.META.get('HTTP_IF_MODIFIED_SINCE')
            since = parse_http_date_safe(since) if since is not None else None
            if since is not None:
                last_modified = parse_http_date_safe(response['Last-Modified'])
                if last_modified is not None and last_modified <= since:
                    # Setting the status code is enough here (same reasons
                    # as above).
                    response.status_code = 304

        return response
| mit |
Deepakkothandan/ansible-modules-extras | monitoring/datadog_event.py | 24 | 5386 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
#
# This module is proudly sponsored by iGeolise (www.igeolise.com) and
# Tiny Lab Productions (www.tinylabproductions.com).
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: datadog_event
short_description: Posts events to DataDog service
description:
- "Allows to post events to DataDog (www.datadoghq.com) service."
- "Uses http://docs.datadoghq.com/api/#events API."
version_added: "1.3"
author: "Artūras `arturaz` Šlajus (@arturaz)"
notes: []
requirements: []
options:
api_key:
description: ["Your DataDog API key."]
required: true
default: null
title:
description: ["The event title."]
required: true
default: null
text:
description: ["The body of the event."]
required: true
default: null
date_happened:
description:
- POSIX timestamp of the event.
- Default value is now.
required: false
default: now
priority:
description: ["The priority of the event."]
required: false
default: normal
choices: [normal, low]
tags:
description: ["Comma separated list of tags to apply to the event."]
required: false
default: null
alert_type:
description: ["Type of alert."]
required: false
default: info
choices: ['error', 'warning', 'info', 'success']
aggregation_key:
description: ["An arbitrary string to use for aggregation."]
required: false
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES = '''
# Post an event with low priority
datadog_event: title="Testing from ansible" text="Test!" priority="low"
api_key="6873258723457823548234234234"
# Post an event with several tags
datadog_event: title="Testing from ansible" text="Test!"
api_key="6873258723457823548234234234"
tags=aa,bb,#host:{{ inventory_hostname }}
'''
def main():
    """
    Module entry point: declare the Ansible argument spec and post the
    event described by the parameters to DataDog.
    """
    module = AnsibleModule(
        argument_spec=dict(
            # no_log keeps the API key out of Ansible's logs.
            api_key=dict(required=True, no_log=True),
            title=dict(required=True),
            text=dict(required=True),
            date_happened=dict(required=False, default=None, type='int'),
            priority=dict(
                required=False, default='normal', choices=['normal', 'low']
            ),
            tags=dict(required=False, default=None, type='list'),
            alert_type=dict(
                required=False, default='info',
                choices=['error', 'warning', 'info', 'success']
            ),
            aggregation_key=dict(required=False, default=None),
            source_type_name=dict(
                required=False, default='my apps',
                choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps',
                         'feed', 'chef', 'puppet', 'git', 'bitbucket', 'fabric',
                         'capistrano']
            ),
            validate_certs = dict(default='yes', type='bool'),
        )
    )
    post_event(module)
def post_event(module):
    """
    Build the event payload from the module parameters and POST it to the
    DataDog v1 events API.

    Exits the module via ``exit_json`` on success (API answered 202 with
    status 'ok') or ``fail_json`` on any error response.
    """
    uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key']

    body = dict(
        title=module.params['title'],
        text=module.params['text'],
        priority=module.params['priority'],
        alert_type=module.params['alert_type']
    )

    # Optional fields: include each only when the user supplied a value.
    # (Replaces four copy-pasted `!= None` blocks with one loop and the
    # idiomatic `is not None` test.)
    for key in ('date_happened', 'tags', 'aggregation_key',
                'source_type_name'):
        if module.params[key] is not None:
            body[key] = module.params[key]

    json_body = module.jsonify(body)
    headers = {"Content-Type": "application/json"}

    (response, info) = fetch_url(module, uri, data=json_body, headers=headers)
    # DataDog answers 202 Accepted for successfully queued events.
    if info['status'] == 202:
        response_body = response.read()
        response_json = module.from_json(response_body)
        if response_json['status'] == 'ok':
            module.exit_json(changed=True)
        else:
            module.fail_json(msg=response)
    else:
        module.fail_json(**info)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/plat-irix6/AL.py | 132 | 1593 | from warnings import warnpy3k
warnpy3k("the AL module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# Sample rates (Hz).
RATE_48000 = 48000
RATE_44100 = 44100
RATE_32000 = 32000
RATE_22050 = 22050
RATE_16000 = 16000
RATE_11025 = 11025
RATE_8000 = 8000

# Sample formats.
SAMPFMT_TWOSCOMP = 1
SAMPFMT_FLOAT = 32
SAMPFMT_DOUBLE = 64

# Sample widths (bytes).
SAMPLE_8 = 1
SAMPLE_16 = 2
# SAMPLE_24 is the low 24 bits of a long, sign extended to 32 bits
SAMPLE_24 = 4

# Channel counts.
MONO = 1
STEREO = 2
QUADRO = 4 # 4CHANNEL is not a legal Python name

# Input sources.
INPUT_LINE = 0
INPUT_MIC = 1
INPUT_DIGITAL = 2

# Monitor modes.
MONITOR_OFF = 0
MONITOR_ON = 1

# Indices into an audio error descriptor.
ERROR_NUMBER = 0
ERROR_TYPE = 1
ERROR_LOCATION_LSP = 2
ERROR_LOCATION_MSP = 3
ERROR_LENGTH = 4

ERROR_INPUT_UNDERFLOW = 0
ERROR_OUTPUT_OVERFLOW = 1

# These seem to be not supported anymore:
##HOLD, RELEASE = 0, 1
##ATTAIL, ATHEAD, ATMARK, ATTIME = 0, 1, 2, 3

DEFAULT_DEVICE = 1

# Device parameter identifiers.
INPUT_SOURCE = 0
LEFT_INPUT_ATTEN = 1
RIGHT_INPUT_ATTEN = 2
INPUT_RATE = 3
OUTPUT_RATE = 4
LEFT_SPEAKER_GAIN = 5
RIGHT_SPEAKER_GAIN = 6
INPUT_COUNT = 7
OUTPUT_COUNT = 8
UNUSED_COUNT = 9
SYNC_INPUT_TO_AES = 10
SYNC_OUTPUT_TO_AES = 11
MONITOR_CTL = 12
LEFT_MONITOR_ATTEN = 13
RIGHT_MONITOR_ATTEN = 14

# Parameter value semantics.
ENUM_VALUE = 0 # only certain values are valid
RANGE_VALUE = 1 # any value in range is valid
wattad169/sportbuddy_server | lib/django/contrib/contenttypes/views.py | 380 | 3608 | from __future__ import unicode_literals
from django import http
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.requests import RequestSite
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
def shortcut(request, content_type_id, object_id):
    """
    Redirect to an object's page based on a content-type ID and an object ID.

    Resolves the object via the contenttypes framework, asks it for
    ``get_absolute_url()``, and tries to qualify a relative URL with the
    object's Site domain (m2m relation first, then FK, then the current
    site). Raises Http404 if the content type, object, or
    ``get_absolute_url`` method is missing.
    """
    # Look up the object, making sure it's got a get_absolute_url() function.
    try:
        content_type = ContentType.objects.get(pk=content_type_id)
        if not content_type.model_class():
            raise http.Http404(_("Content type %(ct_id)s object has no associated model") %
                               {'ct_id': content_type_id})
        obj = content_type.get_object_for_this_type(pk=object_id)
    except (ObjectDoesNotExist, ValueError):
        raise http.Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") %
                           {'ct_id': content_type_id, 'obj_id': object_id})
    try:
        get_absolute_url = obj.get_absolute_url
    except AttributeError:
        raise http.Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") %
                           {'ct_name': content_type.name})
    absurl = get_absolute_url()
    # Try to figure out the object's domain, so we can do a cross-site redirect
    # if necessary.
    # If the object actually defines a domain, we're done.
    if absurl.startswith(('http://', 'https://', '//')):
        return http.HttpResponseRedirect(absurl)
    # Otherwise, we need to introspect the object's relationships for a
    # relation to the Site object
    object_domain = None
    if apps.is_installed('django.contrib.sites'):
        Site = apps.get_model('sites.Site')
        opts = obj._meta
        # First, look for an many-to-many relationship to Site.
        for field in opts.many_to_many:
            if field.remote_field.model is Site:
                try:
                    # Caveat: In the case of multiple related Sites, this just
                    # selects the *first* one, which is arbitrary.
                    object_domain = getattr(obj, field.name).all()[0].domain
                except IndexError:
                    pass
                if object_domain is not None:
                    break
        # Next, look for a many-to-one relationship to Site.
        if object_domain is None:
            for field in obj._meta.fields:
                if field.remote_field and field.remote_field.model is Site:
                    try:
                        object_domain = getattr(obj, field.name).domain
                    except Site.DoesNotExist:
                        pass
                    if object_domain is not None:
                        break
        # Fall back to the current site (if possible).
        if object_domain is None:
            try:
                object_domain = Site.objects.get_current(request).domain
            except Site.DoesNotExist:
                pass
    else:
        # Fall back to the current request's site.
        object_domain = RequestSite(request).domain
    # If all that malarkey found an object domain, use it. Otherwise, fall back
    # to whatever get_absolute_url() returned.
    if object_domain is not None:
        protocol = request.scheme
        return http.HttpResponseRedirect('%s://%s%s'
                                         % (protocol, object_domain, absurl))
    else:
        return http.HttpResponseRedirect(absurl)
| bsd-3-clause |
arpitparmar5739/youtube-dl | youtube_dl/extractor/telebruxelles.py | 150 | 2352 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TeleBruxellesIE(InfoExtractor):
    """youtube-dl extractor for telebruxelles.be news/sport/JT pages."""
    _VALID_URL = r'https?://(?:www\.)?telebruxelles\.be/(news|sport|dernier-jt)/?(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'http://www.telebruxelles.be/news/auditions-devant-parlement-francken-galant-tres-attendus/',
        'md5': '59439e568c9ee42fb77588b2096b214f',
        'info_dict': {
            'id': '11942',
            'display_id': 'auditions-devant-parlement-francken-galant-tres-attendus',
            'ext': 'flv',
            'title': 'Parlement : Francken et Galant répondent aux interpellations de l’opposition',
            'description': 're:Les auditions des ministres se poursuivent*'
        },
        'params': {
            'skip_download': 'requires rtmpdump'
        },
    }, {
        'url': 'http://www.telebruxelles.be/sport/basket-brussels-bat-mons-80-74/',
        'md5': '181d3fbdcf20b909309e5aef5c6c6047',
        'info_dict': {
            'id': '10091',
            'display_id': 'basket-brussels-bat-mons-80-74',
            'ext': 'flv',
            'title': 'Basket : le Brussels bat Mons 80-74',
            'description': 're:^Ils l\u2019on fait ! En basket, le B*',
        },
        'params': {
            'skip_download': 'requires rtmpdump'
        },
    }]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Numeric article id comes from the <article id="post-NNN"> tag.
        article_id = self._html_search_regex(
            r"<article id=\"post-(\d+)\"", webpage, 'article ID')
        title = self._html_search_regex(
            r'<h1 class=\"entry-title\">(.*?)</h1>', webpage, 'title')
        description = self._og_search_description(webpage)
        # The page embeds the RTMP url split across string concatenations
        # ("..." + "..." + "..."); capture it as-is, then glue the pieces.
        rtmp_url = self._html_search_regex(
            r"file: \"(rtmp://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}/vod/mp4:\" \+ \"\w+\" \+ \".mp4)\"",
            webpage, 'RTMP url')
        rtmp_url = rtmp_url.replace("\" + \"", "")
        return {
            'id': article_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'url': rtmp_url,
            'ext': 'flv',
            'rtmp_live': True # if rtmpdump is not called with "--live" argument, the download is blocked and can be completed
        }
| unlicense |
kaynfiretvguru/Eldritch | plugin.video.exodus/resources/lib/sources/en/xmovies.py | 3 | 9412 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
    """Exodus scraper source for xmovies8 (movies and TV episodes).

    NOTE(review): the bare ``except`` clauses throughout are the addon's
    deliberate best-effort style — any scrape failure degrades to "no
    sources" rather than crashing playback resolution.
    """
    def __init__(self):
        # priority/language/domains are the interface Exodus inspects.
        self.priority = 1
        self.language = ['en']
        self.domains = ['xmovies8.tv', 'xmovies8.ru']
        self.base_link = 'https://xmovies8.ru'
        self.search_link = '/movies/search?s=%s'
    def matchAlias(self, title, aliases):
        # True if the normalized title matches any alias; falsy otherwise
        # (returns None when a match is not found but no error occurs).
        try:
            for alias in aliases:
                if cleantitle.get(title) == cleantitle.get(alias['title']):
                    return True
        except:
            return False
    def movie(self, imdb, title, localtitle, aliases, year):
        # Encode the movie identity as a query string; decoded in sources().
        try:
            aliases.append({'country': 'us', 'title': title})
            url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Same encoding scheme as movie(), for a show.
        try:
            aliases.append({'country': 'us', 'title': tvshowtitle})
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Augment the show query string with episode-level fields.
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return
    def searchShow(self, title, season, year, aliases, headers):
        """Find the site path for a show season, trying three query forms:
        '<title> SNN', then '<title> Season N', then '<title> <year>'."""
        try:
            title = cleantitle.normalize(title)
            t = cleantitle.get(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s S%02d' % (title.replace('\'', '-'), int(season)))))
            sr = client.request(url, headers=headers, timeout='10')
            if sr:
                r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.findall('(.+?)\s+-\s+S(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
                r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
            else:
                url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s Season %01d' % (title.replace('\'', '-'), int(season)))))
                sr = client.request(url, headers=headers, timeout='10')
                if sr:
                    r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                    r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                    r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                    r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
                    r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
                    r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
                else:
                    url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s %01d' % (title.replace('\'', '-'), int(year)))))
                    sr = client.request(url, headers=headers, timeout='10')
                    if sr:
                        r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                        r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                        r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
                        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
                        r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
            # Strip any scheme/host so only the site-relative path remains.
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return
    def searchMovie(self, title, year, aliases, headers):
        """Find the site path for a movie, matching alias + year when
        possible and falling back to alias-only."""
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % (cleantitle.geturl(title.replace('\'', '-'))))
            r = client.request(url, timeout='10', headers=headers)
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            try:
                match = [i[0] for i in r if self.matchAlias(i[1], aliases) and year == i[2]][0]
            except:
                match = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]
            url = re.findall('(?://.+?|)(/.+)', match)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Resolve the encoded identity into a list of playable sources."""
        try:
            sources = []
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # NOTE(review): eval() on the 'aliases' field — the string is
            # produced by movie()/tvshow() above, but json would be safer;
            # flagging only, not changing behavior here.
            aliases = eval(data['aliases'])
            headers = {}
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], data['year'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            url = re.sub('/watching.html$', '', url.strip('/'))
            url = url + '/watching.html'
            p = client.request(url, headers=headers, timeout='10')
            if episode > 0:
                # TV: follow the per-episode link before extracting.
                r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
                r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0]) for i in r]
                r = [i[0] for i in r if int(i[1]) == episode][0]
                p = client.request(r, headers=headers, timeout='10')
            referer = url
            id = re.findall('load_player\(.+?(\d+)', p)[0]
            r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3?id=%s' % id)
            r = client.request(r, headers=headers, referer=referer, XHR=True, timeout='10')
            url = json.loads(r)['value']
            if (url.startswith('//')):
                url = 'https:' + url
            url = client.request(url, headers=headers, XHR=True, output='geturl', timeout='10')
            if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
                sources.append({'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False,'debridonly': False})
                # raise to skip the gvideo path; outer except returns sources.
                raise Exception()
            r = client.request(url, headers=headers, XHR=True, timeout='10')
            try:
                src = json.loads(r)['playlist'][0]['sources']
                links = [i['file'] for i in src if 'file' in i]
                for i in links:
                    try:
                        sources.append(
                            {'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en',
                             'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass
            return sources
        except:
            return sources
    def resolve(self, url):
        # Retry googlepass up to three times; return the first hit.
        try:
            for i in range(3):
                u = directstream.googlepass(url)
                if not u == None: break
            return u
        except:
            return
cgc1983/zerorpc-python | tests/test_server.py | 72 | 5945 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import assert_raises
import gevent
import sys
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_server_manual():
    """Exercise the low-level channel API (emit/recv) against a live server."""
    endpoint = random_ipc_endpoint()
    class MySrv(zerorpc.Server):
        def lolita(self):
            return 42
        def add(self, a, b):
            return a + b
    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    client_channel = client.channel()
    client_channel.emit('lolita', tuple())
    event = client_channel.recv()
    assert list(event.args) == [42]
    client_channel.close()
    client_channel = client.channel()
    client_channel.emit('add', (1, 2))
    event = client_channel.recv()
    assert list(event.args) == [3]
    client_channel.close()
    srv.stop()
def test_client_server():
    """Basic round-trip through the high-level Client proxy."""
    endpoint = random_ipc_endpoint()
    class MySrv(zerorpc.Server):
        def lolita(self):
            return 42
        def add(self, a, b):
            return a + b
    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client = zerorpc.Client()
    client.connect(endpoint)
    print client.lolita()
    assert client.lolita() == 42
    print client.add(1, 4)
    assert client.add(1, 4) == 5
def test_client_server_client_timeout():
    """A slow server method must raise TimeoutExpired on the client side."""
    endpoint = random_ipc_endpoint()
    class MySrv(zerorpc.Server):
        def lolita(self):
            return 42
        def add(self, a, b):
            # Sleep far past the client's 2s timeout.
            gevent.sleep(10)
            return a + b
    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client = zerorpc.Client(timeout=2)
    client.connect(endpoint)
    # Python < 2.7 lacks assert_raises as a context manager.
    if sys.version_info < (2, 7):
        assert_raises(zerorpc.TimeoutExpired, client.add, 1, 4)
    else:
        with assert_raises(zerorpc.TimeoutExpired):
            print client.add(1, 4)
    client.close()
    srv.close()
def test_client_server_exception():
    """Server-side exceptions surface as RemoteError; valid calls still work."""
    endpoint = random_ipc_endpoint()
    class MySrv(zerorpc.Server):
        def raise_something(self, a):
            # IndexError/TypeError for bad input; a[4] for a sequence.
            return a[4]
    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client = zerorpc.Client(timeout=2)
    client.connect(endpoint)
    if sys.version_info < (2, 7):
        def _do_with_assert_raises():
            print client.raise_something(42)
        assert_raises(zerorpc.RemoteError, _do_with_assert_raises)
    else:
        with assert_raises(zerorpc.RemoteError):
            print client.raise_something(42)
    assert client.raise_something(range(5)) == 4
    client.close()
    srv.close()
def test_client_server_detailed_exception():
    """RemoteError carries the original exception's name and message."""
    endpoint = random_ipc_endpoint()
    class MySrv(zerorpc.Server):
        def raise_error(self):
            raise RuntimeError('oops!')
    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client = zerorpc.Client(timeout=2)
    client.connect(endpoint)
    if sys.version_info < (2, 7):
        def _do_with_assert_raises():
            print client.raise_error()
        assert_raises(zerorpc.RemoteError, _do_with_assert_raises)
    else:
        with assert_raises(zerorpc.RemoteError):
            print client.raise_error()
    try:
        client.raise_error()
    except zerorpc.RemoteError as e:
        print 'got that:', e
        print 'name', e.name
        print 'msg', e.msg
        assert e.name == 'RuntimeError'
        assert e.msg == 'oops!'
    client.close()
    srv.close()
def test_exception_compat_v1():
    """Error events keep the legacy (v1) wire format for old clients:
    detailed (name, msg, tb) by default, a single repr string with v=1."""
    endpoint = random_ipc_endpoint()
    class MySrv(zerorpc.Server):
        pass
    srv = MySrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client_events = zerorpc.Events(zmq.DEALER)
    client_events.connect(endpoint)
    client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
    rpccall = client.channel()
    rpccall.emit('donotexist', tuple())
    event = rpccall.recv()
    print event
    assert event.name == 'ERR'
    (name, msg, tb) = event.args
    print 'detailed error', name, msg, tb
    assert name == 'NameError'
    assert msg == 'donotexist'
    rpccall = client.channel()
    rpccall.emit('donotexist', tuple(), xheader=dict(v=1))
    event = rpccall.recv()
    print event
    assert event.name == 'ERR'
    (msg,) = event.args
    print 'msg only', msg
    assert msg == "NameError('donotexist',)"
    client_events.close()
    srv.close()
def test_removed_unscriptable_error_format_args_spec():
    """_format_args_spec(None) must return None instead of raising."""
    class MySrv(zerorpc.Server):
        pass
    srv = MySrv()
    return_value = srv._format_args_spec(None)
    assert return_value is None
| mit |
Metaswitch/calico-neutron | neutron/tests/unit/bigswitch/test_router_db.py | 8 | 28080 | # Copyright 2013 Big Switch Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Adapted from neutron.tests.unit.test_l3_plugin
import contextlib
import copy
import mock
from oslo.config import cfg
from six import moves
from webob import exc
from neutron.common import test_lib
from neutron import context
from neutron.extensions import l3
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.bigswitch.extensions import routerrule
from neutron.tests.unit.bigswitch import fake_server
from neutron.tests.unit.bigswitch import test_base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extension_extradhcpopts as test_extradhcp
from neutron.tests.unit import test_l3_plugin
HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
_uuid = uuidutils.generate_uuid
class RouterRulesTestExtensionManager(object):
    """Extension manager that layers the Big Switch router-rule
    attributes onto the stock L3 router resource for these tests."""
    def get_resources(self):
        # Merge the routerrule extended attributes into the router map
        # before handing back the standard L3 resources.
        l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(
            routerrule.EXTENDED_ATTRIBUTES_2_0['routers'])
        return l3.L3.get_resources()
    def get_actions(self):
        return []
    def get_request_extensions(self):
        return []
class DHCPOptsTestCase(test_base.BigSwitchTestBase,
                       test_extradhcp.TestExtraDhcpOpt):
    """Run the upstream extra-DHCP-option tests against the Big Switch
    plugin."""
    def setUp(self, plugin=None):
        self.setup_patches()
        self.setup_config_files()
        # NOTE(review): super() is anchored at ExtraDhcpOptDBTestCase to
        # skip intermediate setUp overrides in the MRO — confirm intended.
        super(test_extradhcp.ExtraDhcpOptDBTestCase,
              self).setUp(plugin=self._plugin_name)
        self.setup_db()
        self.startHttpPatch()
class RouterDBTestBase(test_base.BigSwitchTestBase,
                       test_l3_plugin.L3BaseForIntTests,
                       test_l3_plugin.L3NatTestCaseMixin):
    """Shared fixture: Big Switch plugin + L3 service plugin + routerrule
    extension, with the backend HTTP connection patched out."""
    # Disable the rescheduling mock from the base class for these tests.
    mock_rescheduling = False
    def setUp(self):
        self.setup_patches()
        self.setup_config_files()
        ext_mgr = RouterRulesTestExtensionManager()
        service_plugins = {'L3_ROUTER_NAT': self._l3_plugin_name}
        super(RouterDBTestBase, self).setUp(plugin=self._plugin_name,
                                            ext_mgr=ext_mgr,
                                            service_plugins=service_plugins)
        self.setup_db()
        cfg.CONF.set_default('allow_overlapping_ips', False)
        self.plugin_obj = manager.NeutronManager.get_service_plugins().get(
            'L3_ROUTER_NAT')
        self.startHttpPatch()
    def tearDown(self):
        super(RouterDBTestBase, self).tearDown()
        # Remove the config files this test injected into the global
        # test configuration.
        del test_lib.test_config['config_files']
class RouterDBTestCase(RouterDBTestBase,
test_l3_plugin.L3NatDBIntTestCase):
    def test_router_remove_router_interface_wrong_subnet_returns_400(self):
        """Removing an interface with a mismatched subnet id must 400."""
        with self.router() as r:
            with self.subnet() as s:
                with self.subnet(cidr='10.0.10.0/24') as s1:
                    with self.port(subnet=s1) as p:
                        self._router_interface_action('add',
                                                      r['router']['id'],
                                                      None,
                                                      p['port']['id'])
                        self._router_interface_action('remove',
                                                      r['router']['id'],
                                                      s['subnet']['id'],
                                                      p['port']['id'],
                                                      exc.HTTPBadRequest.code)
                        #remove properly to clean-up
                        self._router_interface_action('remove',
                                                      r['router']['id'],
                                                      None,
                                                      p['port']['id'])
    def test_router_remove_router_interface_wrong_port_returns_404(self):
        """Removing an interface with a port not attached to the router
        must 404."""
        with self.router() as r:
            with self.subnet() as s:
                with self.port(subnet=s) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # create another port for testing failure case
                    res = self._create_port('json', p['port']['network_id'])
                    p2 = self.deserialize('json', res)
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p2['port']['id'],
                                                  exc.HTTPNotFound.code)
                    # remove correct interface to cleanup
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # remove extra port created
                    self._delete('ports', p2['port']['id'])
    def test_add_network_to_ext_gw_backend_body(self):
        """The backend router update must embed the full external
        network body in external_gateway_info."""
        plugin_obj = manager.NeutronManager.get_plugin()
        with contextlib.nested(
            self.network(), self.router()
        ) as (n1, r1):
            with self.subnet(network=n1, cidr='10.10.10.10/24') as s1:
                self._set_net_external(s1['subnet']['network_id'])
                with mock.patch.object(plugin_obj.servers,
                                       'rest_update_router') as upmock:
                    self._add_external_gateway_to_router(r1['router']['id'],
                                                         n1['network']['id'])
                # Second positional arg of the backend call is the router
                # body that was sent; inspect it directly.
                router_body = upmock.mock_calls[0][1][1]
                self.assertEqual(
                    plugin_obj.get_network(context.get_admin_context(),
                                           n1['network']['id']),
                    router_body['external_gateway_info']['network'])
    def test_multi_tenant_flip_alllocation(self):
        """Floating IPs for two different tenants on a shared external
        network must both be accepted by the backend (verified with the
        VerifyMultiTenantFloatingIP fake server)."""
        tenant1_id = _uuid()
        tenant2_id = _uuid()
        with contextlib.nested(
            self.network(tenant_id=tenant1_id),
            self.network(tenant_id=tenant2_id)) as (n1, n2):
            with contextlib.nested(
                self.subnet(network=n1, cidr='11.0.0.0/24'),
                self.subnet(network=n2, cidr='12.0.0.0/24'),
                self.subnet(cidr='13.0.0.0/24')) as (s1, s2, psub):
                with contextlib.nested(
                    self.router(tenant_id=tenant1_id),
                    self.router(tenant_id=tenant2_id),
                    self.port(subnet=s1, tenant_id=tenant1_id),
                    self.port(subnet=s2, tenant_id=tenant2_id)) as (r1, r2,
                                                                    p1, p2):
                    self._set_net_external(psub['subnet']['network_id'])
                    s1id = p1['port']['fixed_ips'][0]['subnet_id']
                    s2id = p2['port']['fixed_ips'][0]['subnet_id']
                    s1 = {'subnet': {'id': s1id}}
                    s2 = {'subnet': {'id': s2id}}
                    self._add_external_gateway_to_router(
                        r1['router']['id'],
                        psub['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r2['router']['id'],
                        psub['subnet']['network_id'])
                    self._router_interface_action(
                        'add', r1['router']['id'],
                        s1['subnet']['id'], None)
                    self._router_interface_action(
                        'add', r2['router']['id'],
                        s2['subnet']['id'], None)
                    fl1 = self._make_floatingip_for_tenant_port(
                        net_id=psub['subnet']['network_id'],
                        port_id=p1['port']['id'],
                        tenant_id=tenant1_id)
                    # Swap the fake backend for one that checks the
                    # multi-tenant floating IP payload for the 2nd tenant.
                    self.httpPatch.stop()
                    multiFloatPatch = mock.patch(
                        HTTPCON,
                        new=fake_server.VerifyMultiTenantFloatingIP)
                    multiFloatPatch.start()
                    fl2 = self._make_floatingip_for_tenant_port(
                        net_id=psub['subnet']['network_id'],
                        port_id=p2['port']['id'],
                        tenant_id=tenant2_id)
                    multiFloatPatch.stop()
                    self.httpPatch.start()
                    self._delete('floatingips', fl1['floatingip']['id'])
                    self._delete('floatingips', fl2['floatingip']['id'])
                    self._router_interface_action(
                        'remove', r1['router']['id'],
                        s1['subnet']['id'], None)
                    self._router_interface_action(
                        'remove', r2['router']['id'],
                        s2['subnet']['id'], None)
def _make_floatingip_for_tenant_port(self, net_id, port_id, tenant_id):
data = {'floatingip': {'floating_network_id': net_id,
'tenant_id': tenant_id,
'port_id': port_id}}
floatingip_req = self.new_create_request('floatingips', data, self.fmt)
res = floatingip_req.get_response(self.ext_api)
return self.deserialize(self.fmt, res)
    def test_floatingip_with_invalid_create_port(self):
        """Delegate to the shared helper, pinning this plugin's class path."""
        self._test_floatingip_with_invalid_create_port(
            'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2')
    def test_create_floatingip_no_ext_gateway_return_404(self):
        """Floating IP creation must 404 when no router has a gateway."""
        with self.subnet(cidr='10.0.10.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.port() as private_port:
                with self.router():
                    # The router has no external gateway, so no path to the
                    # public network exists for the association.
                    res = self._create_floatingip(
                        'json',
                        public_sub['subnet']['network_id'],
                        port_id=private_port['port']['id'])
                    self.assertEqual(res.status_int, exc.HTTPNotFound.code)
    def test_router_update_gateway(self):
        """Moving a router gateway between two external networks must stick."""
        with self.router() as r:
            with self.subnet() as s1:
                with self.subnet(cidr='10.0.10.0/24') as s2:
                    self._set_net_external(s1['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s1['subnet']['network_id'])
                    body = self._show('routers', r['router']['id'])
                    net_id = (body['router']
                              ['external_gateway_info']['network_id'])
                    self.assertEqual(net_id, s1['subnet']['network_id'])
                    # Flip the gateway to the second external network.
                    self._set_net_external(s2['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s2['subnet']['network_id'])
                    body = self._show('routers', r['router']['id'])
                    net_id = (body['router']
                              ['external_gateway_info']['network_id'])
                    self.assertEqual(net_id, s2['subnet']['network_id'])
                    self._remove_external_gateway_from_router(
                        r['router']['id'],
                        s2['subnet']['network_id'])
    # The following cases from the upstream L3 test suite exercise
    # behaviour this plugin does not implement, so they are skipped.
    def test_router_add_interface_overlapped_cidr(self):
        self.skipTest("Plugin does not support")
    def test_router_add_interface_overlapped_cidr_returns_400(self):
        self.skipTest("Plugin does not support")
    def test_list_nets_external(self):
        self.skipTest("Plugin does not support")
    def test_router_update_gateway_with_existed_floatingip(self):
        """Re-pointing a gateway while a floating IP is in use must 409."""
        with self.subnet(cidr='10.0.10.0/24') as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.floatingip_with_assoc() as fip:
                self._add_external_gateway_to_router(
                    fip['floatingip']['router_id'],
                    subnet['subnet']['network_id'],
                    expected_code=exc.HTTPConflict.code)
    def test_router_remove_interface_wrong_subnet_returns_400(self):
        """Interface removal naming a mismatched subnet must return 400."""
        with self.router() as r:
            with self.subnet(cidr='10.0.10.0/24') as s:
                with self.port() as p:
                    # Attach the interface by port, then try to detach it
                    # with both the port and an unrelated subnet id.
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  s['subnet']['id'],
                                                  p['port']['id'],
                                                  exc.HTTPBadRequest.code)
                    #remove properly to clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_remove_interface_wrong_port_returns_404(self):
        """Interface removal naming an unattached port must return 404."""
        with self.router() as r:
            with self.subnet(cidr='10.0.10.0/24'):
                with self.port() as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # create another port for testing failure case
                    res = self._create_port('json', p['port']['network_id'])
                    p2 = self.deserialize('json', res)
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p2['port']['id'],
                                                  exc.HTTPNotFound.code)
                    # remove correct interface to cleanup
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # remove extra port created
                    self._delete('ports', p2['port']['id'])
    def test_send_data(self):
        """Exercise the plugin's full-state sync (_send_all_data).

        Builds two routers with interfaces, an external network and a
        floating IP, then checks that pushing the whole topology to the
        backend returns HTTP 200 before tearing everything down.
        """
        fmt = 'json'
        plugin_obj = manager.NeutronManager.get_plugin()
        with self.router() as r:
            r_id = r['router']['id']
            with self.subnet(cidr='10.0.10.0/24') as s:
                s_id = s['subnet']['id']
                with self.router() as r1:
                    r1_id = r1['router']['id']
                    body = self._router_interface_action('add', r_id, s_id,
                                                         None)
                    self.assertIn('port_id', body)
                    r_port_id = body['port_id']
                    body = self._show('ports', r_port_id)
                    self.assertEqual(body['port']['device_id'], r_id)
                    with self.subnet(cidr='10.0.20.0/24') as s1:
                        s1_id = s1['subnet']['id']
                        body = self._router_interface_action('add', r1_id,
                                                             s1_id, None)
                        self.assertIn('port_id', body)
                        r1_port_id = body['port_id']
                        body = self._show('ports', r1_port_id)
                        self.assertEqual(body['port']['device_id'], r1_id)
                        with self.subnet(cidr='11.0.0.0/24') as public_sub:
                            public_net_id = public_sub['subnet']['network_id']
                            self._set_net_external(public_net_id)
                            with self.port() as prv_port:
                                prv_fixed_ip = prv_port['port']['fixed_ips'][0]
                                priv_sub_id = prv_fixed_ip['subnet_id']
                                self._add_external_gateway_to_router(
                                    r_id, public_net_id)
                                self._router_interface_action('add', r_id,
                                                              priv_sub_id,
                                                              None)
                                priv_port_id = prv_port['port']['id']
                                res = self._create_floatingip(
                                    fmt, public_net_id,
                                    port_id=priv_port_id)
                                self.assertEqual(res.status_int,
                                                 exc.HTTPCreated.code)
                                floatingip = self.deserialize(fmt, res)
                                # The actual behaviour under test: a full
                                # topology push must succeed (HTTP 200).
                                result = plugin_obj._send_all_data()
                                self.assertEqual(result[0], 200)
                                self._delete('floatingips',
                                             floatingip['floatingip']['id'])
                                self._remove_external_gateway_from_router(
                                    r_id, public_net_id)
                                self._router_interface_action('remove', r_id,
                                                              priv_sub_id,
                                                              None)
                    self._router_interface_action('remove', r_id, s_id,
                                                  None)
                    self._show('ports', r_port_id,
                               expected_code=exc.HTTPNotFound.code)
                    self._router_interface_action('remove', r1_id, s1_id,
                                                  None)
                    self._show('ports', r1_port_id,
                               expected_code=exc.HTTPNotFound.code)
    def test_router_rules_update(self):
        """Rules set via router update must round-trip through show.

        The API attaches server-generated ids, so responses are compared
        after _strip_rule_ids().
        """
        with self.router() as r:
            r_id = r['router']['id']
            router_rules = [{'destination': '1.2.3.4/32',
                             'source': '4.3.2.1/32',
                             'action': 'permit',
                             'nexthops': ['4.4.4.4', '4.4.4.5']}]
            body = self._update('routers', r_id,
                                {'router': {'router_rules': router_rules}})
            body = self._show('routers', r['router']['id'])
            self.assertIn('router_rules', body['router'])
            rules = body['router']['router_rules']
            self.assertEqual(_strip_rule_ids(rules), router_rules)
            # Try after adding another rule
            router_rules.append({'source': 'external',
                                 'destination': '8.8.8.8/32',
                                 'action': 'permit', 'nexthops': []})
            body = self._update('routers', r['router']['id'],
                                {'router': {'router_rules': router_rules}})
            body = self._show('routers', r['router']['id'])
            self.assertIn('router_rules', body['router'])
            rules = body['router']['router_rules']
            self.assertEqual(_strip_rule_ids(rules), router_rules)
    def test_router_rules_separation(self):
        """Rules applied to one router must not leak onto another."""
        with self.router() as r1:
            with self.router() as r2:
                r1_id = r1['router']['id']
                r2_id = r2['router']['id']
                router1_rules = [{'destination': '5.6.7.8/32',
                                  'source': '8.7.6.5/32',
                                  'action': 'permit',
                                  'nexthops': ['8.8.8.8', '9.9.9.9']}]
                router2_rules = [{'destination': '1.2.3.4/32',
                                  'source': '4.3.2.1/32',
                                  'action': 'permit',
                                  'nexthops': ['4.4.4.4', '4.4.4.5']}]
                body1 = self._update('routers', r1_id,
                                     {'router':
                                      {'router_rules': router1_rules}})
                body2 = self._update('routers', r2_id,
                                     {'router':
                                      {'router_rules': router2_rules}})
                # Each router must report exactly its own rule set.
                body1 = self._show('routers', r1_id)
                body2 = self._show('routers', r2_id)
                rules1 = body1['router']['router_rules']
                rules2 = body2['router']['router_rules']
                self.assertEqual(_strip_rule_ids(rules1), router1_rules)
                self.assertEqual(_strip_rule_ids(rules2), router2_rules)
    def test_router_rules_validation(self):
        """Validation of router_rules payloads.

        Accepts a well-formed rule, fills in missing nexthops with an
        empty list, and rejects malformed CIDRs, nexthops, actions,
        duplicated rules and duplicated nexthops with HTTP 400 while
        keeping the last good rule set intact.
        """
        with self.router() as r:
            r_id = r['router']['id']
            good_rules = [{'destination': '1.2.3.4/32',
                           'source': '4.3.2.1/32',
                           'action': 'permit',
                           'nexthops': ['4.4.4.4', '4.4.4.5']}]
            body = self._update('routers', r_id,
                                {'router': {'router_rules': good_rules}})
            body = self._show('routers', r_id)
            self.assertIn('router_rules', body['router'])
            self.assertEqual(good_rules,
                             _strip_rule_ids(body['router']['router_rules']))
            # Missing nexthops should be populated with an empty list
            light_rules = copy.deepcopy(good_rules)
            del light_rules[0]['nexthops']
            body = self._update('routers', r_id,
                                {'router': {'router_rules': light_rules}})
            body = self._show('routers', r_id)
            self.assertIn('router_rules', body['router'])
            light_rules[0]['nexthops'] = []
            self.assertEqual(light_rules,
                             _strip_rule_ids(body['router']['router_rules']))
            # bad CIDR
            bad_rules = copy.deepcopy(good_rules)
            bad_rules[0]['destination'] = '1.1.1.1'
            body = self._update('routers', r_id,
                                {'router': {'router_rules': bad_rules}},
                                expected_code=exc.HTTPBadRequest.code)
            # bad next hop
            bad_rules = copy.deepcopy(good_rules)
            bad_rules[0]['nexthops'] = ['1.1.1.1', 'f2']
            body = self._update('routers', r_id,
                                {'router': {'router_rules': bad_rules}},
                                expected_code=exc.HTTPBadRequest.code)
            # bad action
            bad_rules = copy.deepcopy(good_rules)
            bad_rules[0]['action'] = 'dance'
            body = self._update('routers', r_id,
                                {'router': {'router_rules': bad_rules}},
                                expected_code=exc.HTTPBadRequest.code)
            # duplicate rule with opposite action
            bad_rules = copy.deepcopy(good_rules)
            bad_rules.append(copy.deepcopy(bad_rules[0]))
            bad_rules.append(copy.deepcopy(bad_rules[0]))
            bad_rules[1]['source'] = 'any'
            bad_rules[2]['action'] = 'deny'
            body = self._update('routers', r_id,
                                {'router': {'router_rules': bad_rules}},
                                expected_code=exc.HTTPBadRequest.code)
            # duplicate nexthop
            bad_rules = copy.deepcopy(good_rules)
            bad_rules[0]['nexthops'] = ['1.1.1.1', '1.1.1.1']
            body = self._update('routers', r_id,
                                {'router': {'router_rules': bad_rules}},
                                expected_code=exc.HTTPBadRequest.code)
            # make sure light rules persisted during bad updates
            body = self._show('routers', r_id)
            self.assertIn('router_rules', body['router'])
            self.assertEqual(light_rules,
                             _strip_rule_ids(body['router']['router_rules']))
    def test_router_rules_config_change(self):
        """tenant_default_router_rule config must seed rules on new routers."""
        cfg.CONF.set_override('tenant_default_router_rule',
                              ['*:any:any:deny',
                               '*:8.8.8.8/32:any:permit:1.2.3.4'],
                              'ROUTER')
        with self.router() as r:
            body = self._show('routers', r['router']['id'])
            # Expected parse of the two default-rule strings above.
            expected_rules = [{'source': 'any', 'destination': 'any',
                               'nexthops': [], 'action': 'deny'},
                              {'source': '8.8.8.8/32', 'destination': 'any',
                               'nexthops': ['1.2.3.4'], 'action': 'permit'}]
            self.assertEqual(expected_rules,
                             _strip_rule_ids(body['router']['router_rules']))
    def test_rule_exhaustion(self):
        """Updating with more rules than max_router_rules must return 400."""
        cfg.CONF.set_override('max_router_rules', 10, 'ROUTER')
        with self.router() as r:
            rules = []
            # Build 11 distinct rules, one more than the configured cap.
            for i in moves.xrange(1, 12):
                rule = {'source': 'any', 'nexthops': [],
                        'destination': '1.1.1.' + str(i) + '/32',
                        'action': 'permit'}
                rules.append(rule)
            self._update('routers', r['router']['id'],
                         {'router': {'router_rules': rules}},
                         expected_code=exc.HTTPBadRequest.code)
def test_rollback_on_router_create(self):
tid = test_api_v2._uuid()
self.httpPatch.stop()
with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
self._create_router('json', tid)
self.assertTrue(len(self._get_routers(tid)) == 0)
    def test_rollback_on_router_update(self):
        """A backend 500 during update must leave the router unchanged."""
        with self.router() as r:
            data = {'router': {'name': 'aNewName'}}
            self.httpPatch.stop()
            with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                self.new_update_request(
                    'routers', data, r['router']['id']).get_response(self.api)
            self.httpPatch.start()
            updatedr = self._get_routers(r['router']['tenant_id'])[0]
            # name should have stayed the same due to failure
            self.assertEqual(r['router']['name'], updatedr['name'])
    def test_rollback_on_router_delete(self):
        """A backend 500 during delete must keep the router in the DB."""
        with self.router() as r:
            self.httpPatch.stop()
            with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                self._delete('routers', r['router']['id'],
                             expected_code=exc.HTTPInternalServerError.code)
            self.httpPatch.start()
            # The router must still be the tenant's only router.
            self.assertEqual(r['router']['id'],
                             self._get_routers(r['router']['tenant_id']
                                               )[0]['id'])
def _get_routers(self, tenant_id):
ctx = context.Context('', tenant_id)
return self.plugin_obj.get_routers(ctx)
def _strip_rule_ids(rules):
cleaned = []
for rule in rules:
del rule['id']
cleaned.append(rule)
return cleaned
| apache-2.0 |
kleskjr/scipy | scipy/fftpack/realtransforms.py | 102 | 15426 | """
Real spectrum tranforms (DCT, DST, MDCT)
"""
from __future__ import division, print_function, absolute_import
__all__ = ['dct', 'idct', 'dst', 'idst']
import numpy as np
from scipy.fftpack import _fftpack
from scipy.fftpack.basic import _datacopied, _fix_shape, _asfarray
import atexit
# The FFTPACK routines keep per-transform work arrays cached at the C
# level; register destructors so those caches are freed at interpreter
# exit instead of showing up as leaks.
atexit.register(_fftpack.destroy_ddct1_cache)
atexit.register(_fftpack.destroy_ddct2_cache)
atexit.register(_fftpack.destroy_dct1_cache)
atexit.register(_fftpack.destroy_dct2_cache)
atexit.register(_fftpack.destroy_ddst1_cache)
atexit.register(_fftpack.destroy_ddst2_cache)
atexit.register(_fftpack.destroy_dst1_cache)
atexit.register(_fftpack.destroy_dst2_cache)
def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Discrete Cosine Transform of arbitrary type sequence x.
    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dct is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    Returns
    -------
    y : ndarray of real
        The transformed input array.
    See Also
    --------
    idct : Inverse DCT
    Notes
    -----
    For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to
    MATLAB ``dct(x)``.
    There are theoretically 8 types of the DCT, only the first 3 types are
    implemented in scipy. 'The' DCT generally refers to DCT type 2, and 'the'
    Inverse DCT generally refers to DCT type 3.
    **Type I**
    There are several definitions of the DCT-I; we use the following
    (for ``norm=None``)::
              N-2
    y[k] = x[0] + (-1)**k x[N-1] + 2 * sum x[n]*cos(pi*k*n/(N-1))
              n=1
    Only None is supported as normalization mode for DCT-I. Note also that the
    DCT-I is only supported for input size > 1
    **Type II**
    There are several definitions of the DCT-II; we use the following
    (for ``norm=None``)::
              N-1
    y[k] = 2* sum x[n]*cos(pi*k*(2n+1)/(2*N)), 0 <= k < N.
              n=0
    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor `f`::
      f = sqrt(1/(4*N)) if k = 0,
      f = sqrt(1/(2*N)) otherwise.
    Which makes the corresponding matrix of coefficients orthonormal
    (``OO' = Id``).
    **Type III**
    There are several definitions, we use the following
    (for ``norm=None``)::
                        N-1
    y[k] = x[0] + 2 * sum x[n]*cos(pi*(k+0.5)*n/N), 0 <= k < N.
                        n=1
    or, for ``norm='ortho'`` and 0 <= k < N::
                                        N-1
    y[k] = x[0] / sqrt(N) + sqrt(2/N) * sum x[n]*cos(pi*(k+0.5)*n/N)
                                        n=1
    The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up
    to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of
    the orthonormalized DCT-II.
    References
    ----------
    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
           Makhoul, `IEEE Transactions on acoustics, speech and signal
           processing` vol. 28(1), pp. 27-34,
           http://dx.doi.org/10.1109/TASSP.1980.1163351 (1980).
    .. [2] Wikipedia, "Discrete cosine transform",
           http://en.wikipedia.org/wiki/Discrete_cosine_transform
    Examples
    --------
    The Type 1 DCT is equivalent to the FFT (though faster) for real,
    even-symmetrical inputs. The output is also real and even-symmetrical.
    Half of the FFT input is used to generate half of the FFT output:
    >>> from scipy.fftpack import fft, dct
    >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
    array([ 30., -8., 6., -2., 6., -8.])
    >>> dct(np.array([4., 3., 5., 10.]), 1)
    array([ 30., -8., 6., -2.])
    """
    if type == 1 and norm is not None:
        # FFTPACK has no orthonormalized DCT-I routine.
        raise NotImplementedError(
              "Orthonormalization not yet supported for DCT-I")
    return _dct(x, type, n, axis, normalize=norm, overwrite_x=overwrite_x)
def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.
    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idct is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    Returns
    -------
    idct : ndarray of real
        The transformed input array.
    See Also
    --------
    dct : Forward DCT
    Notes
    -----
    For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to
    MATLAB ``idct(x)``.
    'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3.
    IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type
    3, and IDCT of type 3 is the DCT of type 2. For the definition of these
    types, see `dct`.
    Examples
    --------
    The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
    inputs. The output is also real and even-symmetrical. Half of the IFFT
    input is used to generate half of the IFFT output:
    >>> from scipy.fftpack import ifft, idct
    >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real
    array([ 4., 3., 5., 10., 5., 3.])
    >>> idct(np.array([ 30., -8., 6., -2.]), 1) / 6
    array([ 4., 3., 5., 10.])
    """
    if type == 1 and norm is not None:
        # FFTPACK has no orthonormalized DCT-I routine.
        raise NotImplementedError(
              "Orthonormalization not yet supported for IDCT-I")
    # Inverse/forward type table: IDCT-n is computed as a forward DCT
    # of the paired type (types 2 and 3 are inverses of each other).
    _TP = {1:1, 2:3, 3:2}
    return _dct(x, _TP[type], n, axis, normalize=norm, overwrite_x=overwrite_x)
def _get_dct_fun(type, dtype):
    """Look up the FFTPACK DCT routine for `type` at precision `dtype`."""
    name_by_dtype = {'float64': 'ddct%d', 'float32': 'dct%d'}
    try:
        template = name_by_dtype[dtype.name]
    except KeyError:
        raise ValueError("dtype %s not supported" % dtype)
    try:
        return getattr(_fftpack, template % type)
    except AttributeError as e:
        raise ValueError(str(e) + ". Type %d not understood" % type)
def _get_norm_mode(normalize):
try:
nm = {None:0, 'ortho':1}[normalize]
except KeyError:
raise ValueError("Unknown normalize mode %s" % normalize)
return nm
def __fix_shape(x, n, axis, dct_or_dst):
    """Coerce `x` to a float array of length `n` along `axis`.

    Returns ``(arr, n, copy_made)``; `copy_made` tells the caller whether
    `arr` is already a private copy and may therefore be overwritten.
    `dct_or_dst` only labels the error message ('DCT' or 'DST').
    """
    tmp = _asfarray(x)
    copy_made = _datacopied(tmp, x)
    if n is None:
        n = tmp.shape[axis]
    elif n != tmp.shape[axis]:
        # Truncate or zero-pad along `axis`; this always makes a copy.
        tmp, copy_made2 = _fix_shape(tmp, n, axis)
        copy_made = copy_made or copy_made2
    if n < 1:
        raise ValueError("Invalid number of %s data points "
                         "(%d) specified." % (dct_or_dst, n))
    return tmp, n, copy_made
def _raw_dct(x0, type, n, axis, nm, overwrite_x):
    """Run the FFTPACK DCT routine matching ``x0``'s precision."""
    return _eval_fun(_get_dct_fun(type, x0.dtype), x0, n, axis, nm,
                     overwrite_x)
def _raw_dst(x0, type, n, axis, nm, overwrite_x):
    """Run the FFTPACK DST routine matching ``x0``'s precision."""
    return _eval_fun(_get_dst_fun(type, x0.dtype), x0, n, axis, nm,
                     overwrite_x)
def _eval_fun(f, tmp, n, axis, nm, overwrite_x):
if axis == -1 or axis == len(tmp.shape) - 1:
return f(tmp, n, nm, overwrite_x)
tmp = np.swapaxes(tmp, axis, -1)
tmp = f(tmp, n, nm, overwrite_x)
return np.swapaxes(tmp, axis, -1)
def _dct(x, type, n=None, axis=-1, overwrite_x=False, normalize=None):
    """
    Return Discrete Cosine Transform of arbitrary type sequence x.
    Parameters
    ----------
    x : array_like
        input array.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dct is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    Returns
    -------
    z : ndarray
    """
    x0, n, copy_made = __fix_shape(x, n, axis, 'DCT')
    if type == 1 and n < 2:
        raise ValueError("DCT-I is not defined for size < 2")
    # A fresh copy made by __fix_shape may be clobbered even if the caller
    # asked us not to touch `x`.
    overwrite_x = overwrite_x or copy_made
    nm = _get_norm_mode(normalize)
    if np.iscomplexobj(x0):
        # The DCT is linear: transform real and imaginary parts separately.
        return (_raw_dct(x0.real, type, n, axis, nm, overwrite_x) + 1j *
                _raw_dct(x0.imag, type, n, axis, nm, overwrite_x))
    else:
        return _raw_dct(x0, type, n, axis, nm, overwrite_x)
def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Discrete Sine Transform of arbitrary type sequence x.
    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dst is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    Returns
    -------
    dst : ndarray of reals
        The transformed input array.
    See Also
    --------
    idst : Inverse DST
    Notes
    -----
    For a single dimension array ``x``.
    There are theoretically 8 types of the DST for different combinations of
    even/odd boundary conditions and boundary off sets [1]_, only the first
    3 types are implemented in scipy.
    **Type I**
    There are several definitions of the DST-I; we use the following
    for ``norm=None``. DST-I assumes the input is odd around n=-1 and n=N. ::
              N-1
    y[k] = 2 * sum x[n]*sin(pi*(k+1)*(n+1)/(N+1))
              n=0
    Only None is supported as normalization mode for DST-I. Note also that the
    DST-I is only supported for input size > 1
    The (unnormalized) DST-I is its own inverse, up to a factor `2(N+1)`.
    **Type II**
    There are several definitions of the DST-II; we use the following
    for ``norm=None``. DST-II assumes the input is odd around n=-1/2 and
    n=N-1/2; the output is odd around k=-1 and even around k=N-1 ::
              N-1
    y[k] = 2* sum x[n]*sin(pi*(k+1)*(n+0.5)/N), 0 <= k < N.
              n=0
    if ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor `f` ::
        f = sqrt(1/(4*N)) if k == 0
        f = sqrt(1/(2*N)) otherwise.
    **Type III**
    There are several definitions of the DST-III, we use the following
    (for ``norm=None``). DST-III assumes the input is odd around n=-1
    and even around n=N-1 ::
                                 N-2
    y[k] = x[N-1]*(-1)**k + 2* sum x[n]*sin(pi*(k+0.5)*(n+1)/N), 0 <= k < N.
                                 n=0
    The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up
    to a factor `2N`. The orthonormalized DST-III is exactly the inverse of
    the orthonormalized DST-II.
    .. versionadded:: 0.11.0
    References
    ----------
    .. [1] Wikipedia, "Discrete sine transform",
           http://en.wikipedia.org/wiki/Discrete_sine_transform
    """
    if type == 1 and norm is not None:
        # Bug fix: the message previously said "IDCT-I"; this is the
        # forward DST-I path.
        raise NotImplementedError(
              "Orthonormalization not yet supported for DST-I")
    return _dst(x, type, n, axis, normalize=norm, overwrite_x=overwrite_x)
def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Inverse Discrete Sine Transform of an arbitrary type sequence.
    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3}, optional
        Type of the DST (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the idst is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    Returns
    -------
    idst : ndarray of real
        The transformed input array.
    See Also
    --------
    dst : Forward DST
    Notes
    -----
    'The' IDST is the IDST of type 2, which is the same as DST of type 3.
    IDST of type 1 is the DST of type 1, IDST of type 2 is the DST of type
    3, and IDST of type 3 is the DST of type 2. For the definition of these
    types, see `dst`.
    .. versionadded:: 0.11.0
    """
    if type == 1 and norm is not None:
        # Bug fix: the message previously said "IDCT-I"; this is the
        # inverse DST-I path.
        raise NotImplementedError(
              "Orthonormalization not yet supported for IDST-I")
    # Inverse/forward type table: IDST-n is computed as a forward DST of
    # the paired type (types 2 and 3 are inverses of each other).
    _TP = {1:1, 2:3, 3:2}
    return _dst(x, _TP[type], n, axis, normalize=norm, overwrite_x=overwrite_x)
def _get_dst_fun(type, dtype):
    """Look up the FFTPACK DST routine for `type` at precision `dtype`."""
    name_by_dtype = {'float64': 'ddst%d', 'float32': 'dst%d'}
    try:
        template = name_by_dtype[dtype.name]
    except KeyError:
        raise ValueError("dtype %s not supported" % dtype)
    try:
        return getattr(_fftpack, template % type)
    except AttributeError as e:
        raise ValueError(str(e) + ". Type %d not understood" % type)
def _dst(x, type, n=None, axis=-1, overwrite_x=False, normalize=None):
    """
    Return Discrete Sine Transform of arbitrary type sequence x.
    Parameters
    ----------
    x : array_like
        input array.
    n : int, optional
        Length of the transform.
    axis : int, optional
        Axis along which the dst is computed. (default=-1)
    overwrite_x : bool, optional
        If True the contents of x can be destroyed. (default=False)
    Returns
    -------
    z : real ndarray
    """
    x0, n, copy_made = __fix_shape(x, n, axis, 'DST')
    if type == 1 and n < 2:
        raise ValueError("DST-I is not defined for size < 2")
    # A fresh copy made by __fix_shape may be clobbered even if the caller
    # asked us not to touch `x`.
    overwrite_x = overwrite_x or copy_made
    nm = _get_norm_mode(normalize)
    if np.iscomplexobj(x0):
        # The DST is linear: transform real and imaginary parts separately.
        return (_raw_dst(x0.real, type, n, axis, nm, overwrite_x) + 1j *
                _raw_dst(x0.imag, type, n, axis, nm, overwrite_x))
    else:
        return _raw_dst(x0, type, n, axis, nm, overwrite_x)
| bsd-3-clause |
iffy/eliot | benchmarks/logwriter.py | 1 | 1041 | """
A benchmark for eliot.logwriter.
"""
import tempfile
import time
from twisted.internet.task import react
from twisted.python.filepath import FilePath
from eliot.logwriter import ThreadedFileWriter
# Benchmark parameters: payload size of each message (bytes) and the
# number of messages written through the ThreadedFileWriter.
LENGTH = 100
MESSAGES = 100000
def main(reactor):
    """Benchmark ThreadedFileWriter throughput.

    Writes MESSAGES fixed-size entries to a temp file, reports
    messages/sec and KB/sec once the writer's queue drains, then prints
    the resulting file size and removes the file.  Python 2 / Twisted;
    run via ``react(main, [])``.
    """
    print "Message size: %d bytes Num messages: %d" % (LENGTH, MESSAGES)
    message = b"a" * LENGTH
    fp = FilePath(tempfile.mktemp())
    writer = ThreadedFileWriter(fp.open("ab"), reactor)
    writer.startService()
    start = time.time()
    for i in range(MESSAGES):
        writer(message)
    # stopService returns a Deferred that fires once all queued messages
    # have been flushed to disk; timing stops there.
    d = writer.stopService()
    def done(_):
        elapsed = time.time() - start
        kbSec = (LENGTH * MESSAGES) / (elapsed * 1024)
        messagesSec = MESSAGES / elapsed
        print "messages/sec: %s KB/sec: %s" % (messagesSec, kbSec)
    d.addCallback(done)
    def cleanup(result):
        # restat() refreshes cached file metadata before reading the size.
        fp.restat()
        print
        print "File size: ", fp.getsize()
        fp.remove()
    d.addBoth(cleanup)
    return d
# react() runs the reactor until the Deferred returned by main() fires.
if __name__ == '__main__':
    react(main, [])
| apache-2.0 |
iniverno/RnR-LLC | simics-3.0-install/simics-3.0.31/amd64-linux/lib/python2.4/UserDict.py | 14 | 5572 | """A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
    """Wrapper around a real dict, kept in ``self.data``.

    Predates the ability to subclass ``dict`` directly; subclasses
    override individual methods to customise behaviour.  Written for
    Python 2 (uses ``cmp`` and ``has_key``).
    """
    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        # Compare underlying dicts when both sides are UserDicts.
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    def __len__(self): return len(self.data)
    def __getitem__(self, key): return self.data[key]
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        # Exact base class: a shallow copy of the wrapped dict suffices.
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Detach self.data so copy.copy() does not duplicate the
            # (possibly large) underlying dict; it is repopulated below.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return self.data.has_key(key)
    def update(self, dict=None, **kwargs):
        # Accept a UserDict, a real dict, anything without items(), or
        # a mapping exposing items(); kwargs are applied last.
        if dict is None:
            pass
        elif isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
            self.data.update(dict)
        else:
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            self.data.update(kwargs)
    def get(self, key, failobj=None):
        if not self.has_key(key):
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if not self.has_key(key):
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    def fromkeys(cls, iterable, value=None):
        # Alternate constructor: a new mapping with the given keys all
        # bound to `value`.
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    fromkeys = classmethod(fromkeys)
class IterableUserDict(UserDict):
    """UserDict variant whose instances support direct iteration over keys."""
    def __iter__(self):
        return iter(self.data)
class DictMixin:
    """Mixin deriving the full mapping interface from four primitives.

    Host classes must provide ``__getitem__``, ``__setitem__``,
    ``__delitem__`` and ``keys``; everything else is layered on top.
    """
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().
    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        try:
            value = self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)
    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()
    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # *args may carry at most one element: the default returned when
        # `key` is missing; without it the KeyError propagates.
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                              + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other=None, **kwargs):
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'):  # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())
| gpl-2.0 |
edevil/django | tests/utils_tests/test_autoreload.py | 42 | 3564 | from importlib import import_module
import os
import tempfile
from django import conf
from django.contrib import admin
from django.test import TestCase, override_settings
from django.utils.autoreload import gen_filenames
from django.utils._os import upath
# Locale directory shipped alongside this test module; used below to
# exercise LOCALE_PATHS / project-root / app locale discovery.
LOCALE_PATH = os.path.join(os.path.dirname(__file__), 'locale')
class TestFilenameGenerator(TestCase):
    """Tests for django.utils.autoreload.gen_filenames()."""
    def setUp(self):
        # Empty cached variables so each test triggers a fresh scan.
        from django.utils import autoreload
        autoreload._cached_modules = set()
        autoreload._cached_filenames = []
    def test_django_locales(self):
        """
        Test that gen_filenames() also yields the built-in django locale files.
        """
        filenames = list(gen_filenames())
        self.assertIn(os.path.join(os.path.dirname(conf.__file__), 'locale',
                                   'nl', 'LC_MESSAGES', 'django.mo'),
                      filenames)
    @override_settings(LOCALE_PATHS=(LOCALE_PATH,))
    def test_locale_paths_setting(self):
        """
        Test that gen_filenames also yields from LOCALE_PATHS locales.
        """
        filenames = list(gen_filenames())
        self.assertIn(os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),
                      filenames)
    @override_settings(INSTALLED_APPS=[])
    def test_project_root_locale(self):
        """
        Test that gen_filenames also yields from the current directory (project
        root).
        """
        old_cwd = os.getcwd()
        os.chdir(os.path.dirname(__file__))
        try:
            filenames = list(gen_filenames())
            self.assertIn(
                os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),
                filenames)
        finally:
            os.chdir(old_cwd)
    @override_settings(INSTALLED_APPS=['django.contrib.admin'])
    def test_app_locales(self):
        """
        Test that gen_filenames also yields from locale dirs in installed apps.
        """
        filenames = list(gen_filenames())
        self.assertIn(os.path.join(os.path.dirname(admin.__file__), 'locale',
                                   'nl', 'LC_MESSAGES', 'django.mo'),
                      filenames)
    @override_settings(USE_I18N=False)
    def test_no_i18n(self):
        """
        If i18n machinery is disabled, there is no need for watching the
        locale files.
        """
        filenames = list(gen_filenames())
        self.assertNotIn(
            os.path.join(os.path.dirname(conf.__file__), 'locale', 'nl',
                         'LC_MESSAGES', 'django.mo'),
            filenames)
    def test_only_new_files(self):
        """
        When calling a second time gen_filenames with only_new = True, only
        files from newly loaded modules should be given.
        """
        list(gen_filenames())
        from fractions import Fraction  # NOQA
        filenames2 = list(gen_filenames(only_new=True))
        self.assertEqual(len(filenames2), 1)
        self.assertTrue(filenames2[0].endswith('fractions.py'))
        self.assertFalse(any(f.endswith('.pyc') for f in gen_filenames()))
    def test_deleted_removed(self):
        # Import a freshly created module, confirm it is watched, then
        # delete it and confirm it drops out of gen_filenames().
        fd, filepath = tempfile.mkstemp(dir=os.path.dirname(upath(__file__)), suffix='.py')
        try:
            _, filename = os.path.split(filepath)
            import_module('.%s' % filename.replace('.py', ''), package='utils_tests')
            self.assertIn(filepath, gen_filenames())
        finally:
            os.close(fd)
            os.remove(filepath)
        self.assertNotIn(filepath, gen_filenames())
| bsd-3-clause |
kogone/android_kernel_oneplus_msm8974 | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    # Open one software event (task/comm tracking) on every CPU for every
    # thread, mmap the ring buffers, and print each sample as it arrives.
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # NOTE(review): SAMPLE_TID is OR'ed in twice below; the second occurrence
    # is redundant — possibly SAMPLE_TIME was intended. Confirm upstream.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until any ring buffer has data, then drain every CPU's buffer.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
if __name__ == '__main__':
main()
| gpl-2.0 |
ssvsergeyev/ZenPacks.zenoss.AWS | src/boto/tests/integration/glacier/test_layer2.py | 136 | 2025 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from tests.unit import unittest
from boto.glacier.layer2 import Layer1, Layer2
class TestGlacierLayer2(unittest.TestCase):
    """Round-trip integration test for the Glacier Layer2 API.

    Talks to the live AWS Glacier service; requires valid credentials.
    """
    # Marker attribute used by the test runner to select/skip Glacier
    # integration tests.
    glacier = True
    def setUp(self):
        self.layer2 = Layer2()
        # Timestamp suffix keeps vault names unique across test runs.
        self.vault_name = 'testvault%s' % int(time.time())
    def test_create_delete_vault(self):
        """Create a vault, fetch it back, delete it, and compare metadata."""
        vault = self.layer2.create_vault(self.vault_name)
        retrieved_vault = self.layer2.get_vault(self.vault_name)
        # Delete before asserting so the vault is cleaned up even on failure
        # of the comparisons below.
        self.layer2.delete_vault(self.vault_name)
        self.assertEqual(vault.name, retrieved_vault.name)
        self.assertEqual(vault.arn, retrieved_vault.arn)
        self.assertEqual(vault.creation_date, retrieved_vault.creation_date)
        self.assertEqual(vault.last_inventory_date,
                         retrieved_vault.last_inventory_date)
        self.assertEqual(vault.number_of_archives,
                         retrieved_vault.number_of_archives)
| gpl-2.0 |
dagwieers/ansible | lib/ansible/plugins/connection/napalm.py | 24 | 6630 | # (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
connection: napalm
short_description: Provides persistent connection using NAPALM
description:
- This connection plugin provides connectivity to network devices using
the NAPALM network device abstraction library. This library requires
certain features to be enabled on network devices depending on the
destination device operating system. The connection plugin requires
C(napalm) to be installed locally on the Ansible controller.
version_added: "2.8"
requirements:
- napalm
options:
host:
description:
- Specifies the remote device FQDN or IP address to establish the SSH
connection to.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections
when establishing the SSH connection.
default: 22
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
network_os:
description:
- Configures the device platform network operating system. This value is
used to load a napalm device abstraction.
vars:
- name: ansible_network_os
remote_user:
description:
- The username used to authenticate to the remote device when the SSH
connection is first established. If the remote_user is not specified,
the connection will use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device
when first establishing the SSH connection.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
private_key_file:
description:
- The private SSH key or certificate file used to authenticate to the
remote device when first establishing the SSH connection.
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
timeout:
type: int
description:
- Sets the connection time, in seconds, for communicating with the
remote device. This timeout is used as the default timeout value for
commands when issuing a command to the network CLI. If the command
does not return in timeout seconds, an error is generated.
default: 120
host_key_auto_add:
type: boolean
description:
- By default, Ansible will prompt the user before adding SSH keys to the
known hosts file. By enabling this option, unknown host keys will
automatically be added to the known hosts file.
- Be sure to fully understand the security implications of enabling this
option on production systems as it could create a security vulnerability.
default: False
ini:
- section: paramiko_connection
key: host_key_auto_add
env:
- name: ANSIBLE_HOST_KEY_AUTO_ADD
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to
initially establish a persistent connection. If this value expires
before the connection to the remote device is completed, the connection
will fail.
default: 30
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to
return from the remote device. If this timer is exceeded before the
command returns, the connection plugin will raise an exception and
close.
default: 30
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
"""
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.connection import NetworkConnectionBase
try:
from napalm import get_network_driver
from napalm.base import ModuleImportError
HAS_NAPALM = True
except ImportError:
HAS_NAPALM = False
class Connection(NetworkConnectionBase):
    """Persistent connection plugin backed by the NAPALM library.

    The napalm device handle is created lazily on first use and handed to
    the persistent-connection machinery as an external sub-plugin.
    """
    transport = 'napalm'
    has_pipelining = False

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
        # Device handle; stays None until _connect() succeeds.
        self.napalm = None

    def _connect(self):
        """Open the napalm device session (no-op when already connected)."""
        # Fail fast when the optional dependency is missing.
        if not HAS_NAPALM:
            raise AnsibleError('The "napalm" python library is required to use the napalm connection type.\n')
        super(Connection, self)._connect()
        if self.connected:
            return
        if not self._network_os:
            raise AnsibleConnectionFailure(
                'Unable to automatically determine host network os. Please '
                'manually configure ansible_network_os value for this host'
            )
        self.queue_message('log', 'network_os is set to %s' % self._network_os)
        try:
            driver_cls = get_network_driver(self._network_os)
        except ModuleImportError:
            raise AnsibleConnectionFailure('Failed to import napalm driver for {0}'.format(self._network_os))
        device_kwargs = {
            'hostname': self.get_option('host'),
            'username': self.get_option('remote_user'),
            'password': self.get_option('password'),
            'timeout': self.get_option('persistent_command_timeout'),
        }
        self.napalm = driver_cls(**device_kwargs)
        self.napalm.open()
        # Route subsequent RPC calls straight to the napalm device object.
        self._sub_plugin = {'type': 'external', 'name': 'napalm', 'obj': self.napalm}
        self.queue_message('vvvv', 'created napalm device for network_os %s' % self._network_os)
        self._connected = True

    def close(self):
        """Close the device session, then run base-class cleanup."""
        if self.napalm:
            self.napalm.close()
            self.napalm = None
        super(Connection, self).close()
| gpl-3.0 |
michalbe/servo | src/components/script/dom/bindings/codegen/parser/tests/test_interface.py | 134 | 5702 | import WebIDL
def WebIDLTest(parser, harness):
    """Exercise WebIDL interface parsing.

    Covers: basic interface declarations, inheritance and member QNames,
    and rejection of every inheritance/implements cycle variant.

    *parser* is a fresh WebIDL parser; *harness* collects pass/fail results.
    """
    def expect_throw(parser, idl, message):
        # Parse *idl* on a freshly reset parser and assert (via the harness)
        # that it raises.  Returns the reset parser for further use.
        parser = parser.reset()
        threw = False
        try:
            parser.parse(idl)
            parser.finish()
        # Bare except kept deliberately: any exception counts as rejection,
        # matching the original test's semantics.
        except:
            threw = True
        harness.ok(threw, message)
        return parser

    # --- Basic empty interface -------------------------------------------
    parser.parse("interface Foo { };")
    results = parser.finish()
    harness.ok(True, "Empty interface parsed without error.")
    harness.check(len(results), 1, "Should be one production")
    harness.ok(isinstance(results[0], WebIDL.IDLInterface),
               "Should be an IDLInterface")
    iface = results[0]
    harness.check(iface.identifier.QName(), "::Foo", "Interface has the right QName")
    harness.check(iface.identifier.name, "Foo", "Interface has the right name")
    harness.check(iface.parent, None, "Interface has no parent")

    # --- Inheritance ------------------------------------------------------
    parser.parse("interface Bar : Foo { };")
    results = parser.finish()
    harness.ok(True, "Empty interface parsed without error.")
    harness.check(len(results), 2, "Should be two productions")
    harness.ok(isinstance(results[1], WebIDL.IDLInterface),
               "Should be an IDLInterface")
    iface = results[1]
    harness.check(iface.identifier.QName(), "::Bar", "Interface has the right QName")
    harness.check(iface.identifier.name, "Bar", "Interface has the right name")
    harness.ok(isinstance(iface.parent, WebIDL.IDLInterface),
               "Interface has a parent")

    # --- Member QNames across an inheritance chain ------------------------
    parser = parser.reset()
    parser.parse("""
        interface QNameBase {
          attribute long foo;
        };
        interface QNameDerived : QNameBase {
          attribute long long foo;
          attribute byte bar;
        };
    """)
    results = parser.finish()
    harness.check(len(results), 2, "Should be two productions")
    harness.ok(isinstance(results[0], WebIDL.IDLInterface),
               "Should be an IDLInterface")
    harness.ok(isinstance(results[1], WebIDL.IDLInterface),
               "Should be an IDLInterface")
    harness.check(results[1].parent, results[0], "Inheritance chain is right")
    harness.check(len(results[0].members), 1, "Expect 1 productions")
    harness.check(len(results[1].members), 2, "Expect 2 productions")
    base = results[0]
    derived = results[1]
    harness.check(base.members[0].identifier.QName(), "::QNameBase::foo",
                  "Member has the right QName")
    harness.check(derived.members[0].identifier.QName(), "::QNameDerived::foo",
                  "Member has the right QName")
    harness.check(derived.members[1].identifier.QName(), "::QNameDerived::bar",
                  "Member has the right QName")

    # --- Cycle rejection: each snippet below must fail to parse -----------
    parser = expect_throw(parser, """
        interface A : B {};
        interface B : A {};
    """, "Should not allow cycles in interface inheritance chains")

    parser = expect_throw(parser, """
        interface A : C {};
        interface C : B {};
        interface B : A {};
    """, "Should not allow indirect cycles in interface inheritance chains")

    parser = expect_throw(parser, """
        interface A {};
        interface B {};
        A implements B;
        B implements A;
    """, "Should not allow cycles via implements")

    parser = expect_throw(parser, """
        interface A {};
        interface C {};
        interface B {};
        A implements C;
        C implements B;
        B implements A;
    """, "Should not allow indirect cycles via implements")

    parser = expect_throw(parser, """
        interface A : B {};
        interface B {};
        B implements A;
    """, "Should not allow inheriting from an interface that implements us")

    parser = expect_throw(parser, """
        interface A : B {};
        interface B {};
        interface C {};
        B implements C;
        C implements A;
    """, "Should not allow inheriting from an interface that indirectly implements us")

    parser = expect_throw(parser, """
        interface A : B {};
        interface B : C {};
        interface C {};
        C implements A;
    """, "Should not allow indirectly inheriting from an interface that implements us")

    parser = expect_throw(parser, """
        interface A : B {};
        interface B : C {};
        interface C {};
        interface D {};
        C implements D;
        D implements A;
    """, "Should not allow indirectly inheriting from an interface that indirectly implements us")

    parser = expect_throw(parser, """
        interface A;
        interface B : A {};
    """, "Should not allow inheriting from an interface that is only forward declared")
| mpl-2.0 |
sestrella/ansible | lib/ansible/modules/net_tools/hetzner_failover_ip.py | 30 | 3706 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: hetzner_failover_ip
version_added: "2.9"
short_description: Manage Hetzner's failover IPs
author:
- Felix Fontein (@felixfontein)
description:
- Manage Hetzner's failover IPs.
seealso:
- name: Failover IP documentation
description: Hetzner's documentation on failover IPs.
link: https://wiki.hetzner.de/index.php/Failover/en
- module: hetzner_failover_ip_info
description: Retrieve information on failover IPs.
extends_documentation_fragment:
- hetzner
options:
failover_ip:
description: The failover IP address.
type: str
required: yes
state:
description:
- Defines whether the IP will be routed or not.
- If set to C(routed), I(value) must be specified.
type: str
choices:
- routed
- unrouted
default: routed
value:
description:
- The new value for the failover IP address.
- Required when setting I(state) to C(routed).
type: str
timeout:
description:
- Timeout to use when routing or unrouting the failover IP.
- Note that the API call returns when the failover IP has been
successfully routed to the new address, respectively successfully
unrouted.
type: int
default: 180
'''
EXAMPLES = r'''
- name: Set value of failover IP 1.2.3.4 to 5.6.7.8
hetzner_failover_ip:
hetzner_user: foo
hetzner_password: bar
failover_ip: 1.2.3.4
value: 5.6.7.8
- name: Set value of failover IP 1.2.3.4 to unrouted
hetzner_failover_ip:
hetzner_user: foo
hetzner_password: bar
failover_ip: 1.2.3.4
state: unrouted
'''
RETURN = r'''
value:
description:
- The value of the failover IP.
- Will be C(none) if the IP is unrouted.
returned: success
type: str
state:
description:
- Will be C(routed) or C(unrouted).
returned: success
type: str
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.hetzner import (
HETZNER_DEFAULT_ARGUMENT_SPEC,
get_failover,
set_failover,
get_failover_state,
)
def main():
    """Entry point: route or unroute a Hetzner failover IP via the robot API."""
    # Module interface matches the DOCUMENTATION block above.
    argument_spec = {
        'failover_ip': dict(type='str', required=True),
        'state': dict(type='str', default='routed', choices=['routed', 'unrouted']),
        'value': dict(type='str'),
        'timeout': dict(type='int', default=180),
    }
    argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=(
            ('state', 'routed', ['value']),
        ),
    )
    failover_ip = module.params['failover_ip']
    # Current routing target of the failover IP (None when unrouted).
    current = get_failover(module, failover_ip)
    changed = False
    before = get_failover_state(current)
    # Desired target: an IP address when routed, None when unrouted.
    target = module.params['value'] if module.params['state'] == 'routed' else None
    if current != target:
        if module.check_mode:
            # Pretend the switch happened without touching the API.
            current = target
            changed = True
        else:
            current, changed = set_failover(
                module, failover_ip, target,
                timeout=module.params['timeout'])
    after = get_failover_state(current)
    module.exit_json(
        changed=changed,
        diff=dict(
            before=before,
            after=after,
        ),
        **after
    )
if __name__ == '__main__':
main()
| gpl-3.0 |
kennym/itools | test/test_ical.py | 1 | 36237 | # -*- coding: UTF-8 -*-
# Copyright (C) 2005-2008 Juan David Ibáñez Palomar <jdavid@itaapy.com>
# Copyright (C) 2006-2007 Nicolas Deram <nicolas@itaapy.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
from cStringIO import StringIO
from datetime import datetime
from unittest import TestCase, main
# Import from itools
from itools.csv import Property
from itools.csv.table import encode_param_value
from itools.datatypes import String
from itools.ical import iCalendar, icalendarTable
# Example with 1 event
content = """
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Mozilla.org/NONSGML Mozilla Calendar V1.0//EN
METHOD:PUBLISH
BEGIN:VEVENT
UID:581361a0-1dd2-11b2-9a42-bd3958eeac9a
SUMMARY:Résumé
DESCRIPTION:all all all
LOCATION:France
STATUS:TENTATIVE
CLASS:PRIVATE
X-MOZILLA-RECUR-DEFAULT-INTERVAL:0
DTSTART;VALUE="DATE":20050530
DTEND;VALUE=DATE:20050531
DTSTAMP:20050601T074604Z
ATTENDEE;RSVP=TRUE;MEMBER="mailto:DEV-GROUP@host2.com":mailto:jdoe@itaapy.com
ATTENDEE;MEMBER="mailto:DEV-GROUP@host2.com":mailto:jsmith@itaapy.com
PRIORITY:1
SEQUENCE:0
END:VEVENT
END:VCALENDAR
"""
# Example with 2 events
content2 = """
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Mozilla.org/NONSGML Mozilla Calendar V1.0//EN
METHOD:PUBLISH
BEGIN:VEVENT
UID:581361a0-1dd2-11b2-9a42-bd3958eeac9a
SUMMARY:Refound
DESCRIPTION:all all all
LOCATION:France
STATUS:TENTATIVE
CLASS:PRIVATE
X-MOZILLA-RECUR-DEFAULT-INTERVAL:0
DTSTART;VALUE="DATE":20050530T000000
DTEND;VALUE=DATE:20050531T235959.999999
DTSTAMP:20050601T074604Z
ATTENDEE;RSVP=TRUE;MEMBER="mailto:DEV-GROUP@host2.com":mailto:jdoe@itaapy.com
PRIORITY:1
SEQUENCE:0
END:VEVENT
BEGIN:VEVENT
UID:581361a0-1dd2-11b2-9a42-bd3958eeac9b
SUMMARY:222222222
DTSTART;VALUE="DATE":20050701
DTEND;VALUE=DATE:20050701
ATTENDEE;RSVP=TRUE;MEMBER="mailto:DEV-GROUP@host2.com":mailto:jdoe@itaapy.com
PRIORITY:2
SEQUENCE:0
END:VEVENT
END:VCALENDAR
"""
def property_to_string(prop_name, prop):
    """Serialize *prop* as an icalendar-style ``NAME;PARAMS:VALUE`` line.

    Only used by test_load and test_load2 to compare parsed properties
    against their expected textual form.
    """
    # Build ";NAME=v1,v2" fragments with join instead of repeated string
    # concatenation (the original accumulated with ``params = params + ...``).
    params = ''.join(
        ';%s=%s' % (p_name,
                    ','.join(encode_param_value(p_name, x, String)
                             for x in p_values))
        for p_name, p_values in prop.parameters.items())
    return u'%s%s:%s' % (prop_name, params, prop.value)
class icalTestCase(TestCase):
    def setUp(self):
        # Parse the one-event (cal1) and two-event (cal2) fixture calendars
        # defined at module level.
        self.cal1 = iCalendar(string=content)
        self.cal2 = iCalendar(string=content2)
def test_new(self):
cal = iCalendar()
properties = []
for name in cal.properties:
params = cal.properties[name].parameters
value = cal.properties[name].value
property = '%s;%s:%s' % (name, params, value)
properties.append(property)
# Test properties
expected_properties = [
u'VERSION;{}:2.0',
u'PRODID;{}:-//itaapy.com/NONSGML ikaaro icalendar V1.0//EN']
self.assertEqual(properties, expected_properties)
# Test components
self.assertEqual(len(cal.get_components()), 0)
self.assertEqual(cal.get_components('VEVENT'), [])
def test_property(self):
"""Test to create, access and encode a property with or without
parameters.
"""
# Property without parameter
expected = ['SUMMARY:This is the summary\n']
property_value = Property('This is the summary')
output = self.cal1.encode_property('SUMMARY', property_value)
self.assertEqual(output, expected)
# Property with one parameter
expected = ['ATTENDEE;MEMBER="mailto:DEV-GROUP@host.com":'
'mailto:darwin@itaapy.com\n']
member = 'mailto:DEV-GROUP@host.com'
value = Property('mailto:darwin@itaapy.com', MEMBER=[member])
output = self.cal1.encode_property('ATTENDEE', value)
self.assertEqual(output, expected)
def test_get_property_values(self):
cal = self.cal1
# icalendar property
expected = '2.0'
property = cal.get_property_values('VERSION')
self.assertEqual(property.value, expected)
# Component property
events = cal.get_components('VEVENT')
properties = events[0].get_version()
expected = u'Résumé'
property = events[0].get_property_values('SUMMARY')
self.assertEqual(property.value, expected)
expected = 1
property = events[0].get_property_values('PRIORITY')
self.assertEqual(property.value, expected)
# Component properties
properties = {}
properties['MYADD'] = Property(u'Résumé à crêtes')
value = Property(u'Property added by calling add_property')
properties['DESCRIPTION'] = value
member = '"mailto:DEV-GROUP@host2.com"'
value = Property('mailto:darwin@itaapy.com', MEMBER=[member])
properties['ATTENDEE'] = value
uid = cal.add_component('VEVENT', **properties)
event = cal.get_component_by_uid(uid)
properties = event.get_property_values()
self.assertEqual('MYADD' in properties, True)
self.assertEqual('DESCRIPTION' in properties, True)
self.assertEqual('ATTENDEE' in properties, True)
self.assertEqual('VERSION' in properties, False)
def test_add_to_calendar(self):
"""Test to add property and component to an empty icalendar object.
"""
cal = iCalendar()
cal.add_component('VEVENT')
self.assertEqual(len(cal.get_components('VEVENT')), 1)
value = Property('PUBLISH')
cal.set_property('METHOD', value)
self.assertEqual(cal.get_property_values('METHOD'), value)
def test_load(self):
"""Test loading a simple calendar.
"""
cal = self.cal1
# Test icalendar properties
properties = []
for name in cal.properties:
property_value = cal.properties[name]
# Only property METHOD can occur several times, we give only one
if isinstance(property_value, list):
property_value = property_value[0]
params = property_value.parameters
value = property_value.value
property = '%s;%s:%s' % (name, params, value)
properties.append(property)
expected_properties = [
u'VERSION;{}:2.0',
u'METHOD;{}:PUBLISH',
u'PRODID;{}:-//Mozilla.org/NONSGML Mozilla Calendar V1.0//EN' ]
self.assertEqual(properties, expected_properties)
# Test component properties
properties = []
event = cal.get_components('VEVENT')[0]
version = event.get_version()
for prop_name in version:
datatype = cal.get_record_datatype(prop_name)
if datatype.multiple is False:
prop = version[prop_name]
property = property_to_string(prop_name, prop)
properties.append(property)
else:
for prop in version[prop_name]:
property = property_to_string(prop_name, prop)
properties.append(property)
expected_event_properties = [
u'STATUS:TENTATIVE',
u'DTSTAMP:2005-06-01 07:46:04',
u'DESCRIPTION:all all all',
u'ATTENDEE;MEMBER="mailto:DEV-GROUP@host2.com"'
';RSVP=TRUE:mailto:jdoe@itaapy.com',
u'ATTENDEE;MEMBER="mailto:DEV-GROUP@host2.com"'
':mailto:jsmith@itaapy.com',
u'SUMMARY:Résumé',
u'PRIORITY:1',
u'LOCATION:France',
u'X-MOZILLA-RECUR-DEFAULT-INTERVAL:0',
u'DTEND;VALUE=DATE:2005-05-31 00:00:00',
u'DTSTART;VALUE=DATE:2005-05-30 00:00:00',
u'CLASS:PRIVATE']
self.assertEqual(event.uid, '581361a0-1dd2-11b2-9a42-bd3958eeac9a')
self.assertEqual(properties, expected_event_properties)
self.assertEqual(len(cal.get_components('VEVENT')), 1)
# Test journals
self.assertEqual(len(cal.get_components('VJOURNAL')), 0)
# Test todos
self.assertEqual(len(cal.get_components('TODO')), 0)
# Test freebusys
self.assertEqual(len(cal.get_components('FREEBUSY')), 0)
# Test timezones
self.assertEqual(len(cal.get_components('TIMEZONE')), 0)
# Test others
self.assertEqual(len(cal.get_components('others')), 0)
def test_load_2(self):
"""Test loading a 2 events calendar.
"""
cal = self.cal2
properties = []
for name in cal.properties:
params = cal.properties[name].parameters
value = cal.properties[name].value
property = '%s;%s:%s' % (name, params, value)
properties.append(property)
# Test properties
expected_properties = [
u'VERSION;{}:2.0',
u'METHOD;{}:PUBLISH',
u'PRODID;{}:-//Mozilla.org/NONSGML Mozilla Calendar V1.0//EN' ]
self.assertEqual(properties, expected_properties)
events = []
for event in cal.get_components('VEVENT'):
version = event.get_version()
properties = []
for prop_name in version:
if prop_name == 'DTSTAMP':
continue
datatype = cal.get_record_datatype(prop_name)
if datatype.multiple is False:
prop = version[prop_name]
property = property_to_string(prop_name, prop)
properties.append(property)
else:
for prop in version[prop_name]:
property = property_to_string(prop_name, prop)
properties.append(property)
events.append(properties)
# Test events
expected_events = [[
u'STATUS:TENTATIVE',
u'DESCRIPTION:all all all',
u'ATTENDEE;MEMBER="mailto:DEV-GROUP@host2.com"'
';RSVP=TRUE:mailto:jdoe@itaapy.com',
u'SUMMARY:Refound',
u'PRIORITY:1',
u'LOCATION:France',
u'X-MOZILLA-RECUR-DEFAULT-INTERVAL:0',
u'DTEND;VALUE=DATE:2005-05-31 23:59:59.999999',
u'DTSTART;VALUE=DATE:2005-05-30 00:00:00',
u'CLASS:PRIVATE'],
[
u'ATTENDEE;MEMBER="mailto:DEV-GROUP@host2.com";RSVP=TRUE'\
':mailto:jdoe@itaapy.com',
u'SUMMARY:222222222',
u'PRIORITY:2',
u'DTEND;VALUE=DATE:2005-07-01 00:00:00',
u'DTSTART;VALUE=DATE:2005-07-01 00:00:00'
]]
self.assertEqual(events, expected_events)
self.assertEqual(len(cal.get_components('VEVENT')), 2)
# Test journals
self.assertEqual(len(cal.get_components('VJOURNAL')), 0)
# Test todos
self.assertEqual(len(cal.get_components('TODO')), 0)
# Test freebusys
self.assertEqual(len(cal.get_components('FREEBUSY')), 0)
# Test timezones
self.assertEqual(len(cal.get_components('TIMEZONE')), 0)
# Test others
self.assertEqual(len(cal.get_components('others')), 0)
# Just call to_str method
def test_to_str(self):
"""Call to_str method.
"""
cal = self.cal2
cal.to_str()
def test_add_property(self):
"""Test adding a property to any component.
"""
cal = self.cal2
event = cal.get_components('VEVENT')[1]
# other property (MYADD)
name, value = 'MYADD', Property(u'Résumé à crêtes')
cal.update_component(event.uid, **{name: value})
property = event.get_property_values(name)
self.assertEqual(property[0], value)
self.assertEqual(property[0].value, value.value)
# property DESCRIPTION
name = 'DESCRIPTION'
value = Property(u'Property added by calling add_property')
cal.update_component(event.uid, **{name: value})
property = event.get_property_values(name)
self.assertEqual(property, value)
# property ATTENDEE
name = 'ATTENDEE'
value = event.get_property_values(name)
member = '"mailto:DEV-GROUP@host2.com"'
value.append(Property('mailto:darwin@itaapy.com', MEMBER=[member]))
cal.update_component(event.uid, **{name: value})
property = event.get_property_values(name)
self.assertEqual(str(property[0].value), 'mailto:jdoe@itaapy.com')
self.assertEqual(property[1].parameters, {'MEMBER': [member]})
self.assertEqual(property[1], value[1])
def test_icalendar_set_property(self):
"""Test setting a new value to an existant icalendar property.
"""
cal = self.cal1
name, value = 'VERSION', Property('2.1')
cal.set_property(name, value)
self.assertEqual(cal.get_property_values(name), value)
cal.set_property(name, [value, ])
self.assertEqual(cal.get_property_values(name), value)
def test_component_set_property(self):
"""Test setting a new value to an existant component property.
"""
cal = self.cal1
event = cal.get_components('VEVENT')[0]
name, value = 'SUMMARY', Property('This is a new summary')
cal.update_component(event.uid, **{name: value})
self.assertEqual(event.get_property_values(name), value)
member = '"mailto:DEV-GROUP@host2.com"'
value = [
Property('mailto:darwin@itaapy.com', MEMBER=[member]),
Property('mailto:jdoe@itaapy.com'),
Property('mailto:jsmith@itaapy.com')]
cal.update_component(event.uid, ATTENDEE=value)
self.assertEqual(event.get_property_values('ATTENDEE'), value)
def test_search_events(self):
"""Test get events filtered by arguments given.
"""
# Test with 1 event
cal = self.cal1
attendee_value = 'mailto:jdoe@itaapy.com'
events = cal.search_events(ATTENDEE=attendee_value)
self.assertEqual(len(events), 1)
events = cal.search_events(STATUS='CONFIRMED')
self.assertEqual(events, [])
events = cal.search_events(STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(ATTENDEE=attendee_value, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(STATUS='TENTATIVE', PRIORITY=1)
self.assertEqual(len(events), 1)
events = cal.search_events(
ATTENDEE=[attendee_value, 'mailto:jsmith@itaapy.com'],
STATUS='TENTATIVE',
PRIORITY=1)
self.assertEqual(len(events), 1)
# Tests with 2 events
cal = self.cal2
attendee_value = 'mailto:jdoe@itaapy.com'
events = cal.search_events(ATTENDEE=attendee_value)
self.assertEqual(len(events), 2)
events = cal.search_events(STATUS='CONFIRMED')
self.assertEqual(events, [])
events = cal.search_events(STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(ATTENDEE=attendee_value, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events(STATUS='TENTATIVE', PRIORITY=1)
self.assertEqual(len(events), 1)
events = cal.search_events(
ATTENDEE=[attendee_value, 'mailto:jsmith@itaapy.com'],
STATUS='TENTATIVE',
PRIORITY=1)
self.assertEqual(len(events), 1)
def test_search_events_in_date(self):
"""Test search events by date.
"""
cal = self.cal1
date = datetime(2005, 5, 29)
events = cal.search_events_in_date(date)
self.assertEqual(len(events), 0)
self.assertEqual(cal.has_event_in_date(date), False)
date = datetime(2005, 5, 30)
events = cal.search_events_in_date(date)
self.assertEqual(len(events), 1)
self.assertEqual(cal.has_event_in_date(date), True)
events = cal.search_events_in_date(date, STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_date(date, STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
attendee_value = 'mailto:jdoe@itaapy.com'
events = cal.search_events_in_date(date, ATTENDEE=attendee_value)
self.assertEqual(len(events), 1)
events = cal.search_events_in_date(date, ATTENDEE=attendee_value,
STATUS='TENTATIVE')
self.assertEqual(len(events), 1)
events = cal.search_events_in_date(date, ATTENDEE=attendee_value,
STATUS='CONFIRMED')
self.assertEqual(len(events), 0)
date = datetime(2005, 7, 30)
events = cal.search_events_in_date(date)
self.assertEqual(len(events), 0)
self.assertEqual(cal.has_event_in_date(date), False)
def test_search_events_in_range(self):
    """Test search events matching given dates range.

    Uses self.cal2 (two events) and probes several [dtstart, dtend]
    windows: one entirely before both events, partial overlaps at the
    range boundaries, and one window wide enough to contain both.
    """
    cal = self.cal2
    # Window before any event: nothing matches.
    dtstart = datetime(2005, 1, 1)
    dtend = datetime(2005, 1, 1, 20, 0)
    events = cal.search_events_in_range(dtstart, dtend)
    self.assertEqual(len(events), 0)
    # Windows overlapping a single event.
    dtstart = datetime(2005, 5, 28)
    dtend = datetime(2005, 5, 30, 0, 50)
    events = cal.search_events_in_range(dtstart, dtend)
    self.assertEqual(len(events), 1)
    dtstart = datetime(2005, 5, 29)
    dtend = datetime(2005, 5, 30, 0, 1)
    events = cal.search_events_in_range(dtstart, dtend)
    self.assertEqual(len(events), 1)
    dtstart = datetime(2005, 5, 30, 23, 59, 59)
    dtend = datetime(2005, 5, 31, 0, 0)
    events = cal.search_events_in_range(dtstart, dtend)
    self.assertEqual(len(events), 1)
    # Wide window containing both events.
    dtstart = datetime(2005, 5, 1)
    dtend = datetime(2005, 8, 1)
    events = cal.search_events_in_range(dtstart, dtend)
    self.assertEqual(len(events), 2)
    dtstart = datetime(2005, 5, 30, 23)
    dtend = datetime(2005, 6, 1)
    events = cal.search_events_in_range(dtstart, dtend)
    self.assertEqual(len(events), 1)
    dtstart = datetime(2005, 5, 31, 0, 0, 1)
    dtend = datetime(2005, 6, 1)
    events = cal.search_events_in_range(dtstart, dtend)
    self.assertEqual(len(events), 1)
    # Same window, with additional property filters.
    events = cal.search_events_in_range(dtstart, dtend, STATUS='TENTATIVE')
    self.assertEqual(len(events), 1)
    events = cal.search_events_in_range(dtstart, dtend, STATUS='CONFIRMED')
    self.assertEqual(len(events), 0)
    attendee_value = 'mailto:jdoe@itaapy.com'
    events = cal.search_events_in_range(dtstart, dtend,
                                        ATTENDEE=attendee_value)
    self.assertEqual(len(events), 1)
    events = cal.search_events_in_range(dtstart, dtend,
                                        ATTENDEE=attendee_value, STATUS='TENTATIVE')
    self.assertEqual(len(events), 1)
    events = cal.search_events_in_range(dtstart, dtend,
                                        ATTENDEE=attendee_value, STATUS='CONFIRMED')
    self.assertEqual(len(events), 0)
def test_get_conflicts(self):
    """Test get_conflicts method which returns uid couples of events
    conflicting on a given date.
    """
    cal = self.cal2
    # NOTE: leading-zero integer literals (``05``) were removed; they are
    # octal syntax in Python 2 and a SyntaxError in Python 3 (PEP 3127).
    date = datetime(2005, 5, 30)
    conflicts = cal.get_conflicts(date)
    self.assertEqual(conflicts, None)
    # Move the second event onto the first event's dates to force a conflict.
    uid1 = '581361a0-1dd2-11b2-9a42-bd3958eeac9a'
    uid2 = '581361a0-1dd2-11b2-9a42-bd3958eeac9b'
    cal.update_component(uid2, DTSTART=Property(datetime(2005, 5, 30)),
                         DTEND=Property(datetime(2005, 5, 31)))
    conflicts = cal.get_conflicts(date)
    self.assertEqual(conflicts, [(uid1, uid2)])
class icalTableTestCase(TestCase):
    """Tests for the table-backed icalendar implementation (icalendarTable).

    Mirrors the plain-iCalendar test case above: ``cal1`` holds a single
    event, ``cal2`` holds two events.

    Fixes applied in review:
    - removed leading-zero integer literals (``05``) in ``test_get_conflicts``;
      they are octal syntax in Python 2 and a SyntaxError in Python 3
      (PEP 3127);
    - ``test_search_events`` now exercises ``self.cal2`` (an icalendarTable)
      instead of a freshly parsed ``iCalendar`` -- the old code was a
      copy-paste leftover that tested the wrong class.
    """

    def setUp(self):
        # Round-trip each fixture through its serialized form and load it
        # into a table.
        src = iCalendar(string=content)
        src = StringIO(src.to_str())
        cal = icalendarTable()
        cal.load_state_from_ical_file(src)
        self.cal1 = cal
        src = iCalendar(string=content2)
        src = StringIO(src.to_str())
        cal = icalendarTable()
        cal.load_state_from_ical_file(src)
        self.cal2 = cal

    def test_new(self):
        cal = icalendarTable()
        # A fresh table has no components at all.
        self.assertEqual(len(cal.get_components()), 0)
        self.assertEqual(cal.get_components('VEVENT'), [])

    def test_property(self):
        """Test to create, access and encode a property with or without
        parameters.
        """
        # Property without parameter
        expected = ['SUMMARY:This is the summary\n']
        property_value = Property('This is the summary')
        output = self.cal1.encode_property('SUMMARY', property_value)
        self.assertEqual(output, expected)
        # Property with one parameter
        expected = ['ATTENDEE;MEMBER="mailto:DEV-GROUP@host.com":'
                    'mailto:darwin@itaapy.com\n']
        member = 'mailto:DEV-GROUP@host.com'
        value = Property('mailto:darwin@itaapy.com', MEMBER=[member])
        output = self.cal1.encode_property('ATTENDEE', value)
        self.assertEqual(output, expected)

    def test_get_property(self):
        cal = self.cal1
        # Single component property lookups.
        events = cal.get_components('VEVENT')
        properties = events[0][-1]
        expected = u'Résumé'
        property = events[0].get_property('SUMMARY')
        self.assertEqual(property.value, expected)
        expected = 1
        property = events[0].get_property('PRIORITY')
        self.assertEqual(property.value, expected)
        # Add a record, then fetch the whole property mapping back.
        properties = {}
        properties['MYADD'] = Property(u'Résumé à crêtes')
        value = Property(u'Property added by calling add_property')
        properties['DESCRIPTION'] = value
        member = '"mailto:DEV-GROUP@host2.com"'
        value = Property('mailto:darwin@itaapy.com', MEMBER=[member])
        properties['ATTENDEE'] = value
        properties['type'] = 'VEVENT'
        uid = cal.add_record(properties).UID
        event = cal.get_component_by_uid(uid)[0]
        properties = event.get_property()
        self.assertEqual('MYADD' in properties, True)
        self.assertEqual('DESCRIPTION' in properties, True)
        self.assertEqual('ATTENDEE' in properties, True)
        self.assertEqual('VERSION' in properties, False)

    def test_add_to_calendar(self):
        """Test to add property and component to an empty icalendar object.
        """
        cal = icalendarTable()
        cal.add_record({'type': 'VEVENT'})
        self.assertEqual(len(cal.get_components('VEVENT')), 1)

    def test_load(self):
        """Test loading a simple calendar.
        """
        cal = self.cal1
        # Flatten every (non-bookkeeping) property of the first event into
        # "NAME:value" strings so the whole record can be compared at once.
        properties = []
        event = cal.get_components('VEVENT')[0]
        version = event[-1]
        for prop_name in version:
            if prop_name in ('ts', 'id', 'type', 'UID', 'SEQUENCE'):
                continue
            datatype = cal.get_record_datatype(prop_name)
            if getattr(datatype, 'multiple', False) is False:
                prop = version[prop_name]
                property = property_to_string(prop_name, prop)
                properties.append(property)
            else:
                for prop in version[prop_name]:
                    property = property_to_string(prop_name, prop)
                    properties.append(property)
        expected_event_properties = [
            u'STATUS:TENTATIVE',
            u'DTSTAMP:2005-06-01 07:46:04',
            u'DESCRIPTION:all all all',
            u'ATTENDEE;MEMBER="mailto:DEV-GROUP@host2.com"'
            u';RSVP=TRUE:mailto:jdoe@itaapy.com',
            u'ATTENDEE;MEMBER="mailto:DEV-GROUP@host2.com"'
            u':mailto:jsmith@itaapy.com',
            u'SUMMARY:Résumé',
            u'PRIORITY:1',
            u'LOCATION:France',
            u'X-MOZILLA-RECUR-DEFAULT-INTERVAL:0',
            u'DTEND;VALUE=DATE:2005-05-31 00:00:00',
            u'DTSTART;VALUE=DATE:2005-05-30 00:00:00',
            u'CLASS:PRIVATE']
        self.assertEqual(event.UID, '581361a0-1dd2-11b2-9a42-bd3958eeac9a')
        self.assertEqual(properties, expected_event_properties)
        self.assertEqual(len(cal.get_components('VEVENT')), 1)
        # Only VEVENT components should exist in this fixture.
        self.assertEqual(len(cal.get_components('VJOURNAL')), 0)
        self.assertEqual(len(cal.get_components('TODO')), 0)
        self.assertEqual(len(cal.get_components('FREEBUSY')), 0)
        self.assertEqual(len(cal.get_components('TIMEZONE')), 0)
        self.assertEqual(len(cal.get_components('others')), 0)

    def test_load_2(self):
        """Test loading a 2 events calendar.
        """
        cal = self.cal2
        events = []
        for event in cal.get_components('VEVENT'):
            version = event[-1]
            properties = []
            for prop_name in version:
                if prop_name in ('ts', 'id', 'type', 'UID', 'SEQUENCE'):
                    continue
                # DTSTAMP is set at load time, so it is not comparable.
                if prop_name == 'DTSTAMP':
                    continue
                datatype = cal.get_record_datatype(prop_name)
                if getattr(datatype, 'multiple', False) is False:
                    prop = version[prop_name]
                    property = property_to_string(prop_name, prop)
                    properties.append(property)
                else:
                    for prop in version[prop_name]:
                        property = property_to_string(prop_name, prop)
                        properties.append(property)
            events.append(properties)
        # Test events
        expected_events = [[
            u'ATTENDEE;MEMBER="mailto:DEV-GROUP@host2.com";RSVP=TRUE'
            u':mailto:jdoe@itaapy.com',
            u'SUMMARY:222222222',
            u'PRIORITY:2',
            u'DTEND;VALUE=DATE:2005-07-01 00:00:00',
            u'DTSTART;VALUE=DATE:2005-07-01 00:00:00'],
            [
            u'STATUS:TENTATIVE',
            u'DESCRIPTION:all all all',
            u'ATTENDEE;MEMBER="mailto:DEV-GROUP@host2.com"'
            u';RSVP=TRUE:mailto:jdoe@itaapy.com',
            u'SUMMARY:Refound',
            u'PRIORITY:1',
            u'LOCATION:France',
            u'X-MOZILLA-RECUR-DEFAULT-INTERVAL:0',
            u'DTEND;VALUE=DATE:2005-05-31 23:59:59.999999',
            u'DTSTART;VALUE=DATE:2005-05-30 00:00:00',
            u'CLASS:PRIVATE']
            ]
        self.assertEqual(events, expected_events)
        self.assertEqual(len(cal.get_components('VEVENT')), 2)
        # Only VEVENT components should exist in this fixture.
        self.assertEqual(len(cal.get_components('VJOURNAL')), 0)
        self.assertEqual(len(cal.get_components('TODO')), 0)
        self.assertEqual(len(cal.get_components('FREEBUSY')), 0)
        self.assertEqual(len(cal.get_components('TIMEZONE')), 0)
        self.assertEqual(len(cal.get_components('others')), 0)

    def test_to_ical(self):
        """Call to_ical method (smoke test: just check it does not raise).
        """
        cal = self.cal2
        cal.to_ical()

    def test_add_property(self):
        """Test adding a property to any component.
        """
        cal = self.cal2
        event = cal.get_components('VEVENT')[1]
        # Arbitrary (non-standard) property name.
        name, value = 'MYADD', Property(u'Résumé à crêtes')
        cal.update_record(event.id, **{name: value})
        property = event.get_property(name)
        self.assertEqual(property[0], value)
        self.assertEqual(property[0].value, value.value)
        # Standard single-valued property.
        name = 'DESCRIPTION'
        value = Property(u'Property added by calling add_property')
        cal.update_record(event.id, **{name: value})
        property = event.get_property(name)
        self.assertEqual(property, value)
        # Standard multi-valued property: append an attendee.
        name = 'ATTENDEE'
        value = event.get_property(name)
        member = '"mailto:DEV-GROUP@host2.com"'
        value.append(Property('mailto:darwin@itaapy.com', MEMBER=[member]))
        cal.update_record(event.id, **{name: value})
        property = event.get_property(name)
        self.assertEqual(str(property[0].value), 'mailto:jdoe@itaapy.com')
        self.assertEqual(property[1].parameters, {'MEMBER': [member]})
        self.assertEqual(property[1], value[1])

    def test_component_set_property(self):
        """Test setting a new value to an existant component property.
        """
        cal = self.cal1
        event = cal.get_components('VEVENT')[0]
        name, value = 'SUMMARY', Property('This is a new summary')
        cal.update_record(event.id, **{name: value})
        self.assertEqual(event.get_property(name), value)
        member = '"mailto:DEV-GROUP@host2.com"'
        value = [
            Property('mailto:darwin@itaapy.com', MEMBER=[member]),
            Property('mailto:jdoe@itaapy.com'),
            Property('mailto:jsmith@itaapy.com')]
        cal.update_record(event.id, ATTENDEE=value)
        self.assertEqual(event.get_property('ATTENDEE'), value)

    def test_search_events(self):
        """Test get events filtered by arguments given.
        """
        cal = self.cal1
        # Test with 1 event
        attendee_value = 'mailto:jdoe@itaapy.com'
        events = cal.search_events(ATTENDEE=attendee_value)
        self.assertEqual(len(events), 1)
        events = cal.search_events(STATUS='CONFIRMED')
        self.assertEqual(events, [])
        events = cal.search_events(STATUS='TENTATIVE')
        self.assertEqual(len(events), 1)
        events = cal.search_events(ATTENDEE=attendee_value, STATUS='TENTATIVE')
        self.assertEqual(len(events), 1)
        events = cal.search_events(STATUS='TENTATIVE', PRIORITY=1)
        self.assertEqual(len(events), 1)
        events = cal.search_events(
            ATTENDEE=[attendee_value, 'mailto:jsmith@itaapy.com'],
            STATUS='TENTATIVE',
            PRIORITY=1)
        self.assertEqual(len(events), 1)
        # Tests with 2 events
        # Fixed: the original built ``iCalendar(string=content2)`` here,
        # which exercised the plain iCalendar class instead of the table
        # under test.  Use the icalendarTable fixture like the other tests.
        cal = self.cal2
        attendee_value = 'mailto:jdoe@itaapy.com'
        events = cal.search_events(ATTENDEE=attendee_value)
        self.assertEqual(len(events), 2)
        events = cal.search_events(STATUS='CONFIRMED')
        self.assertEqual(events, [])
        events = cal.search_events(STATUS='TENTATIVE')
        self.assertEqual(len(events), 1)
        events = cal.search_events(ATTENDEE=attendee_value, STATUS='TENTATIVE')
        self.assertEqual(len(events), 1)
        events = cal.search_events(STATUS='TENTATIVE', PRIORITY=1)
        self.assertEqual(len(events), 1)
        events = cal.search_events(
            ATTENDEE=[attendee_value, 'mailto:jsmith@itaapy.com'],
            STATUS='TENTATIVE',
            PRIORITY=1)
        self.assertEqual(len(events), 1)

    def test_search_events_in_date(self):
        """Test search events by date.
        """
        cal = self.cal1
        date = datetime(2005, 5, 29)
        events = cal.search_events_in_date(date)
        self.assertEqual(len(events), 0)
        self.assertEqual(cal.has_event_in_date(date), False)
        date = datetime(2005, 5, 30)
        events = cal.search_events_in_date(date)
        self.assertEqual(len(events), 1)
        self.assertEqual(cal.has_event_in_date(date), True)
        events = cal.search_events_in_date(date, STATUS='TENTATIVE')
        self.assertEqual(len(events), 1)
        events = cal.search_events_in_date(date, STATUS='CONFIRMED')
        self.assertEqual(len(events), 0)
        attendee_value = 'mailto:jdoe@itaapy.com'
        events = cal.search_events_in_date(date, ATTENDEE=attendee_value)
        self.assertEqual(len(events), 1)
        events = cal.search_events_in_date(date, ATTENDEE=attendee_value,
                                           STATUS='TENTATIVE')
        self.assertEqual(len(events), 1)
        events = cal.search_events_in_date(date, ATTENDEE=attendee_value,
                                           STATUS='CONFIRMED')
        self.assertEqual(len(events), 0)
        date = datetime(2005, 7, 30)
        events = cal.search_events_in_date(date)
        self.assertEqual(len(events), 0)
        self.assertEqual(cal.has_event_in_date(date), False)

    def test_search_events_in_range(self):
        """Test search events matching given dates range.
        """
        cal = self.cal2
        dtstart = datetime(2005, 1, 1)
        dtend = datetime(2005, 1, 1, 20, 0)
        events = cal.search_events_in_range(dtstart, dtend)
        self.assertEqual(len(events), 0)
        dtstart = datetime(2005, 5, 28)
        dtend = datetime(2005, 5, 30, 0, 50)
        events = cal.search_events_in_range(dtstart, dtend)
        self.assertEqual(len(events), 1)
        dtstart = datetime(2005, 5, 29)
        dtend = datetime(2005, 5, 30, 0, 1)
        events = cal.search_events_in_range(dtstart, dtend)
        self.assertEqual(len(events), 1)
        dtstart = datetime(2005, 5, 30, 23, 59, 59)
        dtend = datetime(2005, 5, 31, 0, 0)
        events = cal.search_events_in_range(dtstart, dtend)
        self.assertEqual(len(events), 1)
        dtstart = datetime(2005, 5, 1)
        dtend = datetime(2005, 8, 1)
        events = cal.search_events_in_range(dtstart, dtend)
        self.assertEqual(len(events), 2)
        dtstart = datetime(2005, 5, 30, 23)
        dtend = datetime(2005, 6, 1)
        events = cal.search_events_in_range(dtstart, dtend)
        self.assertEqual(len(events), 1)
        dtstart = datetime(2005, 5, 31, 0, 0, 1)
        dtend = datetime(2005, 6, 1)
        events = cal.search_events_in_range(dtstart, dtend)
        self.assertEqual(len(events), 1)
        events = cal.search_events_in_range(dtstart, dtend, STATUS='TENTATIVE')
        self.assertEqual(len(events), 1)
        events = cal.search_events_in_range(dtstart, dtend, STATUS='CONFIRMED')
        self.assertEqual(len(events), 0)
        attendee_value = 'mailto:jdoe@itaapy.com'
        events = cal.search_events_in_range(dtstart, dtend,
                                            ATTENDEE=attendee_value)
        self.assertEqual(len(events), 1)
        events = cal.search_events_in_range(dtstart, dtend,
                                            ATTENDEE=attendee_value, STATUS='TENTATIVE')
        self.assertEqual(len(events), 1)
        events = cal.search_events_in_range(dtstart, dtend,
                                            ATTENDEE=attendee_value, STATUS='CONFIRMED')
        self.assertEqual(len(events), 0)

    def test_get_conflicts(self):
        """Test get_conflicts method which returns uid couples of events
        conflicting on a given date.
        """
        cal = self.cal2
        date = datetime(2005, 5, 30)
        conflicts = cal.get_conflicts(date)
        self.assertEqual(conflicts, None)
        # Move record 1 onto record 0's dates to force a conflict.
        uid1 = 0
        uid2 = 1
        cal.update_record(uid1, DTSTART=Property(datetime(2005, 5, 30)),
                          DTEND=Property(datetime(2005, 5, 31)))
        conflicts = cal.get_conflicts(date)
        self.assertEqual(conflicts, [(uid1, uid2)])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
rhertzog/django | django/db/backends/postgresql/client.py | 67 | 2126 | import os
import subprocess
from django.core.files.temp import NamedTemporaryFile
from django.db.backends.base.client import BaseDatabaseClient
from django.utils.six import print_
def _escape_pgpass(txt):
"""
Escape a fragment of a PostgreSQL .pgpass file.
"""
return txt.replace('\\', '\\\\').replace(':', '\\:')
class DatabaseClient(BaseDatabaseClient):
    """Runs the ``psql`` command-line client against a PostgreSQL database."""
    executable_name = 'psql'

    @classmethod
    def runshell_db(cls, conn_params):
        """Spawn ``psql`` connected per *conn_params*.

        The password (if any) is passed through a temporary .pgpass file
        pointed to by the PGPASSFILE environment variable, since psql does
        not accept a password on the command line.
        """
        args = [cls.executable_name]

        host = conn_params.get('host', '')
        port = conn_params.get('port', '')
        dbname = conn_params.get('database', '')
        user = conn_params.get('user', '')
        passwd = conn_params.get('password', '')

        if user:
            args += ['-U', user]
        if host:
            args += ['-h', host]
        if port:
            args += ['-p', str(port)]
        args += [dbname]

        temp_pgpass = None
        try:
            if passwd:
                # Create temporary .pgpass file.
                temp_pgpass = NamedTemporaryFile(mode='w+')
                try:
                    # .pgpass line format: host:port:dbname:user:password,
                    # with '*' matching any value for a missing field.
                    print_(
                        _escape_pgpass(host) or '*',
                        str(port) or '*',
                        _escape_pgpass(dbname) or '*',
                        _escape_pgpass(user) or '*',
                        _escape_pgpass(passwd),
                        file=temp_pgpass,
                        sep=':',
                        flush=True,
                    )
                    os.environ['PGPASSFILE'] = temp_pgpass.name
                except UnicodeEncodeError:
                    # If the current locale can't encode the data, we let
                    # the user input the password manually.
                    pass
            subprocess.check_call(args)
        finally:
            # Remove the temp file and the env var so nothing leaks to
            # later code (or later tests) in this process.
            if temp_pgpass:
                temp_pgpass.close()
                if 'PGPASSFILE' in os.environ:  # unit tests need cleanup
                    del os.environ['PGPASSFILE']

    def runshell(self):
        """Open an interactive psql shell for this connection."""
        DatabaseClient.runshell_db(self.connection.get_connection_params())
| bsd-3-clause |
ijuma/kafka | tests/kafkatest/benchmarks/core/benchmark_test.py | 16 | 14178 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import matrix
from ducktape.mark import parametrize
from ducktape.mark.resource import cluster
from ducktape.services.service import Service
from ducktape.tests.test import Test
from kafkatest.services.kafka import KafkaService
from kafkatest.services.performance import ProducerPerformanceService, EndToEndLatencyService, ConsumerPerformanceService, throughput, latency, compute_aggregate_throughput
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.version import DEV_BRANCH, KafkaVersion
# Topic names used by the benchmarks below, named after their replication
# factor (both are created with 6 partitions in Benchmark.__init__).
TOPIC_REP_ONE = "topic-replication-factor-one"
TOPIC_REP_THREE = "topic-replication-factor-three"

DEFAULT_RECORD_SIZE = 100  # bytes
class Benchmark(Test):
    """A benchmark of Kafka producer/consumer performance. This replicates the test
    run here:
    https://engineering.linkedin.com/kafka/benchmarking-apache-kafka-2-million-writes-second-three-cheap-machines

    Review fixes:
    - ``validate_versions``: the assertion message was missing the ``%``
      operator, so a failing assert raised ``TypeError: 'str' object is not
      callable`` instead of showing the message.
    - ``test_long_term_producer_throughput``: block-size arithmetic now uses
      floor division so the values stay integers under Python 3.
    """
    def __init__(self, test_context):
        super(Benchmark, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 3
        self.topics = {
            TOPIC_REP_ONE: {'partitions': 6, 'replication-factor': 1},
            TOPIC_REP_THREE: {'partitions': 6, 'replication-factor': 3}
        }

        self.zk = ZookeeperService(test_context, self.num_zk)

        self.msgs_large = 10000000
        self.batch_size = 8*1024
        self.buffer_memory = 64*1024*1024
        self.msg_sizes = [10, 100, 1000, 10000, 100000]
        self.target_data_size = 128*1024*1024
        self.target_data_size_gb = self.target_data_size/float(1024*1024*1024)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol, version):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics,
            version=version)
        self.kafka.log_level = "INFO"  # We don't DEBUG logging here
        self.kafka.start()

    @cluster(num_nodes=5)
    @parametrize(acks=1, topic=TOPIC_REP_ONE)
    @parametrize(acks=1, topic=TOPIC_REP_THREE)
    @parametrize(acks=-1, topic=TOPIC_REP_THREE)
    @matrix(acks=[1], topic=[TOPIC_REP_THREE], message_size=[10, 100, 1000, 10000, 100000], compression_type=["none", "snappy"], security_protocol=['PLAINTEXT', 'SSL'])
    @cluster(num_nodes=7)
    @parametrize(acks=1, topic=TOPIC_REP_THREE, num_producers=3)
    def test_producer_throughput(self, acks, topic, num_producers=1, message_size=DEFAULT_RECORD_SIZE,
                                 compression_type="none", security_protocol='PLAINTEXT', client_version=str(DEV_BRANCH),
                                 broker_version=str(DEV_BRANCH)):
        """
        Setup: 1 node zk + 3 node kafka cluster
        Produce ~128MB worth of messages to a topic with 6 partitions. Required acks, topic replication factor,
        security protocol and message size are varied depending on arguments injected into this test.

        Collect and return aggregate throughput statistics after all messages have been acknowledged.
        (This runs ProducerPerformance.java under the hood)
        """
        client_version = KafkaVersion(client_version)
        broker_version = KafkaVersion(broker_version)
        self.validate_versions(client_version, broker_version)
        self.start_kafka(security_protocol, security_protocol, broker_version)
        # Always generate the same total amount of data
        nrecords = int(self.target_data_size / message_size)

        self.producer = ProducerPerformanceService(
            self.test_context, num_producers, self.kafka, topic=topic,
            num_records=nrecords, record_size=message_size, throughput=-1, version=client_version,
            settings={
                'acks': acks,
                'compression.type': compression_type,
                'batch.size': self.batch_size,
                'buffer.memory': self.buffer_memory})
        self.producer.run()
        return compute_aggregate_throughput(self.producer)

    @cluster(num_nodes=5)
    @parametrize(security_protocol='SSL', interbroker_security_protocol='PLAINTEXT')
    @matrix(security_protocol=['PLAINTEXT', 'SSL'], compression_type=["none", "snappy"])
    def test_long_term_producer_throughput(self, compression_type="none", security_protocol='PLAINTEXT',
                                           interbroker_security_protocol=None, client_version=str(DEV_BRANCH),
                                           broker_version=str(DEV_BRANCH)):
        """
        Setup: 1 node zk + 3 node kafka cluster
        Produce 10e6 100 byte messages to a topic with 6 partitions, replication-factor 3, and acks=1.

        Collect and return aggregate throughput statistics after all messages have been acknowledged.

        (This runs ProducerPerformance.java under the hood)
        """
        client_version = KafkaVersion(client_version)
        broker_version = KafkaVersion(broker_version)
        self.validate_versions(client_version, broker_version)
        if interbroker_security_protocol is None:
            interbroker_security_protocol = security_protocol
        self.start_kafka(security_protocol, interbroker_security_protocol, broker_version)
        self.producer = ProducerPerformanceService(
            self.test_context, 1, self.kafka,
            topic=TOPIC_REP_THREE, num_records=self.msgs_large, record_size=DEFAULT_RECORD_SIZE,
            throughput=-1, version=client_version, settings={
                'acks': 1,
                'compression.type': compression_type,
                'batch.size': self.batch_size,
                'buffer.memory': self.buffer_memory
            },
            intermediate_stats=True
        )
        self.producer.run()

        summary = ["Throughput over long run, data > memory:"]
        data = {}
        # FIXME we should be generating a graph too
        # Try to break it into 5 blocks, but fall back to a smaller number if
        # there aren't even 5 elements
        # Floor division keeps these as ints (required by range()) on Python 3.
        block_size = max(len(self.producer.stats[0]) // 5, 1)
        nblocks = len(self.producer.stats[0]) // block_size

        for i in range(nblocks):
            subset = self.producer.stats[0][i*block_size:min((i+1)*block_size, len(self.producer.stats[0]))]
            if len(subset) == 0:
                summary.append(" Time block %d: (empty)" % i)
                data[i] = None
            else:
                records_per_sec = sum([stat['records_per_sec'] for stat in subset])/float(len(subset))
                mb_per_sec = sum([stat['mbps'] for stat in subset])/float(len(subset))

                summary.append(" Time block %d: %f rec/sec (%f MB/s)" % (i, records_per_sec, mb_per_sec))
                data[i] = throughput(records_per_sec, mb_per_sec)

        self.logger.info("\n".join(summary))
        return data

    @cluster(num_nodes=5)
    @parametrize(security_protocol='SSL', interbroker_security_protocol='PLAINTEXT')
    @matrix(security_protocol=['PLAINTEXT', 'SSL'], compression_type=["none", "snappy"])
    @cluster(num_nodes=6)
    @matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'], compression_type=["none", "snappy"])
    def test_end_to_end_latency(self, compression_type="none", security_protocol="PLAINTEXT",
                                interbroker_security_protocol=None, client_version=str(DEV_BRANCH),
                                broker_version=str(DEV_BRANCH)):
        """
        Setup: 1 node zk + 3 node kafka cluster
        Produce (acks = 1) and consume 10e3 messages to a topic with 6 partitions and replication-factor 3,
        measuring the latency between production and consumption of each message.

        Return aggregate latency statistics.

        (Under the hood, this simply runs EndToEndLatency.scala)
        """
        client_version = KafkaVersion(client_version)
        broker_version = KafkaVersion(broker_version)
        self.validate_versions(client_version, broker_version)
        if interbroker_security_protocol is None:
            interbroker_security_protocol = security_protocol
        self.start_kafka(security_protocol, interbroker_security_protocol, broker_version)
        self.logger.info("BENCHMARK: End to end latency")
        self.perf = EndToEndLatencyService(
            self.test_context, 1, self.kafka,
            topic=TOPIC_REP_THREE, num_records=10000,
            compression_type=compression_type, version=client_version
        )
        self.perf.run()
        return latency(self.perf.results[0]['latency_50th_ms'], self.perf.results[0]['latency_99th_ms'], self.perf.results[0]['latency_999th_ms'])

    @cluster(num_nodes=6)
    @parametrize(security_protocol='PLAINTEXT', new_consumer=False)
    @parametrize(security_protocol='SSL', interbroker_security_protocol='PLAINTEXT')
    @matrix(security_protocol=['PLAINTEXT', 'SSL'], compression_type=["none", "snappy"])
    def test_producer_and_consumer(self, compression_type="none", security_protocol="PLAINTEXT",
                                   interbroker_security_protocol=None, new_consumer=True,
                                   client_version=str(DEV_BRANCH), broker_version=str(DEV_BRANCH)):
        """
        Setup: 1 node zk + 3 node kafka cluster
        Concurrently produce and consume 10e6 messages with a single producer and a single consumer,
        using new consumer if new_consumer == True

        Return aggregate throughput statistics for both producer and consumer.

        (Under the hood, this runs ProducerPerformance.java, and ConsumerPerformance.scala)
        """
        client_version = KafkaVersion(client_version)
        broker_version = KafkaVersion(broker_version)
        self.validate_versions(client_version, broker_version)
        if interbroker_security_protocol is None:
            interbroker_security_protocol = security_protocol
        self.start_kafka(security_protocol, interbroker_security_protocol, broker_version)
        num_records = 10 * 1000 * 1000  # 10e6

        self.producer = ProducerPerformanceService(
            self.test_context, 1, self.kafka,
            topic=TOPIC_REP_THREE,
            num_records=num_records, record_size=DEFAULT_RECORD_SIZE, throughput=-1, version=client_version,
            settings={
                'acks': 1,
                'compression.type': compression_type,
                'batch.size': self.batch_size,
                'buffer.memory': self.buffer_memory
            }
        )
        self.consumer = ConsumerPerformanceService(
            self.test_context, 1, self.kafka, topic=TOPIC_REP_THREE, new_consumer=new_consumer, messages=num_records)
        Service.run_parallel(self.producer, self.consumer)

        data = {
            "producer": compute_aggregate_throughput(self.producer),
            "consumer": compute_aggregate_throughput(self.consumer)
        }
        summary = [
            "Producer + consumer:",
            str(data)]
        self.logger.info("\n".join(summary))
        return data

    @cluster(num_nodes=6)
    @parametrize(security_protocol='PLAINTEXT', new_consumer=False)
    @parametrize(security_protocol='SSL', interbroker_security_protocol='PLAINTEXT')
    @matrix(security_protocol=['PLAINTEXT', 'SSL'], compression_type=["none", "snappy"])
    def test_consumer_throughput(self, compression_type="none", security_protocol="PLAINTEXT",
                                 interbroker_security_protocol=None, new_consumer=True, num_consumers=1,
                                 client_version=str(DEV_BRANCH), broker_version=str(DEV_BRANCH)):
        """
        Consume 10e6 100-byte messages with 1 or more consumers from a topic with 6 partitions
        (using new consumer iff new_consumer == True), and report throughput.
        """
        client_version = KafkaVersion(client_version)
        broker_version = KafkaVersion(broker_version)
        self.validate_versions(client_version, broker_version)
        if interbroker_security_protocol is None:
            interbroker_security_protocol = security_protocol
        self.start_kafka(security_protocol, interbroker_security_protocol, broker_version)
        num_records = 10 * 1000 * 1000  # 10e6

        # seed kafka w/messages
        self.producer = ProducerPerformanceService(
            self.test_context, 1, self.kafka,
            topic=TOPIC_REP_THREE,
            num_records=num_records, record_size=DEFAULT_RECORD_SIZE, throughput=-1, version=client_version,
            settings={
                'acks': 1,
                'compression.type': compression_type,
                'batch.size': self.batch_size,
                'buffer.memory': self.buffer_memory
            }
        )
        self.producer.run()

        # consume
        self.consumer = ConsumerPerformanceService(
            self.test_context, num_consumers, self.kafka,
            topic=TOPIC_REP_THREE, new_consumer=new_consumer, messages=num_records)
        self.consumer.group = "test-consumer-group"
        self.consumer.run()
        return compute_aggregate_throughput(self.consumer)

    def validate_versions(self, client_version, broker_version):
        # The ``%`` operator was missing in the original, which made a
        # failing assertion raise TypeError instead of showing this message.
        assert client_version <= broker_version, \
            "Client version %s should be <= than broker version %s" % (client_version, broker_version)
| apache-2.0 |
hirofumi0810/tensorflow_end2end_speech_recognition | utils/dataset/xe.py | 1 | 5444 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Base class for loading dataset for the frame-wise model.
In this class, all data will be loaded at each step.
You can use the multi-GPU version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from utils.dataset.base import Base
class DatasetBase(Base):
    """Frame-wise dataset that loads data block by block.

    Blocks (``.npy`` files listed in ``input_paths``/``label_paths``) are
    sampled without replacement per epoch; frames within the currently
    loaded block are likewise sampled without replacement per mini-batch.
    NOTE(review): relies on attributes set up elsewhere (``input_paths``,
    ``label_paths``, ``rest_block``, ``batch_size``, ``num_gpu``, ...) --
    presumably by the ``Base`` class; confirm against utils/dataset/base.py.
    """

    def __init__(self, *args, **kwargs):
        super(DatasetBase, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        # Returns the (input path, label path) pair at *index*, wrapped in
        # numpy arrays -- not the loaded data itself.
        input_i = np.array(self.input_paths[index])
        label_i = np.array(self.label_paths[index])
        return (input_i, label_i)

    def __len__(self):
        # Hard-coded total frame counts per split.
        # NOTE(review): no return value (None) for any other data_type.
        if self.data_type == 'train':
            return 18088388
        elif self.data_type == 'dev_clean':
            return 968057
        elif self.data_type == 'dev_other':
            return 919980

    def __next__(self, batch_size=None):
        """Generate each mini-batch.

        Args:
            batch_size (int, optional): the size of mini-batch
        Returns:
            A tuple of `(inputs, labels, inputs_seq_len, labels_seq_len, input_names)`
                inputs: list of input data of size
                    `[num_gpu, B, input_size]`
                labels: list of target labels of size
                    `[num_gpu, B, num_classes]`
                input_names: list of file name of input data of size
                    `[num_gpu, B]`
            is_new_epoch (bool): If true, 1 epoch is finished
        """
        if self.max_epoch is not None and self.epoch >= self.max_epoch:
            raise StopIteration
        # NOTE: max_epoch = None means infinite loop

        if batch_size is None:
            batch_size = self.batch_size

        # reset
        if self.is_new_epoch:
            self.is_new_epoch = False

        # Load the first block at each epoch
        if self.iteration == 0 or self.is_new_epoch:
            # Randomly sample block
            block_index = random.sample(list(self.rest_block), 1)
            self.rest_block -= set(block_index)

            # Load block
            self.inputs_block = np.array(list(
                map(lambda path: np.load(path),
                    self.input_paths[block_index])))
            # NOTE: `[1, num_frames_per_block, input_dim]`
            self.inputs_block = self.inputs_block.reshape(
                -1, self.inputs_block.shape[-1])

            self.labels_block = np.array(list(
                map(lambda path: np.load(path),
                    self.label_paths[block_index])))
            # NOTE: `[1, num_frames_per_block, num_classes]`
            self.labels_block = self.labels_block.reshape(
                -1, self.labels_block.shape[-1])

            self.rest_frames = set(range(0, len(self.inputs_block), 1))

        # Load block if needed
        # (fewer frames left in the current block than one batch: prepend
        # the leftovers to a freshly loaded block)
        if len(self.rest_frames) < batch_size and len(self.rest_block) != 0:
            # Randomly sample block
            if len(self.rest_block) > 1:
                block_index = random.sample(list(self.rest_block), 1)
            else:
                # Last block in each epoch
                block_index = list(self.rest_block)
            self.rest_block -= set(block_index)

            # tmp: keep the not-yet-consumed frames of the old block
            rest_inputs_pre_block = self.inputs_block[list(self.rest_frames)]
            rest_labels_pre_block = self.labels_block[list(self.rest_frames)]

            self.inputs_block = np.array(list(
                map(lambda path: np.load(path),
                    self.input_paths[block_index]))).reshape(-1, self.inputs_block.shape[-1])
            self.labels_block = np.array(list(
                map(lambda path: np.load(path),
                    self.label_paths[block_index]))).reshape(-1, self.labels_block.shape[-1])

            # Concatenate
            self.inputs_block = np.concatenate(
                (rest_inputs_pre_block, self.inputs_block), axis=0)
            self.labels_block = np.concatenate(
                (rest_labels_pre_block, self.labels_block), axis=0)

            self.rest_frames = set(range(0, len(self.inputs_block), 1))

        # Randomly sample frames
        if len(self.rest_frames) > batch_size:
            frame_indices = random.sample(
                list(self.rest_frames), batch_size)
        else:
            # Last mini-batch in each block
            frame_indices = list(self.rest_frames)
            # Shuffle selected mini-batch
            random.shuffle(frame_indices)
        self.rest_frames -= set(frame_indices)

        # No blocks and no frames left: the epoch is finished
        if len(self.rest_block) == 0 and len(self.rest_frames) == 0:
            self.reset()
            self.is_new_epoch = True
            self.epoch += 1
            self.rest_block = set(range(0, len(self.input_paths), 1))

        # Set values of each data in mini-batch
        inputs = self.inputs_block[frame_indices]
        labels = self.labels_block[frame_indices]

        ###############
        # Multi-GPUs
        ###############
        if self.num_gpu > 1:
            # Now we split the mini-batch data by num_gpu
            inputs = np.array_split(inputs, self.num_gpu, axis=0)
            labels = np.array_split(labels, self.num_gpu, axis=0)
        else:
            # Keep a leading axis of size 1 so the shape matches the
            # multi-GPU case: `[num_gpu, B, ...]`.
            inputs = inputs[np.newaxis, :, :]
            labels = labels[np.newaxis, :, :]

        self.iteration += len(frame_indices)

        return (inputs, labels), self.is_new_epoch
| mit |
wagnerjs/speakerfight | deck/migrations/0011_auto_20150825_1628.py | 14 | 1293 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Turn Proposal into a child of Activity (multi-table inheritance).

    Removes the columns Proposal previously duplicated from Activity, then
    adds the parent-link OneToOneField that becomes the new primary key.
    """

    dependencies = [
        ('deck', '0010_create_activities_from_proposals'),
    ]

    # Fields that now live on the Activity parent model.
    _obsolete_fields = (
        'author',
        'created_at',
        'description',
        'id',
        'is_published',
        'slug',
        'title',
    )

    operations = [
        migrations.RemoveField(model_name='proposal', name=field_name)
        for field_name in _obsolete_fields
    ] + [
        migrations.AddField(
            model_name='proposal',
            name='activity_ptr',
            field=models.OneToOneField(
                parent_link=True,
                auto_created=True,
                primary_key=True,
                default=None,
                serialize=False,
                to='deck.Activity',
            ),
            preserve_default=False,
        ),
    ]
| mit |
sparkslabs/kamaelia_ | Sketches/PT/likefile/testlikefile.py | 3 | 8892 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from likefile import LikeFile, schedulerThread
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
import unittest, random, Axon, threading, time
# Run the Axon scheduler on a background thread so LikeFile wrappers can be
# driven from ordinary (non-Axon) code in the tests below.
scheduler = schedulerThread(slowmo=0.001)
scheduler.start()
# Shared fixture: ten random floats in [0, 1) used as test payloads.
randlist = [random.random() for x in xrange(0, 10)]
class DyingShunt(component):
    """A component which passes all data through itself, and terminates on
    receipt of shutdownMicroprocess() or producerFinished() on "control"
    (forwarding the shutdown message to "signal" before exiting)."""
    Inboxes = {
        "inbox": "Input data",
        "control": "Control data",
        "extrain": "An additional nonstandard inbox",
    }
    Outboxes = {
        "outbox": "Input data is echoed here",
        "signal": "Control data is echoed here",
        "extraout": "Extra data is echoed here",
    }

    def main(self):
        while True:
            yield 1
            # Drain the plain data boxes first, in the same order as before.
            for source, sink in (("inbox", "outbox"), ("extrain", "extraout")):
                while self.dataReady(source):
                    self.send(self.recv(source), sink)
            # Then echo control traffic, terminating on a shutdown message.
            while self.dataReady("control"):
                message = self.recv("control")
                self.send(message, "signal")
                if isinstance(message, (producerFinished, shutdownMicroprocess)):
                    return
class Dummy(component):
    """Inert endpoint for wiring up a component under test.

    Its box names deliberately mirror DyingShunt's (note "extraout" as an
    inbox and "extrain" as an outbox) so links can be made symmetrically;
    the main loop itself never touches any box.
    """
    Inboxes = {
        "inbox": "Input data",
        "control": "Control data",
        "extraout": "An additional nonstandard inbox",
    }
    Outboxes = {
        "outbox": "Input data is echoed here",
        "signal": "Control data is echoed here",
        "extrain": "Extra data is echoed here",
    }

    def main(self):
        # Idle forever; traffic is injected and collected externally.
        while True:
            yield 1
class Test_DyingShunt(unittest.TestCase):
    """A test for the test dummy component used to test likefile. If this test passes, the behaviour of DyingShunt is assumed to always work."""
    def setUp(self):
        # Swap in a private scheduler instance so each test can single-step
        # it by hand through self.run instead of running it on a thread.
        self.oldRun = Axon.Scheduler.scheduler.run
        self.scheduler = Axon.Scheduler.scheduler()
        Axon.Scheduler.scheduler.run = self.scheduler
        self.shunt = DyingShunt()
        # Feeder -> shunt wiring (one link per box pair).
        self.inSrc = Dummy()
        self.inSrc.link((self.inSrc,"outbox"), (self.shunt,"inbox"))
        self.inSrc.link((self.inSrc,"signal"), (self.shunt,"control"))
        self.inSrc.link((self.inSrc,"extrain"), (self.shunt,"extrain"))
        # Shunt -> collector wiring.
        self.outDest = Dummy()
        self.outDest.link((self.shunt,"outbox"), (self.outDest,"inbox"))
        self.outDest.link((self.shunt,"signal"), (self.outDest,"control"))
        self.outDest.link((self.shunt,"extraout"), (self.outDest,"extraout"))
        self.run = self.scheduler.main()
        self.shunt.activate()
    def tearDown(self):
        # Restore the global scheduler monkeypatched in setUp.
        del self.run, self.shunt, Axon.Scheduler.scheduler.run
        Axon.Scheduler.scheduler.run = self.oldRun
    def runFor(self, iterations):
        # Advance the scheduler generator a fixed number of steps.
        for i in xrange(0, iterations):
            self.run.next()
    def test_passthrough(self):
        # Each box gets a distinct offset (+0 / +1 / +2) so crosstalk
        # between boxes could not accidentally pass the test.
        for i in randlist:
            self.inSrc.send(i, "outbox")
            self.inSrc.send(i + 1, "signal")
            self.inSrc.send(i + 2, "extrain")
        self.runFor(20) # shouldn't terminate
        for i in randlist:
            self.failUnless(self.outDest.recv("inbox") == i)
            self.failUnless(self.outDest.recv("control") == i + 1)
            self.failUnless(self.outDest.recv("extraout") == i + 2)
    def test_shutdown1(self):
        # shutdownMicroprocess on "control" must terminate the component.
        self.inSrc.send(shutdownMicroprocess(), "signal")
        self.failUnlessRaises(StopIteration, self.runFor, iterations = 10)
        self.failUnless(isinstance(self.outDest.recv("control"), shutdownMicroprocess)) # pass through the shutdown code
    def test_shutdown2(self):
        # producerFinished on "control" must terminate the component too.
        self.inSrc.send(producerFinished(), "signal")
        self.failUnlessRaises(StopIteration, self.runFor, iterations = 10)
        self.failUnless(isinstance(self.outDest.recv("control"), producerFinished)) # pass through the shutdown code
class test_LikeFile(unittest.TestCase):
    """Lifecycle tests for the LikeFile wrapper itself: creation/shutdown,
    many concurrent wrappers, and loud failure for IO on unactivated,
    closed, or nonexistent-box wrappers.  setUp/tearDown additionally
    assert that no threads or scheduler entries leak across a test."""
    def status(self):
        # Debug helper: dump thread count and scheduler bookkeeping.
        print threading.activeCount(), len(Axon.Scheduler.scheduler.run.threads), Axon.Scheduler.scheduler.run.threads
    def setUp(self):
        # Snapshot counts so tearDown can detect leaked threads/components.
        self.numthreads = threading.activeCount()
        self.numcomponents = len(Axon.Scheduler.scheduler.run.threads)
    def tearDown(self):
        # the small timeout is necessary, since the shutdown signal is sent before
        # likefile has returned, and if we check immediately then it might not have died yet.
        time.sleep(0.5)
        self.failUnless(self.numcomponents == len(Axon.Scheduler.scheduler.run.threads))
        self.failUnless(self.numthreads == threading.activeCount())
        ## make sure also that creating then killing a likefile doesn't leave any crufty extra threads or extra scheduler entries.
    def test_nop(self):
        """Test that creating, activating, and deleting a wrapped component doesn't fail."""
        self.component = LikeFile(DyingShunt())
        self.component.activate()
        time.sleep(0.25) # I think this might be a threading issue - the instant shutdown is not being processed.
        self.component.shutdown()
        del self.component
    def testmany(self):
        # Exercise many concurrent likefile wrappers at once.
        compdict = dict()
        for i in xrange(1, 50): # test 100 concurrent likefiles.
            compdict[i] = LikeFile(DyingShunt(), extraInboxes = "extrain", extraOutboxes = "extraout")
            compdict[i].activate()
        time.sleep(0.1)
        for num, component in compdict.iteritems():
            for i in randlist:
                # i is a random integer between 0 and 1, so the following manipulations guarantee that each box on each
                # component gets a different number, to eliminate crosstalk passing a test.
                component.put(num + i, "inbox")
                component.put(num + i % 0.5, "control")
                component.put(num + i % 0.25, "extrain")
        for num, component in compdict.iteritems():
            for i in randlist:
                self.failUnless(component.get("outbox") == num + i)
                self.failUnless(component.get("signal") == num + i % 0.5)
                self.failUnless(component.get("extraout") == num + i % 0.25)
        for component in compdict.itervalues():
            component.shutdown()
    def test_aborted(self):
        """test that creating but not activating a likefile wrapper doesn't leave any cruft in the scheduler,
        and that you can't perform IO on a pre-activated component."""
        component = LikeFile(DyingShunt())
        self.failUnlessRaises(AttributeError, component.get)
        self.failUnlessRaises(AttributeError, component.put, "boo")
    def test_badboxwrap(self):
        """test that wrapping a nonexistent box will fail."""
        self.failUnlessRaises(KeyError, LikeFile, DyingShunt(), extraInboxes = "nonsenseaddbox")
        self.failUnlessRaises(KeyError, LikeFile, DyingShunt(), extraOutboxes = "nonsenseaddbox")
    def test_badboxuse(self):
        """test that IO on a box name that doesn't exist will fail."""
        component = LikeFile(DyingShunt())
        component.activate()
        self.failUnlessRaises(KeyError, component.put, "boo", "nonsensesendbox")
        self.failUnlessRaises(KeyError, component.get, "nonsensesendbox")
        component.shutdown()
    def test_closed(self):
        """test that creating, activating, and then closing a likefile wrapper will result in an object you're not
        allowed to perform IO on."""
        component = LikeFile(DyingShunt())
        component.activate()
        time.sleep(0.1)
        component.shutdown()
        time.sleep(0.1)
        self.failUnlessRaises(AttributeError, component.get)
        self.failUnlessRaises(AttributeError, component.put, "boo")
if __name__ == "__main__":
    import sys
    # Suppress tracebacks: if the interpreter exits with active threads,
    # the resulting spam pushes anything useful off the top of the page.
    # NOTE: these two lines previously sat *after* unittest.main(), which
    # calls sys.exit() -- so they never executed when run as a script.
    sys.tracebacklimit = 0
    unittest.main()
schmatzler/zte-kernel-smartchat | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line handling: optional [comm] filter and/or [interval] seconds.
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    # Two args: comm first, then the refresh interval.
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        # A single numeric arg is the interval ...
        interval = int(sys.argv[1])
    except ValueError:
        # ... otherwise it is the comm to filter on.
        for_comm = sys.argv[1]
        interval = default_interval
# Per-syscall-id hit counters, filled by the sys_enter hook below.
syscalls = autodict()
def trace_begin():
    # perf hook: called once before event processing starts.  Spawn the
    # background thread that periodically prints the running totals.
    # (The dead "pass" that followed the spawn has been removed.)
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# perf hook: invoked for every raw_syscalls:sys_enter event.
	if for_comm is not None:
		# Honour the optional [comm] filter from the command line.
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit for this id: the autodict leaf is not yet a number.
		syscalls[id] = 1
def print_syscall_totals(interval):
	# Display loop, run on its own thread: every `interval` seconds clear
	# the terminal, print the per-syscall counts sorted descending by
	# count, then reset the counters for the next window.
	# (Python 2: the trailing commas on print suppress extra newlines.)
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s %10s\n" % ("event", "count"),
		print "%-40s %10s\n" % ("----------------------------------------", \
			"----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
			reverse = True):
			try:
				print "%-40s %10d\n" % (syscall_name(id), val),
			except TypeError:
				# presumably ids syscall_name cannot format -- TODO confirm
				pass
		syscalls.clear()
		time.sleep(interval)
| gpl-2.0 |
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price | data/25 verizon/getDataAutoQuery.py | 1 | 3761 | import webhose;
import time;
from datetime import datetime, timedelta
from lxml import html
import requests
import unirest
# Configure the webhose.io client with the account API token.
webhose.config(token='c6052904-f312-436b-a6d8-d915084ac866')
days_back = 30
date_days_ago = datetime.now() - timedelta(days=days_back)
# Query parameters.  NOTE(review): only the commented-out webhose queries
# at the bottom of this file use these; the unirest requests below embed
# the same values directly in their URLs.
organization = 'verizon'
lang = 'english'
country = 'US'
#set API Token
apiToken = 'c6052904-f312-436b-a6d8-d915084ac866'
# Build URL
#queryURL = 'https://webhose.io/search?token=' + apiToken + '&format=json&q=' + sentiment + '%3A%22' + organization + '%22&ts=1478565932339'
### UPDATE YOUR END POINT HERE - Amazon Positive
response = unirest.get("https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=language%3A(english)%20thread.country%3AUS%20organization.positive%3A%22Verizon%22&ts=1478579908230",
headers={
"Accept": "text/plain"
}
)
count = 1
results = response.body["totalResults"]
while results > 0:
fileName = 'verizon_pos_' + str(count) + '.json'
out0 = open(fileName, 'w')
out0.truncate()
out0.write(response.raw_body)
out0.write("\n")
out0.close()
count = count + 1
print response.body["next"]
url = 'https://webhose.io' + response.body["next"]
response = unirest.get(url,
headers={
"Accept": "text/plain"
}
)
results = response.body["totalResults"]
## UPDATE YOUR END POINT HERE - Amazon Neutral
response = unirest.get("https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=language%3A(english)%20thread.country%3AUS%20organization.neutral%3A%22Verizon%22&ts=1478579995010",
headers={
"Accept": "text/plain"
}
)
count = 1
results = response.body["totalResults"]
while results > 0:
fileName = 'verizon_neu_' + str(count) + '.json'
out0 = open(fileName, 'w')
out0.truncate()
out0.write(response.raw_body)
out0.write("\n")
out0.close()
count = count + 1
print response.body["next"]
url = 'https://webhose.io' + response.body["next"]
response = unirest.get(url,
headers={
"Accept": "text/plain"
}
)
results = response.body["totalResults"]
## UPDATE YOUR END POINT HERE - Amazon Negative
response = unirest.get("https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=language%3A(english)%20thread.country%3AUS%20organization.negative%3A%22Verizon%22&ts=1478580006047",
headers={
"Accept": "text/plain"
}
)
count = 1
results = response.body["totalResults"]
while results > 0:
fileName = 'verizon_neg_' + str(count) + '.json'
out0 = open(fileName, 'w')
out0.truncate()
out0.write(response.raw_body)
out0.write("\n")
out0.close()
count = count + 1
print response.body["next"]
url = 'https://webhose.io' + response.body["next"]
response = unirest.get(url,
headers={
"Accept": "text/plain"
}
)
results = response.body["totalResults"]
'''
postiveData = webhose.search("organization.positive:\"" + topic +
"\" language:\"" + lang +
"\" thread.country:\"" + country +
"\" domain_rank:<100000", since=int(time.mktime(date_days_ago.timetuple())) )
negativeData = webhose.search("organization.negative:\"" + topic +
"\" language:\"" + lang +
"\" thread.country:\"" + country +
"\" format:\"" + "json" +
"\" domain_rank:<100000", since=int(time.mktime(date_days_ago.timetuple())) )
neutralData = webhose.search("organization.negative:\"" + topic +
"\" language:\"" + lang +
"\" thread.country:\"" + country +
"\" domain_rank:<100000", since=int(time.mktime(date_days_ago.timetuple())) )
page = requests.get('https://webhose.io/search?token=c6052904-f312-436b-a6d8-d915084ac866&format=json&q=organization.positive%3A%22Microsoft%22&ts=1478565802902')
#print page
#print page.content
#print negativeData.next
#tree = html.fromstring(page.content)
'''
| mit |
bbsan2k/nzbToMedia | core/synchronousdeluge/transfer.py | 4 | 1364 | # coding=utf-8
import zlib
import struct
import socket
import ssl
from core.synchronousdeluge import rencode
__all__ = ["DelugeTransfer"]
class DelugeTransfer(object):
    """Low-level transport for the Deluge RPC protocol: a TLS socket
    carrying zlib-compressed, rencode-serialised request/response data."""
    def __init__(self):
        # Raw TCP socket, its TLS wrapper, and our view of the link state.
        self.sock = None
        self.conn = None
        self.connected = False
    def connect(self, hostport):
        # Establish (or re-establish) the TLS connection to (host, port).
        if self.connected:
            self.disconnect()
        self.sock = socket.create_connection(hostport)
        # No certificate validation (CERT_NONE), matching Deluge daemons'
        # self-signed certs.  NOTE(review): ssl.wrap_socket and
        # PROTOCOL_TLSv1 are deprecated/removed in modern Python; confirm
        # the supported interpreter before modernising.
        self.conn = ssl.wrap_socket(self.sock, None, None, False, ssl.CERT_NONE, ssl.PROTOCOL_TLSv1)
        self.connected = True
    def disconnect(self):
        if self.conn:
            self.conn.close()
            self.connected = False
    def send_request(self, request):
        """Send one request and yield each decoded response message.

        Generator: accumulates received bytes until a complete
        zlib/rencode frame decodes, yields it, and keeps any trailing
        bytes for the next frame.  Returns when the peer closes the
        connection.
        """
        data = (request.format(),)
        payload = zlib.compress(rencode.dumps(data))
        self.conn.sendall(payload)
        buf = b""
        while True:
            data = self.conn.recv(1024)
            if not data:
                # Peer closed the connection.
                self.connected = False
                break
            buf += data
            dobj = zlib.decompressobj()
            try:
                message = rencode.loads(dobj.decompress(buf))
            except (ValueError, zlib.error, struct.error):
                # Probably incomplete data, read more
                continue
            else:
                # Keep bytes beyond this frame for the next message.
                buf = dobj.unused_data
            yield message
| gpl-3.0 |
njemak/syakago | vendor/psy/psysh/test/tools/vis.py | 710 | 3428 | """
vis.py
======
Ctypes based module to access libbsd's strvis & strunvis functions.
The `vis` function is the equivalent of strvis.
The `unvis` function is the equivalent of strunvis.
All functions accept unicode string as input and return a unicode string.
Constants:
----------
* to select alternate encoding format
`VIS_OCTAL`: use octal \ddd format
`VIS_CSTYLE`: use \[nrft0..] where appropriate
* to alter set of characters encoded
(default is to encode all non-graphic except space, tab, and newline).
`VIS_SP`: also encode space
`VIS_TAB`: also encode tab
`VIS_NL`: also encode newline
`VIS_WHITE`: same as (VIS_SP | VIS_TAB | VIS_NL)
`VIS_SAFE`: only encode "unsafe" characters
* other
`VIS_NOSLASH`: inhibit printing '\'
`VIS_HTTP1808`: http-style escape % hex hex
`VIS_HTTPSTYLE`: http-style escape % hex hex
`VIS_MIMESTYLE`: mime-style escape = HEX HEX
`VIS_HTTP1866`: http-style &#num; or &string;
`VIS_NOESCAPE`: don't decode `\'
`VIS_GLOB`: encode glob(3) magic characters
:Authors:
- ju1ius (http://github.com/ju1ius)
:Version: 1
:Date: 2014-01-05
"""
from ctypes import CDLL, c_char_p, c_int, create_string_buffer
from ctypes.util import find_library
__all__ = [
'vis', 'unvis',
'VIS_OCTAL', 'VIS_CSTYLE',
'VIS_SP', 'VIS_TAB', 'VIS_NL', 'VIS_WHITE', 'VIS_SAFE',
'VIS_NOSLASH', 'VIS_HTTP1808', 'VIS_HTTPSTYLE', 'VIS_MIMESTYLE',
'VIS_HTTP1866', 'VIS_NOESCAPE', 'VIS_GLOB'
]
#############################################################
# Constants from bsd/vis.h
#############################################################
#to select alternate encoding format
VIS_OCTAL = 0x0001
VIS_CSTYLE = 0x0002
# to alter set of characters encoded
# (default is to encode all non-graphic except space, tab, and newline).
VIS_SP = 0x0004
VIS_TAB = 0x0008
VIS_NL = 0x0010
VIS_WHITE = VIS_SP | VIS_TAB | VIS_NL
VIS_SAFE = 0x0020
# other
VIS_NOSLASH = 0x0040
VIS_HTTP1808 = 0x0080
VIS_HTTPSTYLE = 0x0080
VIS_MIMESTYLE = 0x0100
VIS_HTTP1866 = 0x0200
VIS_NOESCAPE = 0x0400
VIS_GLOB = 0x1000
#############################################################
# Import libbsd/vis functions
#############################################################
# Bind the libbsd entry points and declare their C signatures.
_libbsd = CDLL(find_library('bsd'))

_strvis = _libbsd.strvis
_strvis.argtypes = [c_char_p, c_char_p, c_int]
_strvis.restype = c_int

# BUG FIX: the original assigned _strvis.argtypes/restype a second time
# here, clobbering strvis's 3-argument signature and leaving _strunvis
# without any signature declaration at all.
_strunvis = _libbsd.strunvis
_strunvis.argtypes = [c_char_p, c_char_p]
_strunvis.restype = c_int
def vis(src, flags=VIS_WHITE):
    """
    Encodes the string `src` into libbsd's vis encoding.

    `flags` must be a combination of the VIS_* constants.

    C definition:
        int strvis(char *dst, char *src, int flags);
    """
    src = bytes(src, 'utf-8')
    # strvis() writes at most 4 output bytes per input byte PLUS a
    # terminating NUL, so the original len*4 buffer was one byte short in
    # the worst case.  Also use a mutable ctypes buffer: letting C write
    # through a c_char_p built from an immutable Python bytes object
    # corrupts interned memory.
    dst_buf = create_string_buffer(len(src) * 4 + 1)
    src_p = c_char_p(src)
    bytes_written = _strvis(dst_buf, src_p, c_int(flags))
    if -1 == bytes_written:
        raise RuntimeError('vis failed to encode string "{}"'.format(src))
    return dst_buf.value.decode('utf-8')
def unvis(src):
    """
    Decodes a string encoded by vis.

    C definition:
        int strunvis(char *dst, char *src);
    """
    src = bytes(src, 'utf-8')
    # The decoded form is never longer than the input, but strunvis()
    # appends a terminating NUL: reserve one extra byte, and use a mutable
    # ctypes buffer instead of writing through a c_char_p built from an
    # immutable Python bytes object.
    dst_buf = create_string_buffer(len(src) + 1)
    src_p = c_char_p(src)
    bytes_written = _strunvis(dst_buf, src_p)
    if -1 == bytes_written:
        raise RuntimeError('unvis failed to decode string "{}"'.format(src))
    return dst_buf.value.decode('utf-8')
| mit |
jumpserver/jumpserver | apps/perms/serializers/asset/permission.py | 1 | 5347 | # -*- coding: utf-8 -*-
#
from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
from django.db.models import Prefetch, Q
from orgs.mixins.serializers import BulkOrgResourceModelSerializer
from perms.models import AssetPermission, Action
from assets.models import Asset, Node, SystemUser
from users.models import User, UserGroup
__all__ = [
'AssetPermissionSerializer',
'ActionsField',
]
class ActionsField(serializers.MultipleChoiceField):
    """Multiple-choice field backed by the Action bitmask.

    Stored values are integers; the API representation is the list of
    choice keys encoded by that integer.
    """

    def __init__(self, *args, **kwargs):
        kwargs['choices'] = Action.CHOICES
        super().__init__(*args, **kwargs)

    def to_representation(self, value):
        # int bitmask -> list of choice keys
        return Action.value_to_choices(value)

    def to_internal_value(self, data):
        # None passes through untouched (allow_null support).
        return None if data is None else Action.choices_to_value(data)
class ActionsDisplayField(ActionsField):
    """Like ActionsField, but renders human-readable choice labels."""

    def to_representation(self, value):
        label_by_key = dict(Action.CHOICES)
        return [label_by_key.get(key) for key in super().to_representation(value)]
class AssetPermissionSerializer(BulkOrgResourceModelSerializer):
    """Serializer for asset permissions.

    Alongside the m2m id fields it accepts parallel *_display fields
    carrying human-readable names, which are resolved to objects when the
    permission is created.
    """
    actions = ActionsField(required=False, allow_null=True)
    is_valid = serializers.BooleanField(read_only=True)
    is_expired = serializers.BooleanField(read_only=True, label=_('Is expired'))
    users_display = serializers.ListField(child=serializers.CharField(), label=_('Users name'), required=False)
    user_groups_display = serializers.ListField(child=serializers.CharField(), label=_('User groups name'), required=False)
    assets_display = serializers.ListField(child=serializers.CharField(), label=_('Assets name'), required=False)
    nodes_display = serializers.ListField(child=serializers.CharField(), label=_('Nodes name'), required=False)
    system_users_display = serializers.ListField(child=serializers.CharField(), label=_('System users name'), required=False)
    class Meta:
        model = AssetPermission
        fields_mini = ['id', 'name']
        fields_small = fields_mini + [
            'is_active', 'is_expired', 'is_valid', 'actions',
            'created_by', 'date_created', 'date_expired',
            'date_start', 'comment'
        ]
        fields_m2m = [
            'users', 'users_display', 'user_groups', 'user_groups_display', 'assets', 'assets_display',
            'nodes', 'nodes_display', 'system_users', 'system_users_display',
            'users_amount', 'user_groups_amount', 'assets_amount',
            'nodes_amount', 'system_users_amount',
        ]
        fields = fields_small + fields_m2m
        read_only_fields = ['created_by', 'date_created']
        extra_kwargs = {
            'is_expired': {'label': _('Is expired')},
            'is_valid': {'label': _('Is valid')},
            'actions': {'label': _('Actions')},
            'users_amount': {'label': _('Users amount')},
            'user_groups_amount': {'label': _('User groups amount')},
            'assets_amount': {'label': _('Assets amount')},
            'nodes_amount': {'label': _('Nodes amount')},
            'system_users_amount': {'label': _('System users amount')},
        }
    @classmethod
    def setup_eager_loading(cls, queryset):
        """ Perform necessary eager loading of data. """
        queryset = queryset.prefetch_related('users', 'user_groups', 'assets', 'nodes', 'system_users')
        return queryset
    def to_internal_value(self, data):
        if 'system_users_display' in data:
            # Convert system_users_display names into system_users ids,
            # merging with any ids supplied explicitly.
            system_users = data.get('system_users', [])
            system_users_display = data.pop('system_users_display')
            for name in system_users_display:
                system_user = SystemUser.objects.filter(name=name).first()
                if system_user and system_user.id not in system_users:
                    system_users.append(system_user.id)
            data['system_users'] = system_users
        return super().to_internal_value(data)
    def perform_display_create(self, instance, **kwargs):
        # Resolve the *_display name lists to objects and attach them.
        # NOTE(review): system_users_display is not handled here -- its
        # conversion happens earlier in to_internal_value(); confirm the
        # asymmetry is intended.
        # Users (matched by name or username)
        users_to_set = User.objects.filter(
            Q(name__in=kwargs.get('users_display')) | Q(username__in=kwargs.get('users_display'))
        ).distinct()
        instance.users.add(*users_to_set)
        # User groups (matched by name)
        user_groups_to_set = UserGroup.objects.filter(name__in=kwargs.get('user_groups_display')).distinct()
        instance.user_groups.add(*user_groups_to_set)
        # Assets (matched by ip or hostname)
        assets_to_set = Asset.objects.filter(
            Q(ip__in=kwargs.get('assets_display')) | Q(hostname__in=kwargs.get('assets_display'))
        ).distinct()
        instance.assets.add(*assets_to_set)
        # Nodes (matched by full path value)
        nodes_to_set = Node.objects.filter(full_value__in=kwargs.get('nodes_display')).distinct()
        instance.nodes.add(*nodes_to_set)
    def create(self, validated_data):
        # Pop the display fields before the model is created, then resolve
        # them onto the new instance.  NOTE(review): the '' defaults mean a
        # missing field filters with name__in='' (iterates characters and
        # matches nothing) -- it works, but an empty list would be clearer.
        display = {
            'users_display' : validated_data.pop('users_display', ''),
            'user_groups_display' : validated_data.pop('user_groups_display', ''),
            'assets_display' : validated_data.pop('assets_display', ''),
            'nodes_display' : validated_data.pop('nodes_display', '')
        }
        instance = super().create(validated_data)
        self.perform_display_create(instance, **display)
        return instance
| gpl-2.0 |
metabrainz/listenbrainz-server | listenbrainz_spark/recommendations/recording/tests/test_models.py | 2 | 7922 | import re
import os
import uuid
import unittest
from unittest.mock import patch, Mock, MagicMock
import listenbrainz_spark
from listenbrainz_spark.tests import SparkTestCase, TEST_PLAYCOUNTS_PATH, PLAYCOUNTS_COUNT
from listenbrainz_spark import utils, config, hdfs_connection, path, schema
from listenbrainz_spark.recommendations.recording import train_models
from pyspark.sql import Row
from pyspark.sql.types import StructType, StructField, IntegerType
class TrainModelsTestCase(SparkTestCase):
    """Unit tests for the recording-recommendation training pipeline
    (train_models): dataset parsing, RMSE computation, data splitting,
    model id/path helpers, training and persistence."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        super().upload_test_playcounts()
    @classmethod
    def tearDownClass(cls):
        super().delete_dir()
        super().tearDownClass()
    def test_parse_dataset(self):
        # A playcount row maps onto an MLlib Rating(user, product, rating).
        row = Row(user_id=1, recording_id=2, count=3)
        rating_object = train_models.parse_dataset(row)
        self.assertEqual(rating_object.user, 1)
        self.assertEqual(rating_object.product, 2)
        self.assertEqual(rating_object.rating, 3)
    @patch('listenbrainz_spark.recommendations.recording.train_models.sqrt')
    @patch('listenbrainz_spark.recommendations.recording.train_models.RDD')
    @patch('listenbrainz_spark.recommendations.recording.train_models.add')
    def test_compute_rmse(self, mock_add, mock_rdd, mock_sqrt):
        # Verify the predict -> join -> squared-error -> sqrt call chain.
        n = 1
        model_id = "281c4177-f33a-441d-b15d-910acaf18b07"
        mock_model = MagicMock()
        _ = train_models.compute_rmse(mock_model, mock_rdd, n, model_id)
        mock_predict_all = mock_model.predictAll
        mock_map = mock_rdd.map()
        mock_predict_all.assert_called_once_with(mock_map)
        mock_predictions = mock_predict_all.return_value.map
        mock_predictions.assert_called_once()
        mock_join = mock_predictions.return_value.join
        mock_join.assert_called_once_with(mock_map)
        mock_values = mock_join.return_value.values
        mock_values.assert_called_once()
        mock_reduce = mock_values.return_value.map.return_value.reduce
        mock_reduce.assert_called_once_with(mock_add)
        # test division operator
        mock_sqrt.assert_called_once_with(mock_reduce.return_value.__truediv__())
    def test_preprocess_data(self):
        # The train/validation/test split must not lose or duplicate rows.
        test_playcounts_df = utils.read_files_from_HDFS(TEST_PLAYCOUNTS_PATH)
        training_data, validation_data, test_data = train_models.preprocess_data(test_playcounts_df)
        total_playcounts = training_data.count() + validation_data.count() + test_data.count()
        self.assertEqual(total_playcounts, PLAYCOUNTS_COUNT)
    def test_generate_model_id(self):
        model_id = train_models.generate_model_id()
        assert re.match('{}-*'.format(config.MODEL_ID_PREFIX), model_id)
    def test_get_model_path(self):
        model_id = "a36d6fc9-49d0-4789-a7dd-a2b72369ca45"
        actual_path = train_models.get_model_path(model_id)
        expected_path = config.HDFS_CLUSTER_URI + path.RECOMMENDATION_RECORDING_DATA_DIR + '/' + model_id
        self.assertEqual(actual_path, expected_path)
    def test_get_latest_dataframe_id(self):
        # With two metadata rows, the later dataframe id must be returned.
        df_id_1 = "a36d6fc9-49d0-4789-a7dd-a2b72369ca45"
        df_metadata_dict_1 = self.get_dataframe_metadata(df_id_1)
        df_1 = utils.create_dataframe(schema.convert_dataframe_metadata_to_row(df_metadata_dict_1),
                                      schema.dataframe_metadata_schema)
        df_id_2 = "bbbd6fc9-49d0-4789-a7dd-a2b72369ca45"
        df_metadata_dict_2 = self.get_dataframe_metadata(df_id_2)
        df_2 = utils.create_dataframe(schema.convert_dataframe_metadata_to_row(df_metadata_dict_2),
                                      schema.dataframe_metadata_schema)
        df_metadata = df_1.union(df_2)
        expected_dataframe_id = train_models.get_latest_dataframe_id(df_metadata)
        self.assertEqual(expected_dataframe_id, df_id_2)
    def test_get_best_model_metadata(self):
        # Every field of the Model namedtuple must appear in the metadata.
        mock_model = MagicMock()
        best_model = train_models.Model(
            model=mock_model,
            validation_rmse=3.9,
            rank=4,
            lmbda=2.1,
            iteration=1,
            model_id="xxx",
            training_time="3.1",
            rmse_time="2.1",
            alpha=3.0,
        )
        metadata = train_models.get_best_model_metadata(best_model)
        self.assertEqual(best_model.validation_rmse, metadata['validation_rmse'])
        self.assertEqual(best_model.rank, metadata['rank'])
        self.assertEqual(best_model.lmbda, metadata['lmbda'])
        self.assertEqual(best_model.iteration, metadata['iteration'])
        self.assertEqual(best_model.model_id, metadata['model_id'])
        self.assertEqual(best_model.training_time, metadata['training_time'])
        self.assertEqual(best_model.rmse_time, metadata['rmse_time'])
        self.assertEqual(best_model.alpha, metadata['alpha'])
    @patch('listenbrainz_spark.recommendations.recording.train_models.RDD')
    @patch('listenbrainz_spark.recommendations.recording.train_models.ALS')
    def test_train(self, mock_als, mock_rdd):
        rank = 2
        iteration = 2
        lmbda = 2.0
        alpha = 1.0
        model_id = 'xxxxxxx'
        _ = train_models.train(mock_rdd, rank, iteration, lmbda, alpha, model_id)
        mock_als.trainImplicit.assert_called_once_with(mock_rdd, rank, iterations=iteration, lambda_=lmbda, alpha=alpha)
    @patch('listenbrainz_spark.recommendations.recording.train_models.compute_rmse')
    @patch('listenbrainz_spark.recommendations.recording.train_models.train')
    @patch('listenbrainz_spark.recommendations.recording.train_models.generate_model_id')
    def test_get_best_model(self, mock_id, mock_train, mock_rmse):
        # With a single hyperparameter combination, exactly one model is
        # trained and scored.
        mock_rdd_training = Mock()
        mock_rdd_validation = Mock()
        num_validation = 4
        ranks = [3]
        lambdas = [4.8]
        iterations = [2]
        alpha = 3.0
        mock_rmse.return_value = 6.999
        best_model, model_metadata = train_models.get_best_model(mock_rdd_training, mock_rdd_validation, num_validation,
                                                                 ranks, lambdas, iterations, alpha)
        mock_id.assert_called_once()
        mock_train.assert_called_once_with(mock_rdd_training, ranks[0], iterations[0], lambdas[0],
                                           alpha, mock_id.return_value)
        mock_rmse.assert_called_once_with(mock_train.return_value, mock_rdd_validation, num_validation, mock_id.return_value)
    def test_delete_model(self):
        df = utils.create_dataframe(Row(col1=1, col2=1), None)
        utils.save_parquet(df, path.RECOMMENDATION_RECORDING_DATA_DIR)
        train_models.delete_model()
        dir_exists = utils.path_exists(path.RECOMMENDATION_RECORDING_DATA_DIR)
        self.assertFalse(dir_exists)
    def test_save_model_metadata_to_hdfs(self):
        model_id = "3acb406f-c716-45f8-a8bd-96ca3939c2e5"
        metadata = self.get_model_metadata(model_id)
        train_models.save_model_metadata_to_hdfs(metadata)
        status = utils.path_exists(path.RECOMMENDATION_RECORDING_MODEL_METADATA)
        self.assertTrue(status)
        df = utils.read_files_from_HDFS(path.RECOMMENDATION_RECORDING_MODEL_METADATA)
        # BUG FIX: the original used assertTrue(a, b) -- the second
        # argument is only the failure *message*, so the intended column
        # comparison was never performed and the check always passed.
        self.assertEqual(sorted(df.columns), sorted(schema.model_metadata_schema.fieldNames()))
    @patch('listenbrainz_spark.recommendations.recording.train_models.listenbrainz_spark')
    @patch('listenbrainz_spark.recommendations.recording.train_models.get_model_path')
    @patch('listenbrainz_spark.recommendations.recording.train_models.delete_model')
    def test_save_model(self, mock_del, mock_path, mock_context):
        # Saving must clear the old model, resolve the path, then persist.
        model_id = 'xxxxxx'
        mock_model = MagicMock()
        train_models.save_model(model_id, mock_model)
        mock_del.assert_called_once()
        mock_path.assert_called_once_with(model_id)
        mock_model.save.assert_called_once_with(mock_context.context, mock_path.return_value)
| gpl-2.0 |
easyfmxu/zulip | api/integrations/perforce/zulip_perforce_config.py | 124 | 2608 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for the plugin
ZULIP_USER = "p4-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# commit_notice_destination() lets you customize where commit notices
# are sent to with the full power of a Python function.
#
# It takes the following arguments:
# * path = the path to the Perforce depot on the server
# * changelist = the changelist id
#
# Returns a dictionary encoding the stream and topic to send the
# notification to (or None to send no notification).
#
# The default code below will send every commit except for ones in the
# "master-plan" and "secret" subdirectories of //depot/ to:
# * stream "depot_subdirectory-commits"
# * subject "change_root"
def commit_notice_destination(path, changelist):
    """Decide where to announce a Perforce changelist.

    Returns a dict with the destination stream and subject for the
    notification, or None when the commit touches a directory whose
    notices are suppressed.
    """
    components = path.split('/')
    # Depot paths look like //depot/<subdir>/...; fall back to the depot
    # name itself when there is no concrete subdirectory component.
    if len(components) >= 4 and components[3] not in ("*", "..."):
        directory = components[3]
    else:
        directory = components[2]

    if directory in ["evil-master-plan", "my-super-secret-repository"]:
        # Suppress notices for these trees entirely.
        return None
    return dict(stream = "%s-commits" % (directory,),
                subject = path)
## If properly installed, the Zulip API should be in your import
## path, but if not, set a custom path below
ZULIP_API_PATH = None
# This should not need to change unless you have a custom Zulip subdomain.
ZULIP_SITE = "https://api.zulip.com"
| apache-2.0 |
nthuoj/NTHUOJ_web | team/views.py | 4 | 3033 | '''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import json
import random
import datetime
from utils.render_helper import render_index
from django.template import RequestContext
# Create your views here.
def team_list(request):
    """Render a paginated list of (currently hard-coded, fake) teams."""
    # Placeholder profile used to populate the list until real data exists.
    sample_profile = {
        'id': 1,
        'team_name': 'ISeaTeL',
        'leader': 'andy',
        'member': ['hydai', 'henry'],
        'description': 'we are ISeaTel',
        'create_time': datetime.datetime.now()
    }
    # Generate lots of entries so that paging can be exercised.
    team_list = []
    for index in range(200):
        sample_profile['id'] = index + 1
        team_list.append(sample_profile.copy())

    paginator = Paginator(team_list, 25)  # Show 25 teams per page
    page = request.GET.get('page')
    try:
        teams = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page requests fall back to the first page.
        teams = paginator.page(1)
    except EmptyPage:
        # Out-of-range page requests (e.g. 9999) get the last page.
        teams = paginator.page(paginator.num_pages)
    return render_index(
        request,
        'team/teamList.html',
        {'team_list': teams})
def team_profile(request):
    """Render the profile page of a (currently hard-coded, fake) team."""
    # Random verdict counts feed the submission-status pie chart.
    verdicts = ['WA', 'AC', 'RE', 'TLE', 'MLE', 'OLE', 'Others']
    piechart_data = [{'label': verdict, 'data': random.randint(50, 100)}
                     for verdict in verdicts]
    team_stat = {
        'contest': ['icpc practice1', 'icpc practice2']
    }
    team_profile = {
        'id': 1,
        'team_name': 'ISeaTeL',
        'leader': 'andy',
        'member': ['hydai', 'henry'],
        'description': 'we are ISeaTel',
        'create_time': datetime.datetime.now()
    }
    return render_index(
        request,
        'team/teamProfile.html',
        {
            'piechart_data': json.dumps(piechart_data),
            'team_stat': team_stat,
            'team_profile': team_profile
        })
| mit |
bdero/edx-platform | common/lib/xmodule/xmodule/video_module/video_utils.py | 13 | 2020 | """
Module containts utils specific for video_module but not for transcripts.
"""
import json
import logging
import urllib
import requests
from requests.exceptions import RequestException
log = logging.getLogger(__name__)
def create_youtube_string(module):
    """
    Build the comma-separated "speed:youtube_id" string from `module`'s
    metadata attributes.  Speeds whose ID attribute is empty/None are
    skipped, which keeps compatibility with XML-based courses that only
    define some of the IDs.
    """
    speed_to_id = (
        ('0.75', module.youtube_id_0_75),
        ('1.00', module.youtube_id_1_0),
        ('1.25', module.youtube_id_1_25),
        ('1.50', module.youtube_id_1_5),
    )
    parts = []
    for speed, video_id in speed_to_id:
        if video_id:
            parts.append(':'.join((speed, video_id)))
    return ','.join(parts)
def get_video_from_cdn(cdn_base_url, original_video_url):
    """
    Ask the CDN for a mirror of `original_video_url`.

    `cdn_base_url` (e.g. 'http://api.xuetangx.com/edx/video?s3_url=') is
    prepended to the url-quoted original URL.  The CDN answers with a JSON
    document whose "sources" list holds alternative links and whose
    "s3_url" echoes the requested original; the first alternative link is
    returned.

    Returns None when no CDN is configured, the request fails, or the CDN
    responds with a non-200 status.
    """
    if not cdn_base_url:
        return None

    request_url = cdn_base_url + urllib.quote(original_video_url)
    try:
        cdn_response = requests.get(request_url, timeout=0.5)
    except RequestException:
        log.info("Request timed out to CDN server: %s", request_url, exc_info=True)
        return None

    if cdn_response.status_code != 200:
        return None
    return json.loads(cdn_response.content)['sources'][0]
| agpl-3.0 |
brainstorm/bcbio-nextgen | tests/bcbio_vm/test_docker.py | 1 | 3019 | import os
import subprocess
import pytest
from tests.conftest import make_workdir
from tests.conftest import get_post_process_yaml
@pytest.marks('docker')
def test_docker(install_test_files, data_dir):
    """Run an analysis with code and tools inside a docker container.

    Requires https://github.com/chapmanb/bcbio-nextgen-vm
    """
    with make_workdir() as workdir:
        system_config = get_post_process_yaml(data_dir, workdir)
        fcdir = os.path.join(data_dir, os.pardir, "100326_FC6107FAAXX")
        run_info = os.path.join(data_dir, "run_info-bam.yaml")
        subprocess.check_call([
            "bcbio_vm.py",
            "--datadir=%s" % data_dir,
            "run",
            "--systemconfig=%s" % system_config,
            "--fcdir=%s" % fcdir,
            run_info,
        ])
@pytest.marks('docker_ipython', 'docker')
def test_docker_ipython(install_test_files, data_dir):
    """Run an analysis with code and tools inside a docker container,
    driven via IPython.

    Requires https://github.com/chapmanb/bcbio-nextgen-vm
    """
    with make_workdir() as workdir:
        system_config = get_post_process_yaml(data_dir, workdir)
        fcdir = os.path.join(data_dir, os.pardir, "100326_FC6107FAAXX")
        run_info = os.path.join(data_dir, "run_info-bam.yaml")
        subprocess.check_call([
            "bcbio_vm.py",
            "--datadir=%s" % data_dir,
            "ipython",
            "--systemconfig=%s" % system_config,
            "--fcdir=%s" % fcdir,
            run_info,
            "lsf", "localrun",
        ])
class TestCWL():
""" Run simple CWL workflows.
Requires https://github.com/chapmanb/bcbio-nextgen-vm
"""
@pytest.marks('cwl_docker', 'cwl', 'docker')
def test_2_cwl_docker(install_test_files, data_dir):
"""Create a common workflow language description and run on a
Docker installation.
"""
with make_workdir() as workdir:
cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-cwl.yaml",
"--systemconfig", get_post_process_yaml(data_dir, workdir)]
subprocess.check_call(cl)
cl = ["bcbio_vm.py", "cwlrun", "cwltool", "run_info-cwl-workflow"]
subprocess.check_call(cl)
print
print "To run with a CWL tool, cd test_automated_output and:"
print " ".join(cl)
@pytest.marks('speed2', 'cwl', 'cwl_local', 'install_required')
def test_1_cwl_local(self, install_test_files, data_dir):
"""Create a common workflow language description and run on local installation.
"""
with make_workdir() as workdir:
cl = ["bcbio_vm.py", "cwl", "../data/automated/run_info-cwl.yaml",
"--systemconfig", get_post_process_yaml(data_dir, workdir)]
subprocess.check_call(cl)
cl = ["bcbio_vm.py", "cwlrun", "cwltool", "run_info-cwl-workflow",
"--no-container"]
subprocess.check_call(cl)
print
print "To run with a CWL tool, cd test_automated_output and:"
print " ".join(cl)
| mit |
pdehaye/theming-edx-platform | common/lib/xmodule/xmodule/fields.py | 2 | 4427 | import time
import logging
import re
from xblock.core import ModelType
import datetime
import dateutil.parser
from pytz import UTC
log = logging.getLogger(__name__)
class Date(ModelType):
    '''
    Date fields know how to parse and produce json (iso) compatible formats. Converts to tz aware datetimes.
    '''
    # Two different day/month seed dates let us detect whether dateutil had
    # to fall back on its defaults for the day or month components (see
    # _parse_date_wo_default_month_day).  Note: CURRENT_YEAR is captured at
    # import time, so year-less dates default to the process start year.
    CURRENT_YEAR = datetime.datetime.now(UTC).year
    PREVENT_DEFAULT_DAY_MON_SEED1 = datetime.datetime(CURRENT_YEAR, 1, 1, tzinfo=UTC)
    PREVENT_DEFAULT_DAY_MON_SEED2 = datetime.datetime(CURRENT_YEAR, 2, 2, tzinfo=UTC)

    def _parse_date_wo_default_month_day(self, field):
        """
        Parse the field as an iso string but prevent dateutils from defaulting the day or month while
        allowing it to default the other fields.

        Returns a tz-aware datetime, or None when the string omits the
        month or the day.
        """
        # It's not trivial to replace dateutil b/c parsing timezones as Z, +03:30, -400 is hard in python
        # however, we don't want dateutil to default the month or day (but some tests at least expect
        # us to default year); so, we'll see if dateutil uses the defaults for these the hard way
        result = dateutil.parser.parse(field, default=self.PREVENT_DEFAULT_DAY_MON_SEED1)
        result_other = dateutil.parser.parse(field, default=self.PREVENT_DEFAULT_DAY_MON_SEED2)
        if result != result_other:
            # The two parses only disagree when dateutil filled in the day
            # or month from the seed, i.e. the string did not specify them.
            # (Bug fix: the format string has a single placeholder but was
            # previously passed a second, silently-ignored argument.)
            log.warning("Field {0} is missing month or day".format(self._name))
            return None
        if result.tzinfo is None:
            # Naive datetimes are assumed to be UTC.
            result = result.replace(tzinfo=UTC)
        return result

    def from_json(self, field):
        """
        Parse an optional metadata key containing a time: if present, complain
        if it doesn't parse.
        Return None if not present or invalid.
        """
        if field is None:
            return field
        elif field == "":
            # Bug fix: this used to be `field is ""`, an identity comparison
            # that only worked because CPython happens to intern the empty
            # string literal.
            return None
        elif isinstance(field, basestring):
            return self._parse_date_wo_default_month_day(field)
        elif isinstance(field, (int, long, float)):
            # Numeric timestamps are milliseconds since the epoch.
            return datetime.datetime.fromtimestamp(field / 1000, UTC)
        elif isinstance(field, time.struct_time):
            return datetime.datetime.fromtimestamp(time.mktime(field), UTC)
        elif isinstance(field, datetime.datetime):
            return field
        else:
            msg = "Field {0} has bad value '{1}'".format(
                self._name, field)
            raise TypeError(msg)

    def to_json(self, value):
        """
        Convert a time struct to a string
        """
        if value is None:
            return None
        if isinstance(value, time.struct_time):
            # struct_times are always utc
            return time.strftime('%Y-%m-%dT%H:%M:%SZ', value)
        elif isinstance(value, datetime.datetime):
            if value.tzinfo is None or value.utcoffset().total_seconds() == 0:
                # isoformat adds +00:00 rather than Z
                return value.strftime('%Y-%m-%dT%H:%M:%SZ')
            else:
                return value.isoformat()
        else:
            raise TypeError("Cannot convert {} to json".format(value))
TIMEDELTA_REGEX = re.compile(r'^((?P<days>\d+?) day(?:s?))?(\s)?((?P<hours>\d+?) hour(?:s?))?(\s)?((?P<minutes>\d+?) minute(?:s)?)?(\s)?((?P<seconds>\d+?) second(?:s)?)?$')


class Timedelta(ModelType):
    """Field type that (de)serializes durations such as "1 day 12 hours"."""
    # Timedeltas are immutable, see http://docs.python.org/2/library/datetime.html#available-types
    MUTABLE = False

    def from_json(self, time_str):
        """
        time_str: A string with the following components:
            <D> day[s] (optional)
            <H> hour[s] (optional)
            <M> minute[s] (optional)
            <S> second[s] (optional)

        Returns a datetime.timedelta parsed from the string, or None when
        the input is None or does not match the expected format.
        """
        if time_str is None:
            return None
        match = TIMEDELTA_REGEX.match(time_str)
        if not match:
            return
        # Keep only the components that were actually present in the string.
        time_params = dict(
            (unit, int(amount))
            for (unit, amount) in match.groupdict().iteritems()
            if amount
        )
        return datetime.timedelta(**time_params)

    def to_json(self, value):
        # NOTE: datetime.timedelta only exposes days/seconds/microseconds,
        # so the 'hours'/'minutes' lookups below always fall back to 0 and
        # the output is normalized to days and seconds.
        pieces = []
        for unit in ('days', 'hours', 'minutes', 'seconds'):
            amount = getattr(value, unit, 0)
            if amount > 0:
                pieces.append("%d %s" % (amount, unit))
        return ' '.join(pieces)
| agpl-3.0 |
nkgilley/home-assistant | tests/components/emulated_roku/test_config_flow.py | 22 | 1063 | """Tests for emulated_roku config flow."""
from homeassistant.components.emulated_roku import config_flow
from tests.common import MockConfigEntry
async def test_flow_works(hass):
    """Test that config flow works."""
    handler = config_flow.EmulatedRokuFlowHandler()
    handler.hass = hass

    result = await handler.async_step_user(
        user_input={"name": "Emulated Roku Test", "listen_port": 8060}
    )

    # A fresh name/port combination should produce a new config entry.
    assert result["type"] == "create_entry"
    assert result["title"] == "Emulated Roku Test"
    assert result["data"] == {"name": "Emulated Roku Test", "listen_port": 8060}
async def test_flow_already_registered_entry(hass):
    """Test that config flow doesn't allow existing names."""
    # Pre-register an entry with the same name so the flow must refuse it.
    existing = MockConfigEntry(
        domain="emulated_roku",
        data={"name": "Emulated Roku Test", "listen_port": 8062},
    )
    existing.add_to_hass(hass)

    handler = config_flow.EmulatedRokuFlowHandler()
    handler.hass = hass
    result = await handler.async_step_user(
        user_input={"name": "Emulated Roku Test", "listen_port": 8062}
    )
    assert result["type"] == "abort"
| apache-2.0 |
kasioumis/invenio | invenio/legacy/websubmit/functions/Send_Delete_Mail.py | 13 | 6420 | # This file is part of Invenio.
# Copyright (C) 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""The function in this module sends a mail to the user (and admins if
required) saying that a record has been deleted from the repository.
"""
__revision__ = "$Id$"
import os
from invenio.ext.logging import register_exception
from invenio.legacy.webuser import email_valid_p
from invenio.config import CFG_SITE_SUPPORT_EMAIL, CFG_SITE_NAME
from invenio.legacy.websubmit.config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.ext.email import send_email
CFG_MAIL_BODY = """
**This is an automated mail from %(site-name)s**
The following record was deleted from %(site-name)s:
Report number: %(report-number)s
It was deleted by %(deleter)s.
Please note that there may be a short delay before the record
disappears from its collection. It should be gone by tomorrow morning
at the latest.
Thankyou."""
def Send_Delete_Mail(parameters, curdir, form, user_info=None):
    """
    In the event of a record having been deleted, mail the submitter
    (and possibly the record "managers") informing them about the
    record's deletion.
    @parameters:
       + edsrn: The name of the file in the current submission's
                working directory, in which the record's report number
                is stored.
       + record_managers: A comma-separated string of the email
                          addresses of the record's managers. If given,
                          they will be (blind*) copied into the mail.
                          * At this time, they are only blind copied
                            because of send_email's behaviour of
                            blind copying everyone if "To" contains
                            multiple addresses. Anyway, blind was
                            wanted . . .
    @return: empty string.
    @Exceptions raised: None.
    """
    ## Read the record's report number from the submission's working
    ## directory:
    report_number = Send_Delete_Mail_read_file(
        "%s/%s" % (curdir, parameters["edsrn"])).strip()
    ## Wash the "record_managers" parameter, keeping only the
    ## syntactically valid (comma-separated) email addresses:
    valid_managers = []
    try:
        for raw_address in parameters["record_managers"].split(","):
            candidate_address = raw_address.strip()
            if email_valid_p(candidate_address):
                valid_managers.append(candidate_address)
    except AttributeError:
        ## record_managers doesn't seem to be a string? Treat it as
        ## though it were empty:
        valid_managers = []
    record_managers = ",".join(valid_managers)
    ## Work out the recipients. Note: send_email blind-copies everyone
    ## whenever "To" holds several comma-separated addresses, which is
    ## the behaviour wanted for the managers.
    user_email = user_info["email"]
    if user_email == "guest" and record_managers == "":
        ## The user is a guest and there are no managers to send the
        ## mail to. Drop out quietly.
        return ""
    if record_managers == "":
        ## No managers to send the mail to. Send it only to the user.
        email_recipients = user_email
    elif user_email == "guest":
        ## Can't send mails to "guest"! Send only to managers.
        email_recipients = record_managers
    else:
        email_recipients = "%s,%s" % (user_email, record_managers)
    mail_subj = "Document %s deleted from %s" \
                % (report_number, CFG_SITE_NAME)
    mail_body = CFG_MAIL_BODY % \
                  { 'report-number' : report_number,
                    'deleter'       : user_email,
                    'site-name'     : CFG_SITE_NAME,
                  }
    send_email(CFG_SITE_SUPPORT_EMAIL,
               email_recipients,
               mail_subj,
               mail_body,
               copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN)
    return ""
def Send_Delete_Mail_read_file(filename):
    """Read a file from a path and return it as a string.
       @param filename: (string) - the full path to the file to be read.
       @return: (string) - the file's contents, or the empty string when
        the file is unreadable or cannot be opened.
    """
    if not os.access("%s" % filename, os.R_OK):
        return ""
    try:
        return open("%s" % filename, "r").read()
    except IOError:
        ## There was a problem reading the file. Register the exception
        ## so that the admin is informed.
        err_msg = """Error in a WebSubmit function. An unexpected """ \
                  """error was encountered when trying to read from """ \
                  """the file [%s].""" % filename
        register_exception(prefix=err_msg)
        return ""
| gpl-2.0 |
nightjean/Deep-Learning | tensorflow/python/training/slot_creator.py | 51 | 7287 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Standard functions for creating slots.
A slot is a `Variable` created with the same shape as a primary variable or
`Tensor`. A slot is always scoped in the namespace of the primary object and
typically has the same device and type.
Slots are typically used as accumulators to track values associated with
the primary object:
```python
# Optimizers can create a slot for each variable to track accumulators
accumulators = {var : create_zeros_slot(var, "momentum") for var in vs}
for var in vs:
apply_momentum(var, accumulators[var], lr, grad, momentum_tensor)
# Slots can also be used for moving averages
mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg")
update_mavg = mavg.assign_sub((mavg - var) * (1 - decay))
```
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
def _is_resource(v):
  """Returns true if v is something you get from a resource variable.

  Args:
    v: An object to check.

  Returns:
    True iff `v` is a `ResourceVariable` instance.
  """
  return isinstance(v, resource_variable_ops.ResourceVariable)
def _create_slot_var(primary, val, scope, validate_shape, shape, dtype):
  """Helper function for creating a slot variable.

  Args:
    primary: The primary `Variable` or `Tensor` the slot belongs to.
    val: Initial value (`Tensor`) or `Initializer` for the slot variable.
    scope: Name for the slot variable inside the caller's variable scope.
    validate_shape: Whether the initial value's shape must be fully known.
    shape: Static shape for the slot when `val` is an `Initializer`,
      otherwise None.
    dtype: Type for the slot when `val` is an `Initializer`, otherwise None.

  Returns:
    The created slot `Variable` (non-trainable).
  """
  # TODO(lukaszkaiser): Consider allowing partitioners to be set in the current
  # scope.
  # Temporarily disable any inherited partitioner so the slot itself is not
  # partitioned independently of its primary; restored right after creation.
  current_partitioner = variable_scope.get_variable_scope().partitioner
  variable_scope.get_variable_scope().set_partitioner(None)
  slot = variable_scope.get_variable(
      scope, initializer=val, trainable=False,
      use_resource=_is_resource(primary),
      shape=shape, dtype=dtype,
      validate_shape=validate_shape)
  variable_scope.get_variable_scope().set_partitioner(current_partitioner)

  # pylint: disable=protected-access
  if isinstance(primary, variables.Variable) and primary._save_slice_info:
    # Primary is a partitioned variable, so we need to also indicate that
    # the slot is a partitioned variable. Slots have the same partitioning
    # as their primaries.
    # For examples when using AdamOptimizer in linear model, slot.name
    # here can be "linear//weights/Adam:0", while primary.op.name is
    # "linear//weight". We want to get 'Adam' as real_slot_name, so we
    # remove "'linear//weight' + '/'" and ':0'.
    real_slot_name = slot.name[len(primary.op.name + "/"):-2]
    slice_info = primary._save_slice_info
    slot._set_save_slice_info(variables.Variable.SaveSliceInfo(
        slice_info.full_name + "/" + real_slot_name,
        slice_info.full_shape[:],
        slice_info.var_offset[:],
        slice_info.var_shape[:]))
  # pylint: enable=protected-access
  return slot
def create_slot(primary, val, name, colocate_with_primary=True):
  """Create a slot initialized to the given value.

  The type of the slot is determined by the given value.

  Args:
    primary: The primary `Variable` or `Tensor`.
    val: A `Tensor` specifying the initial value of the slot.
    name: Name to use for the slot variable.
    colocate_with_primary: Boolean.  If True the slot is located
      on the same device as `primary`.

  Returns:
    A `Variable` object.
  """
  # Scope the slot name in the namespace of the primary variable.
  # Set "primary.op.name + '/' + name" as default name, so the scope name of
  # optimizer can be shared when reuse is True. Meanwhile when reuse is False
  # and the same name has been previously used, the scope name will add '_N'
  # as suffix for unique identifications.
  # Only validate the shape when it is statically fully defined; otherwise
  # shape checking is deferred to runtime.
  validate_shape = val.get_shape().is_fully_defined()
  with variable_scope.variable_scope(None, primary.op.name + "/" + name):
    if colocate_with_primary:
      # Placing the slot on the primary's device keeps optimizer updates
      # local to the variable they update.
      with ops.colocate_with(primary):
        return _create_slot_var(primary, val, "", validate_shape, None, None)
    else:
      return _create_slot_var(primary, val, "", validate_shape, None, None)
def create_slot_with_initializer(primary, initializer, shape, dtype, name,
                                 colocate_with_primary=True):
  """Creates a slot initialized using an `Initializer`.

  The type of the slot is determined by the given value.

  Args:
    primary: The primary `Variable` or `Tensor`.
    initializer: An `Initializer`.  The initial value of the slot.
    shape: Shape of the initial value of the slot.
    dtype: Type of the value of the slot.
    name: Name to use for the slot variable.
    colocate_with_primary: Boolean.  If True the slot is located
      on the same device as `primary`.

  Returns:
    A `Variable` object.
  """
  # Scope the slot name in the namespace of the primary variable.
  # Set "primary.op.name + '/' + name" as default name, so the scope name of
  # optimizer can be shared when reuse is True. Meanwhile when reuse is False
  # and the same name has been previously used, the scope name will add '_N'
  # as suffix for unique identifications.
  # Only validate the shape when it is statically fully defined; otherwise
  # shape checking is deferred to runtime.
  validate_shape = shape.is_fully_defined()
  with variable_scope.variable_scope(None, primary.op.name + "/" + name):
    if colocate_with_primary:
      # Placing the slot on the primary's device keeps optimizer updates
      # local to the variable they update.
      with ops.colocate_with(primary):
        return _create_slot_var(primary, initializer, "", validate_shape, shape,
                                dtype)
    else:
      return _create_slot_var(primary, initializer, "", validate_shape, shape,
                              dtype)
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
  """Create a slot initialized to 0 with same shape as the primary object.

  Args:
    primary: The primary `Variable` or `Tensor`.
    name: Name to use for the slot variable.
    dtype: Type of the slot variable.  Defaults to the type of `primary`.
    colocate_with_primary: Boolean.  If True the slot is located
      on the same device as `primary`.

  Returns:
    A `Variable` object.
  """
  if dtype is None:
    dtype = primary.dtype
  slot_shape = primary.get_shape()
  if slot_shape.is_fully_defined():
    # Static shape known: hand an initializer plus the static shape to the
    # variable machinery so the zeros are produced lazily.
    initializer = init_ops.zeros_initializer(dtype)
    return create_slot_with_initializer(
        primary, initializer, slot_shape, dtype, name,
        colocate_with_primary=colocate_with_primary)
  else:
    # Static shape unknown: fall back to the dynamic shape of the primary's
    # initial value and materialize a zeros tensor of that shape.
    # (Bug fix: the previous code first rebound `slot_shape` to the Tensor
    # returned by `array_ops.shape(...)` and then called
    # `slot_shape.is_fully_defined()` on it, but `tf.Tensor` has no such
    # method, so the dynamic-shape path raised AttributeError.)
    dyn_shape = array_ops.shape(primary.initialized_value())
    val = array_ops.zeros(dyn_shape, dtype=dtype)
    return create_slot(primary, val, name,
                       colocate_with_primary=colocate_with_primary)
| apache-2.0 |
mrquim/mrquimrepo | script.module.covenant/lib/resources/lib/sources/en/ddlvalley.py | 6 | 7994 | # -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import cfscrape
from resources.lib.modules import dom_parser2
class source:
    """Scraper "source" plugin for ddlvalley.me.

    Builds search URLs for movies/episodes, scrapes the result pages with
    cfscrape (to get past Cloudflare), and extracts hoster links with
    quality/size metadata.  Failures are deliberately swallowed (bare
    excepts) so a broken page never crashes the addon's source scan.
    """
    def __init__(self):
        self.priority = 0
        self.language = ['en']
        self.domains = ['ddlvalley.me']
        self.base_link = 'http://www.ddlvalley.me'
        self.search_link = 'search/%s/'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return a urlencoded query describing a movie search, or None."""
        try:
            clean_title = cleantitle.geturl(title).replace('-','+')
            url = urlparse.urljoin(self.base_link, self.search_link % clean_title)
            url = {'url': url, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return a urlencoded query describing a TV show, or None."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend a tvshow() query with episode details; returns None on error."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Scrape candidate download links for the given movie/episode query.

        Returns a list of dicts with keys: source, quality, language, url,
        info, direct, debridonly.  hostDict holds free hosters, hostprDict
        premium (debrid-only) hosters.
        """
        try:
            sources = []
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            show = True if 'tvshowtitle' in data else False
            # hdlr is the token used to match a release to this request:
            # "SxxEyy" for episodes, the year for movies.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s' % (data['tvshowtitle']) if\
                'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            scraper = cfscrape.create_scraper()
            r = scraper.get(url).content
            u = r
            # Follow <link rel="next"> pagination, accumulating page HTML in r
            # until no further page is advertised.
            next_page = True
            num = 1
            while next_page:
                try:
                    np = re.findall('<link rel="next" href="([^"]+)', u)[0]
                    # Client Requests is causing a timeout on links for ddl valley, falling back on cfscrape
                    #u = client.request(np, headers=headers, cookie=cookie, timeout=5)
                    u = scraper.get(np).content
                    r += u
                except: next_page = False
            # Each search hit is an <h2><a ...>release name</a></h2>; keep
            # only hits whose simplified name contains the requested title.
            items = dom_parser2.parse_dom(r, 'h2')
            items = [dom_parser2.parse_dom(i.content, 'a', req=['href','rel','title','data-wpel-link']) for i in items]
            items = [(i[0].content, i[0].attrs['href']) for i in items]
            items = [(i[0], i[1]) for i in items if cleantitle.get_simple(title.lower()) in cleantitle.get_simple(i[0].lower())]
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    # Client Requests is causing a timeout on links for ddl valley, falling back on cfscrape
                    #r = client.request(item[1], headers=headers, cookie=cookie, timeout=15)
                    r = scraper.get(item[1]).content
                    links = dom_parser2.parse_dom(r, 'a', req=['href','rel','data-wpel-link','target'])
                    links = [i.attrs['href'] for i in links]
                    if show:
                        links = [i for i in links if hdlr.lower() in i.lower()]
                    for url in links:
                        try:
                            if hdlr in name:
                                # Strip everything up to the year/episode tag,
                                # then split the remainder into format tokens.
                                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                                fmt = [i.lower() for i in fmt]
                                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                                if any(i in ['extras'] for i in fmt): raise Exception()
                                if '1080p' in fmt: quality = '1080p'
                                elif '720p' in fmt: quality = '720p'
                                else: quality = 'SD'
                                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                                elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
                                info = []
                                if '3d' in fmt: info.append('3D')
                                try:
                                    # NOTE(review): `name[2]` is a single
                                    # character (name is a string), so this
                                    # findall can never match and the size is
                                    # never appended; it looks like it was
                                    # meant to search `name` — verify.
                                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name[2])[-1]
                                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                                    size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                                    size = '%.2f GB' % size
                                    info.append(size)
                                except:
                                    pass
                                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
                                info = ' | '.join(info)
                                # Skip archive/disc-image links outright.
                                if not any(x in url for x in ['.rar', '.zip', '.iso']):
                                    url = client.replaceHTMLCodes(url)
                                    url = url.encode('utf-8')
                                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                    host = client.replaceHTMLCodes(host)
                                    host = host.encode('utf-8')
                                    if host in hostDict:
                                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False})
                                    elif host in hostprDict:
                                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                        except:
                            pass
                except:
                    pass
            # Prefer non-CAM sources when any exist.
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check
            return sources
        except:
            return sources

    def resolve(self, url):
        # No extra resolution step needed for this site.
        return url
| gpl-2.0 |
edgarli/proj8 | env/lib/python3.4/site-packages/dateutil/relativedelta.py | 104 | 18172 | # -*- coding: utf-8 -*-
import datetime
import calendar
from six import integer_types
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
    """A weekday marker, optionally qualified with an occurrence index
    `n` (e.g. MO(+1) means the first Monday, MO(-2) the second-to-last)."""

    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Calling an instance yields a copy carrying the new occurrence
        # index; the instance itself is reused when nothing would change.
        if n == self.n:
            return self
        return self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            same = (self.weekday == other.weekday) and (self.n == other.n)
        except AttributeError:
            # `other` is not weekday-like.
            return False
        return same

    def __repr__(self):
        name = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return name
        return "%s(%+d)" % (name, self.n)

MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
class relativedelta(object):
    """
    The relativedelta type is based on the specification of the excellent
    work done by M.-A. Lemburg in his
    `mx.DateTime <http://www.egenix.com/files/python/mxDateTime.html>`_ extension.
    However, notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There are two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes::

        relativedelta(datetime1, datetime2)

    The second one is passing it any number of the following keyword arguments::

        relativedelta(arg1=x,arg2=y,arg3=z...)

        year, month, day, hour, minute, second, microsecond:
            Absolute information (argument is singular); adding or subtracting a
            relativedelta with absolute information does not perform an aritmetic
            operation, but rather REPLACES the corresponding value in the
            original datetime with the value(s) in relativedelta.

        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative (argument is plural); adding
            or subtracting a relativedelta with relative information performs
            the corresponding aritmetic operation on the original datetime value
            with the information in the relativedelta.

        weekday:
            One of the weekday instances (MO, TU, etc). These instances may
            receive a parameter N, specifying the Nth weekday, which could
            be positive or negative (like MO(+1) or MO(-2). Not specifying
            it is the same as specifying +1. You can also use an integer,
            where 0=MO.

        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of february.

        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.

    Here is the behavior of operations with relativedelta:

    1. Calculate the absolute year, using the 'year' argument, or the
       original datetime year, if the argument is not present.

    2. Add the relative 'years' argument to the absolute year.

    3. Do steps 1 and 2 for month/months.

    4. Calculate the absolute day, using the 'day' argument, or the
       original datetime day, if the argument is not present. Then,
       subtract from the day until it fits in the year and month
       found after their operations.

    5. Add the relative 'days' argument to the absolute day. Notice
       that the 'weeks' argument is multiplied by 7 and added to
       'days'.

    6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
       microsecond/microseconds.

    7. If the 'weekday' argument is present, calculate the weekday,
       with the given (wday, nth) tuple. wday is the index of the
       weekday (0-6, 0=Mon), and nth is the number of weeks to add
       forward or backward, depending on its signal. Notice that if
       the calculated date is already Monday, for example, using
       (0, 1) or (0, -1) won't change the day.
    """

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            # Two-date form: compute the relativedelta between dt1 and dt2.
            # datetime is a subclass of date. So both must be date
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")
            # We allow two dates, or two datetimes, so we coerce them to be
            # of the same type
            if (isinstance(dt1, datetime.datetime) !=
                    isinstance(dt2, datetime.datetime)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # Estimate the month delta from the calendar, then correct the
            # estimate until adding it to dt2 no longer overshoots dt1.
            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            # Whatever is left over after the month adjustment becomes the
            # second/microsecond remainder.
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            # Keyword form: store the relative (plural) and absolute
            # (singular) components directly.  `weeks` is folded into days.
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            # An integer weekday is mapped to its singleton instance.
            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            # yearday/nlyearday are converted into month/day/leapdays.
            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                # Cumulative day counts at the end of each month of a
                # (non-leap-adjusted) year.
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        # Normalize carried-over values for both construction paths.
        self._fix()

    def _fix(self):
        """Carry overflow between units (microseconds -> seconds -> minutes
        -> hours -> days, and months -> years) and refresh `_has_time`."""
        if abs(self.microseconds) > 999999:
            s = self.microseconds//abs(self.microseconds)
            div, mod = divmod(self.microseconds*s, 1000000)
            self.microseconds = mod*s
            self.seconds += div*s
        if abs(self.seconds) > 59:
            s = self.seconds//abs(self.seconds)
            div, mod = divmod(self.seconds*s, 60)
            self.seconds = mod*s
            self.minutes += div*s
        if abs(self.minutes) > 59:
            s = self.minutes//abs(self.minutes)
            div, mod = divmod(self.minutes*s, 60)
            self.minutes = mod*s
            self.hours += div*s
        if abs(self.hours) > 23:
            s = self.hours//abs(self.hours)
            div, mod = divmod(self.hours*s, 24)
            self.hours = mod*s
            self.days += div*s
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years += div*s
        # `_has_time` records whether any time-of-day component is in play,
        # which decides whether date inputs must be coerced to datetimes.
        if (self.hours or self.minutes or self.seconds or self.microseconds
                or self.hour is not None or self.minute is not None or
                self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0

    def _set_months(self, months):
        """Set `months`, spilling whole years into the `years` field."""
        self.months = months
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years = div*s
        else:
            self.years = 0

    def __add__(self, other):
        """Add to another relativedelta (componentwise, absolute fields
        taken from `other` first) or apply this delta to a date/datetime."""
        if isinstance(other, relativedelta):
            return relativedelta(years=other.years+self.years,
                                 months=other.months+self.months,
                                 days=other.days+self.days,
                                 hours=other.hours+self.hours,
                                 minutes=other.minutes+self.minutes,
                                 seconds=other.seconds+self.seconds,
                                 microseconds=(other.microseconds +
                                               self.microseconds),
                                 leapdays=other.leapdays or self.leapdays,
                                 year=other.year or self.year,
                                 month=other.month or self.month,
                                 day=other.day or self.day,
                                 weekday=other.weekday or self.weekday,
                                 hour=other.hour or self.hour,
                                 minute=other.minute or self.minute,
                                 second=other.second or self.second,
                                 microsecond=(other.microsecond or
                                              self.microsecond))
        if not isinstance(other, datetime.date):
            raise TypeError("unsupported type for add operation")
        elif self._has_time and not isinstance(other, datetime.datetime):
            # A plain date cannot carry a time of day, so promote it.
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day to the length of the target month (e.g. Jan 31
        # plus one month lands on Feb 28/29).
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            # Jump to the Nth given weekday relative to the computed date;
            # if the date already falls on that weekday, |n| == 1 keeps it.
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth)-1)*7
            if nth > 0:
                jumpdays += (7-ret.weekday()+weekday) % 7
            else:
                jumpdays += (ret.weekday()-weekday) % 7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret

    def __radd__(self, other):
        # date + relativedelta delegates to __add__.
        return self.__add__(other)

    def __rsub__(self, other):
        # date - relativedelta is date + (-relativedelta).
        return self.__neg__().__radd__(other)

    def __sub__(self, other):
        """Subtract another relativedelta componentwise."""
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for sub operation")
        return relativedelta(years=self.years-other.years,
                             months=self.months-other.months,
                             days=self.days-other.days,
                             hours=self.hours-other.hours,
                             minutes=self.minutes-other.minutes,
                             seconds=self.seconds-other.seconds,
                             microseconds=self.microseconds-other.microseconds,
                             leapdays=self.leapdays or other.leapdays,
                             year=self.year or other.year,
                             month=self.month or other.month,
                             day=self.day or other.day,
                             weekday=self.weekday or other.weekday,
                             hour=self.hour or other.hour,
                             minute=self.minute or other.minute,
                             second=self.second or other.second,
                             microsecond=self.microsecond or other.microsecond)

    def __neg__(self):
        # Negate the relative fields; absolute fields are kept as-is.
        return relativedelta(years=-self.years,
                             months=-self.months,
                             days=-self.days,
                             hours=-self.hours,
                             minutes=-self.minutes,
                             seconds=-self.seconds,
                             microseconds=-self.microseconds,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __bool__(self):
        """True when any relative field is non-zero or any absolute field
        is set; an all-default relativedelta is falsy."""
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)
    # Compatibility with Python 2.x
    __nonzero__ = __bool__

    def __mul__(self, other):
        """Scale the relative fields by a number (truncating to int);
        absolute fields are left untouched."""
        f = float(other)
        return relativedelta(years=int(self.years*f),
                             months=int(self.months*f),
                             days=int(self.days*f),
                             hours=int(self.hours*f),
                             minutes=int(self.minutes*f),
                             seconds=int(self.seconds*f),
                             microseconds=int(self.microseconds*f),
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    __rmul__ = __mul__

    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return False
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            # Occurrence counts of None and 1 are treated as equivalent.
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        # NOTE(review): the plural `microseconds` field is not compared
        # below — this looks like an oversight; confirm before relying on
        # equality of deltas that differ only in `microseconds`.
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __div__(self, other):
        # Division is implemented as multiplication by the reciprocal.
        return self.__mul__(1/float(other))

    __truediv__ = __div__

    def __repr__(self):
        """Render as e.g. `relativedelta(months=+1, days=-2, hour=3)`:
        relative fields with an explicit sign, absolute fields via repr."""
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("%s=%+d" % (attr, value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                l.append("%s=%s" % (attr, repr(value)))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
| artistic-2.0 |
giorgiop/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)

# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles


# Construct dataset: two Gaussian-quantile clusters with fixed seeds so the
# figure is reproducible.
X1, y1 = make_gaussian_quantiles(cov=2.,
                                 n_samples=200, n_features=2,
                                 n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
                                 n_samples=300, n_features=2,
                                 n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
# `- y2 + 1` swaps labels 0 and 1 in the second cluster, which makes the
# combined problem non-linearly separable.
y = np.concatenate((y1, - y2 + 1))

# Create and fit an AdaBoosted decision tree (depth-1 trees = stumps)
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
                         algorithm="SAMME",
                         n_estimators=200)

bdt.fit(X, y)

plot_colors = "br"
plot_step = 0.02
class_names = "AB"

plt.figure(figsize=(10, 5))

# Plot the decision boundaries by classifying every point of a dense grid
# covering the data (padded by one unit on each side).
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))

Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")

# Plot the training points, one scatter per class
for i, n, c in zip(range(2), class_names, plot_colors):
    idx = np.where(y == i)
    plt.scatter(X[idx, 0], X[idx, 1],
                c=c, cmap=plt.cm.Paired,
                label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')

# Plot the two-class decision scores as per-class histograms over a shared
# range so the distributions are directly comparable.
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
    plt.hist(twoclass_output[y == i],
             bins=10,
             range=plot_range,
             facecolor=c,
             label='Class %s' % n,
             alpha=.5)
x1, x2, y1, y2 = plt.axis()
# Stretch the y-axis a little so the legend does not overlap the bars.
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')

plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
maniteja123/sympy | sympy/liealgebras/root_system.py | 76 | 6884 | # -*- coding: utf-8 -*-
from .cartan_type import CartanType
from sympy.core import Basic
from sympy.core.compatibility import range
class RootSystem(Basic):
    """The root system of a simple Lie algebra.

    Every simple Lie algebra has a unique root system, obtained from the
    adjoint action of the algebra on its Cartan subalgebra.  A root system
    over a vector space V is a finite set of vectors (the roots) that span
    V, contain no scalar multiple of a root other than its negative, are
    closed under reflection through the hyperplane perpendicular to any
    root, and have half-integral projections onto one another.  A
    distinguished subset, the simple roots, forms a basis of V in which
    every root is an integer combination.

    References: https://en.wikipedia.org/wiki/Root_system
    Lie Algebras and Representation Theory - Humphreys
    """

    def __new__(cls, cartantype):
        """Create a RootSystem for the named simple Lie algebra.

        ``cartantype`` designates the algebra (e.g. "A3"); the resulting
        CartanType is stored on the instance as the ``cartan_type``
        attribute.
        """
        obj = Basic.__new__(cls, cartantype)
        obj.cartan_type = CartanType(cartantype)
        return obj

    def simple_roots(self):
        """Return the simple roots keyed 1..rank.

        The rank of the algebra fixes how many simple roots exist; each
        one is produced by the algebra's own ``simple_root`` method.

        Examples
        ========

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> roots = c.simple_roots()
        >>> roots
        {1: [1, -1, 0, 0], 2: [0, 1, -1, 0], 3: [0, 0, 1, -1]}

        """
        rank = self.cartan_type.rank()
        return dict((i, self.cartan_type.simple_root(i))
                    for i in range(1, rank + 1))

    def all_roots(self):
        """Return every root of the system, keyed by integers.

        The positive roots come from the base class; each one is negated
        and stored under a fresh key, yielding the negative roots.
        """
        roots = self.cartan_type.positive_roots()
        next_key = max(list(roots.keys()))
        for positive in list(roots.values()):
            next_key += 1
            roots[next_key] = [-coord for coord in positive]
        return roots

    def root_space(self):
        """Return the span of the simple roots as a string.

        The root space is the vector space whose distinguished basis is
        the simple roots alpha[1], ..., alpha[n].

        Examples
        ========

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> c.root_space()
        'alpha[1] + alpha[2] + alpha[3]'

        """
        rank = self.cartan_type.rank()
        basis = ["alpha[%d]" % i for i in range(1, rank + 1)]
        return " + ".join(basis)

    def add_simple_roots(self, root1, root2):
        """Return the componentwise sum of two simple roots.

        ``root1`` and ``root2`` are 1-based indices into the dictionary of
        simple roots; an index beyond the rank raises ValueError.

        Examples
        ========

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> newroot = c.add_simple_roots(1, 2)
        >>> newroot
        [1, 0, -1, 0]

        """
        simple = self.simple_roots()
        if root1 > len(simple) or root2 > len(simple):
            raise ValueError("You've used a root that doesn't exist!")
        first = simple[root1]
        second = simple[root2]
        return [first[i] + second[i] for i in range(len(first))]

    def add_as_roots(self, root1, root2):
        """Add two root vectors, succeeding only if the sum is a root.

        Returns the componentwise sum when it occurs among all roots of
        the system, otherwise an explanatory string.

        Examples
        ========

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> c.add_as_roots([1, 0, -1, 0], [0, 0, 1, -1])
        [1, 0, 0, -1]
        >>> c.add_as_roots([1, -1, 0, 0], [0, 0, -1, 1])
        'The sum of these two roots is not a root'

        """
        every_root = self.all_roots()
        total = [root1[i] + root2[i] for i in range(len(root1))]
        if total in every_root.values():
            return total
        return "The sum of these two roots is not a root"

    def cartan_matrix(self):
        """Cartan matrix of the Lie algebra associated with this root system.

        Examples
        ========

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> c.cartan_matrix()
        Matrix([
        [ 2, -1,  0],
        [-1,  2, -1],
        [ 0, -1,  2]])

        """
        return self.cartan_type.cartan_matrix()

    def dynkin_diagram(self):
        """Dynkin diagram of the Lie algebra associated with this root system.

        Examples
        ========

        >>> from sympy.liealgebras.root_system import RootSystem
        >>> c = RootSystem("A3")
        >>> print(c.dynkin_diagram())
        0---0---0
        1   2   3

        """
        return self.cartan_type.dynkin_diagram()
| bsd-3-clause |
log2timeline/dfvfs | tests/resolver_helpers/vhdi_resolver_helper.py | 2 | 1467 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Virtual Hard Disk image resolver helper implementation."""
import unittest
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver_helpers import vhdi_resolver_helper
from tests.resolver_helpers import test_lib
class VHDIResolverHelperTest(test_lib.ResolverHelperTestCase):
  """Tests for the Virtual Hard Disk image resolver helper implementation."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    super(VHDIResolverHelperTest, self).setUp()
    test_path = self._GetTestFilePath(['ext2.vhd'])
    self._SkipIfPathNotExists(test_path)

    # A VHDI path specification wraps an OS path specification that points
    # at the image file on disk.
    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._vhdi_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VHDI, parent=test_os_path_spec)

  def testNewFileObject(self):
    """Tests the NewFileObject function."""
    resolver_helper_object = vhdi_resolver_helper.VHDIResolverHelper()
    self._TestNewFileObject(resolver_helper_object, self._vhdi_path_spec)

  def testNewFileSystem(self):
    """Tests the NewFileSystem function."""
    # Requesting a file system for a VHDI path spec is expected to raise
    # NotSupported (checked by the base-class helper).
    resolver_helper_object = vhdi_resolver_helper.VHDIResolverHelper()
    self._TestNewFileSystemRaisesNotSupported(
        resolver_helper_object, self._vhdi_path_spec)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| apache-2.0 |
commtrack/commtrack-old-to-del | apps/hq/tests/views.py | 1 | 3356 | from django.test import TestCase
from django.test.client import Client
from hq.models import ExtUser, Domain, Organization, ReporterProfile
from hq.tests.util import create_user_and_domain
from reporters.models import Reporter
class ViewsTestCase(TestCase):
    """Smoke tests: every basic HQ view should render without error text."""

    def setUp(self):
        """Create a logged-in user with a domain, plus an organization."""
        user, domain = create_user_and_domain()
        self.client.login(username='brian', password='test')
        org = Organization(name='mockorg', domain=domain)
        org.save()

    def _assert_renders_cleanly(self, url):
        """GET `url` and assert a 200 response free of error markers."""
        response = self.client.get(url)
        self.assertNotContains(response, "Error", status_code=200)
        self.assertNotContains(response, "Exception", status_code=200)

    def testBasicViews(self):
        """Visit each basic view once; fail on any 'Error'/'Exception'."""
        # A reporter with a profile is needed for the /reporters/<id>/ view.
        reporter = Reporter(alias="rapporteur")
        reporter.save()
        domain = Domain.objects.get(name='mockdomain')
        profile = ReporterProfile(reporter=reporter, domain=domain)
        profile.save()

        # Deduplicated from the original copy/paste: every URL gets the same
        # "renders without errors" check.
        for url in ('/',
                    '/serverup.txt',
                    '/change_password/',
                    '/report/',
                    '/report/email/',
                    '/report/sms/',
                    '/reporters/add/',
                    '/reporters/%s/' % reporter.id,
                    '/charts/default/',
                    '/charts/',
                    '/stats/'):
            self._assert_renders_cleanly(url)

        # TODO - fix: /stats/delinquents/ currently fails this check.
        # self._assert_renders_cleanly('/stats/delinquents/')

        # format url variables like so:
        # response = self.client.get('/api/xforms/',{'format':'json'})

    def tearDown(self):
        """Remove the user and domain created in setUp."""
        user = ExtUser.objects.get(username='brian')
        user.delete()
        domain = Domain.objects.get(name='mockdomain')
        domain.delete()
| bsd-3-clause |
andymckay/zamboni | scripts/serve_webapps.py | 26 | 2238 | #!/usr/bin/env python
"""Serves .webapp/.json manifest files from the working directory."""
import logging
import optparse
import os
from wsgiref import simple_server
log = logging.getLogger(__name__)

# Files are served relative to the directory the script was started in.
document_root = os.getcwd()
def fileapp(environ, start_response):
    """WSGI app that serves files and directory listings under document_root.

    ``.webapp``/``.json`` files get the web-app manifest MIME type; a
    directory path produces a minimal HTML index (dotfiles hidden).

    NOTE(review): PATH_INFO is joined onto document_root without stripping
    '..' components, so this must only be used as a local dev server.
    """
    path_info = environ['PATH_INFO']
    if path_info.startswith('/'):
        path_info = path_info[1:]  # make relative
    full_path = os.path.join(document_root, path_info)
    content_type = 'text/html'
    if full_path == '':
        full_path = '.'  # must be working dir
    if path_info == "" or path_info.endswith('/') or os.path.isdir(full_path):
        # directory listing:
        out = ['<html><head></head><body><ul>']
        for filename in os.listdir(full_path):
            if filename.startswith('.'):
                continue  # hide dotfiles
            if os.path.isdir(os.path.join(full_path, filename)):
                filename = filename + '/'
            out.append('<li><a href="%s">%s</a></li>' % (filename, filename))
        out.append("</ul></body></html>")
        body = "".join(out)
    else:
        if full_path.endswith('.webapp') or full_path.endswith('.json'):
            content_type = 'application/x-web-app-manifest+json'
        # Fix: the original opened the file and never closed it, leaking a
        # file handle per request; `with` guarantees closure.
        with open(full_path, 'r') as f:
            body = f.read()  # optimized for small files :)
    start_response('200 OK', [('Content-Type', content_type),
                              ('Content-Length', str(len(body)))])
    return [body]
def main():
    """Parse command-line options and serve `document_root` forever."""
    p = optparse.OptionParser(usage="%prog\n\n" + __doc__)
    p.add_option("--addr", help="Address to serve at. Default: localhost",
                 default='')
    p.add_option("--port", help="Port to run server on. Default: %default",
                 default=8090, type=int)
    (options, args) = p.parse_args()
    logging.basicConfig(level=logging.DEBUG,
                        format='[%(asctime)s] %(message)s')
    # An empty --addr binds to all interfaces; the log line merely displays
    # 'localhost' in that case.
    log.info("starting webserver at http://%s:%s/"
             % (options.addr or 'localhost', options.port))
    httpd = simple_server.WSGIServer((options.addr, options.port),
                                     simple_server.WSGIRequestHandler)
    httpd.set_app(fileapp)
    httpd.serve_forever()  # blocks until interrupted
if __name__ == '__main__':
    # Run the development server when executed as a script.
    main()
| bsd-3-clause |
FusionSP/android_external_chromium_org | tools/site_compare/scrapers/firefox/firefox2.py | 189 | 6725 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for Firefox 2.0."""
import pywintypes
import time
import types
from drivers import keyboard
from drivers import mouse
from drivers import windowing
# Default version
version = "2.0.0.6"

# Fallback browser binary, used when the caller does not supply a path.
DEFAULT_PATH = r"c:\program files\mozilla firefox\firefox.exe"

# TODO(jhaas): the Firefox scraper is a bit rickety at the moment. Known
# issues: 1) won't work if the default profile puts toolbars in different
# locations, 2) uses sleep() statements rather than more robust checks,
# 3) fails badly if an existing Firefox window is open when the scrape
# is invoked. This needs to be fortified at some point.
def GetBrowser(path):
  """Invoke the Firefox browser and return the process and window.

  Args:
    path: full path to browser

  Returns:
    A tuple of (process handle, main window, render pane)
  """
  if not path: path = DEFAULT_PATH

  # Invoke Firefox
  (proc, wnd) = windowing.InvokeAndWait(path)

  # Get the content pane (nested three MozillaWindowClass levels deep)
  render_pane = windowing.FindChildWindow(
    wnd,
    "MozillaWindowClass/MozillaWindowClass/MozillaWindowClass")

  return (proc, wnd, render_pane)
def InvokeBrowser(path):
  """Invoke the Firefox browser.

  Args:
    path: full path to browser

  Returns:
    A tuple of (main window, process handle, render pane); the process
    handle is None when an already-running window was reused.
  """
  # Reuse an existing instance of the browser if we can find one. This
  # may not work correctly, especially if the window is behind other windows.
  wnds = windowing.FindChildWindows(0, "MozillaUIWindowClass")
  if len(wnds):
    wnd = wnds[0]
    proc = None
  else:
    # Invoke Firefox
    (proc, wnd) = windowing.InvokeAndWait(path)

  # Get the content pane
  render_pane = windowing.FindChildWindow(
    wnd,
    "MozillaWindowClass/MozillaWindowClass/MozillaWindowClass")

  return (wnd, proc, render_pane)
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
  """Invoke a browser, send it to a series of URLs, and save its output.

  Args:
    urls: list of URLs to scrape
    outdir: directory to place output
    size: size of browser window to use
    pos: position of browser window
    timeout: amount of time to wait for page to load
    kwargs: miscellaneous keyword args (supports "path" for the browser
        binary and "filename" as a string or callable taking the URL)

  Returns:
    None if success, else an error string
  """
  if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
  else: path = DEFAULT_PATH

  (wnd, proc, render_pane) = InvokeBrowser(path)

  # Resize and reposition the frame
  windowing.MoveAndSizeWindow(wnd, pos, size, render_pane)

  time.sleep(3)

  # Firefox is a bit of a pain: it doesn't use standard edit controls,
  # and it doesn't display a throbber when there's no tab. Let's make
  # sure there's at least one tab, then select the first one
  mouse.ClickInWindow(wnd)
  keyboard.TypeString("[t]", True)
  mouse.ClickInWindow(wnd, (30, 115))
  time.sleep(2)

  timedout = False

  # Visit each URL we're given
  if type(urls) in types.StringTypes: urls = [urls]

  for url in urls:

    # Use keyboard shortcuts
    keyboard.TypeString("{d}", True)
    keyboard.TypeString(url)
    keyboard.TypeString("\n")

    # Wait for the page to finish loading; a negative load time means the
    # throbber never stopped within `timeout`
    load_time = windowing.WaitForThrobber(wnd, (10, 96, 26, 112), timeout)
    timedout = load_time < 0

    if timedout:
      break

    # Scrape the page
    image = windowing.ScrapeWindow(render_pane)

    # Save to disk
    if "filename" in kwargs:
      if callable(kwargs["filename"]):
        filename = kwargs["filename"](url)
      else:
        filename = kwargs["filename"]
    else:
      filename = windowing.URLtoFilename(url, outdir, ".bmp")
    image.save(filename)

  # Close all the tabs, cheesily
  mouse.ClickInWindow(wnd)

  while len(windowing.FindChildWindows(0, "MozillaUIWindowClass")):
    keyboard.TypeString("[w]", True)
    time.sleep(1)

  if timedout:
    return "timeout"
def Time(urls, size, timeout, **kwargs):
  """Measure how long it takes to load each of a series of URLs

  Args:
    urls: list of URLs to time
    size: size of browser window to use
    timeout: amount of time to wait for page to load
    kwargs: miscellaneous keyword args (supports "path" for the browser
        binary)

  Returns:
    A list of tuples (url, time). "time" can be "crashed" or "timeout"
  """
  if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
  else: path = DEFAULT_PATH
  proc = None

  # Visit each URL we're given
  if type(urls) in types.StringTypes: urls = [urls]

  ret = []
  for url in urls:
    try:
      # Invoke the browser if necessary (it is torn down after each URL
      # below, so normally this runs once per iteration)
      if not proc:
        (wnd, proc, render_pane) = InvokeBrowser(path)

        # Resize and reposition the frame
        windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
        time.sleep(3)

        # Firefox is a bit of a pain: it doesn't use standard edit controls,
        # and it doesn't display a throbber when there's no tab. Let's make
        # sure there's at least one tab, then select the first one
        mouse.ClickInWindow(wnd)
        keyboard.TypeString("[t]", True)
        mouse.ClickInWindow(wnd, (30, 115))
        time.sleep(2)

      # Use keyboard shortcuts
      keyboard.TypeString("{d}", True)
      keyboard.TypeString(url)
      keyboard.TypeString("\n")

      # Wait for the page to finish loading; negative means the throbber
      # never stopped within `timeout`
      load_time = windowing.WaitForThrobber(wnd, (10, 96, 26, 112), timeout)
      timedout = load_time < 0

      if timedout:
        load_time = "timeout"

      # Try to close the browser; if this fails it's probably a crash
      mouse.ClickInWindow(wnd)
      count = 0
      while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
             and count < 5):
        keyboard.TypeString("[w]", True)
        time.sleep(1)
        count = count + 1

      if len(windowing.FindChildWindows(0, "MozillaUIWindowClass")):
        windowing.EndProcess(proc)
        load_time = "crashed"
      proc = None
    except pywintypes.error:
      proc = None
      load_time = "crashed"

    ret.append( (url, load_time) )

  # Clean up any browser instance still alive after the last URL
  if proc:
    count = 0
    while (len(windowing.FindChildWindows(0, "MozillaUIWindowClass"))
           and count < 5):
      keyboard.TypeString("[w]", True)
      time.sleep(1)
      count = count + 1

  return ret
def main():
  """Self-test entry point: scrape a few well-known sites and return 0."""
  # We're being invoked rather than imported, so run some tests
  scrape_dir = r"c:\sitecompare\scrapes\Firefox\2.0.0.6"
  windowing.PreparePath(scrape_dir)

  # Scrape three sites and save the results
  test_sites = [
      "http://www.microsoft.com",
      "http://www.google.com",
      "http://www.sun.com",
  ]
  Scrape(test_sites, scrape_dir, (1024, 768), (0, 0))
  return 0
# Entry point when executed as a script: run the self-test and use its
# result as the process exit status.
if __name__ == "__main__":
  sys.exit(main())
| bsd-3-clause |
chromium/chromium | buildtools/checkdeps/rules.py | 5 | 7044 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base classes to represent dependency rules, used by checkdeps.py"""
import os
import re
class Rule(object):
  """A single include rule: one of ALLOW ('+'), DISALLOW ('-') or
  TEMP_ALLOW ('!') applied to a directory path.
  """

  # Rule-type prefixes as they appear in DEPS files; the same character is
  # stored in self.allow to tag which kind of rule this is.
  ALLOW = '+'
  DISALLOW = '-'
  TEMP_ALLOW = '!'

  def __init__(self, allow, directory, dependent_directory, source):
    self.allow = allow
    self._dir = directory
    self._dependent_dir = dependent_directory
    self._source = source

  def __str__(self):
    return '"%s%s" from %s.' % (self.allow, self._dir, self._source)

  def AsDependencyTuple(self):
    """Return (allow, dependent dir, dependee dir) for this rule.

    The tuple is fully self-sufficient to answer whether the dependent is
    allowed to depend on the dependee, without external context.  Empty
    directories are normalized to '.'.
    """
    dependent = self._dependent_dir if self._dependent_dir else '.'
    dependee = self._dir if self._dir else '.'
    return self.allow, dependent, dependee

  def ParentOrMatch(self, other):
    """True if |other| exactly matches or is an ancestor of this rule's
    path.  For example, "foo" matches the rule path "foo/bar"."""
    return other == self._dir or self._dir.startswith(other + '/')

  def ChildOrMatch(self, other):
    """True if |other| is covered by this rule, i.e. it equals the rule's
    path or lives beneath it.  "foo/bar" is covered by the rule "foo"."""
    return other == self._dir or other.startswith(self._dir + '/')
class MessageRule(Rule):
  """A rule that has a simple message as the reason for failing,
  unrelated to directory or source.

  Used as the fallback result of Rules.RuleApplyingTo() when nothing
  matches; str() yields the plain message instead of the usual
  '"+dir" from source.' form.
  """

  def __init__(self, reason):
    # A message rule is always a DISALLOW with empty directory/source;
    # only the human-readable reason matters.
    super(MessageRule, self).__init__(Rule.DISALLOW, '', '', '')
    self._reason = reason

  def __str__(self):
    return self._reason
def ParseRuleString(rule_string, source):
  """Split a DEPS rule string into (type character, path).

  Raises an Exception for empty strings, unknown prefixes and trailing
  slashes; |source| identifies where the string came from for error
  messages.
  """
  if not rule_string:
    raise Exception('The rule string "%s" is empty\nin %s' %
                    (rule_string, source))

  prefix, path = rule_string[0], rule_string[1:]
  if prefix not in (Rule.ALLOW, Rule.DISALLOW, Rule.TEMP_ALLOW):
    raise Exception(
        'The rule string "%s" does not begin with a "+", "-" or "!".' %
        rule_string)

  # A trailing slash would defeat Rule.[Parent|Child]OrMatch, which append
  # '/' themselves when comparing, so ban it outright.
  if rule_string.endswith('/'):
    raise Exception(
        'The rule string "%s" ends with a "/" which is not allowed.'
        ' Please remove the trailing "/".' % rule_string)

  return prefix, path
class Rules(object):
  """Sets of rules for files in a directory.

  By default a rule is added to the general set, which applies to every
  dependee file in the directory.  A rule may instead be registered under a
  regular expression, in which case it applies only to dependee files whose
  basename matches that expression; such specific rules are consulted
  before the general ones.
  """

  def __init__(self):
    # General rules are kept separate from the specific ones because they
    # must always be processed last.
    self._general_rules = []
    # Maps regexp string -> list of rules applying to dependee files whose
    # basename matches that regexp.  Internal order of the map is arbitrary.
    self._specific_rules = {}

  def __str__(self):
    lines = ['Rules = {\n (apply to all files): [\n%s\n ],' % '\n'.join(
        ' %s' % rule for rule in self._general_rules)]
    for regexp, rules in list(self._specific_rules.items()):
      lines.append(' (limited to files matching %s): [\n%s\n ]' % (
          regexp, '\n'.join(' %s' % rule for rule in rules)))
    lines.append(' }')
    return '\n'.join(lines)

  def AsDependencyTuples(self, include_general_rules, include_specific_rules):
    """Return a set of (allow, dependent dir, dependee dir) tuples for the
    requested rule groups (general and/or specific).  Specific rules get
    their regexp appended to the dependent directory."""
    def _Accumulate(out, rules, extra_dependent_suffix=""):
      for rule in rules:
        allow, dependent, dependee = rule.AsDependencyTuple()
        out.add((allow, dependent + extra_dependent_suffix, dependee))

    deps = set()
    if include_general_rules:
      _Accumulate(deps, self._general_rules)
    if include_specific_rules:
      for regexp, rules in list(self._specific_rules.items()):
        _Accumulate(deps, rules, "/" + regexp)
    return deps

  def AddRule(self, rule_string, dependent_dir, source, dependee_regexp=None):
    """Parse |rule_string| and add the resulting rule.

    Args:
      rule_string: The include_rule string read from the DEPS file.
      dependent_dir: The directory to which this rule applies.
      source: Human-readable origin of the rule, for error messages.
      dependee_regexp: When given, the rule only applies to dependee files
          whose basename matches this expression; None applies it to all
          dependee files.
    """
    rule_type, rule_dir = ParseRuleString(rule_string, source)

    if not dependee_regexp:
      rules_to_update = self._general_rules
    else:
      rules_to_update = self._specific_rules.get(dependee_regexp, [])

    # Drop any existing rule the new one supersedes: adding "foo" removes
    # "foo" and "foo/bar" but leaves "foobar" alone.  The new rule goes to
    # the front so it wins over older, broader rules.
    rules_to_update = [r for r in rules_to_update
                       if not r.ParentOrMatch(rule_dir)]
    rules_to_update.insert(0, Rule(rule_type, rule_dir, dependent_dir, source))

    if not dependee_regexp:
      self._general_rules = rules_to_update
    else:
      self._specific_rules[dependee_regexp] = rules_to_update

  def RuleApplyingTo(self, include_path, dependee_path):
    """Return the rule governing |include_path| for a dependee file located
    at |dependee_path|.  Specific (regexp) rules are consulted before the
    general ones; a MessageRule is returned when nothing matches."""
    dependee_filename = os.path.basename(dependee_path)
    for regexp, specific_rules in list(self._specific_rules.items()):
      if re.match(regexp, dependee_filename):
        for rule in specific_rules:
          if rule.ChildOrMatch(include_path):
            return rule
    for rule in self._general_rules:
      if rule.ChildOrMatch(include_path):
        return rule
    return MessageRule('no rule applying.')
| bsd-3-clause |
popazerty/enigma2-obh | lib/python/Screens/Console.py | 26 | 2543 | from enigma import eConsoleAppContainer
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.ScrollLabel import ScrollLabel
class Console(Screen):
    """Screen that runs a list of shell commands through eConsoleAppContainer,
    streaming their combined output into a scrollable label.

    The commands in `cmdlist` are executed one after another.  When the last
    one finishes, `finishedCallback` (if given) is invoked, and the screen
    closes itself on success when `closeOnSuccess` is set.
    """
    #TODO move this to skin.xml
    skin = """
<screen position="100,100" size="550,400" title="Command execution..." >
<widget name="text" position="0,0" size="550,400" font="Console;14" />
</screen>"""

    def __init__(self, session, title = "Console", cmdlist = None, finishedCallback = None, closeOnSuccess = False):
        Screen.__init__(self, session)

        self.finishedCallback = finishedCallback
        self.closeOnSuccess = closeOnSuccess
        self.errorOcurred = False

        self["text"] = ScrollLabel("")
        # ok/back try to close; up/down scroll the output.
        self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
        {
            "ok": self.cancel,
            "back": self.cancel,
            "up": self["text"].pageUp,
            "down": self["text"].pageDown
        }, -1)

        self.cmdlist = cmdlist
        self.newtitle = title

        self.onShown.append(self.updateTitle)

        # run is the index of the command currently executing.
        self.container = eConsoleAppContainer()
        self.run = 0
        self.container.appClosed.append(self.runFinished)
        self.container.dataAvail.append(self.dataAvail)
        self.onLayoutFinish.append(self.startRun) # dont start before gui is finished

    def updateTitle(self):
        # Applied via onShown because the title can only be set once the
        # screen exists.
        self.setTitle(self.newtitle)

    def startRun(self):
        # Kick off the first command once the GUI layout is ready.
        self["text"].setText(_("Execution progress:") + "\n\n")
        print "Console: executing in run", self.run, " the command:", self.cmdlist[self.run]
        if self.container.execute(self.cmdlist[self.run]): #start of container application failed...
            self.runFinished(-1) # so we must call runFinished manual

    def runFinished(self, retval):
        # appClosed callback: a non-zero retval marks the whole batch as
        # failed, then the next command (if any) is started.
        if retval:
            self.errorOcurred = True
        self.run += 1
        if self.run != len(self.cmdlist):
            if self.container.execute(self.cmdlist[self.run]): #start of container application failed...
                self.runFinished(-1) # so we must call runFinished manual
        else:
            # All commands done: append the footer, keep the view pinned to
            # the last page if it already was, then notify/close as asked.
            lastpage = self["text"].isAtLastPage()
            # NOTE(review): 'str' shadows the builtin here; harmless locally
            # but worth renaming.
            str = self["text"].getText()
            str += _("Execution finished!!");
            self["text"].setText(str)
            if lastpage:
                self["text"].lastPage()
            if self.finishedCallback is not None:
                self.finishedCallback()
            if not self.errorOcurred and self.closeOnSuccess:
                self.cancel()

    def cancel(self):
        # Only closes once every command has finished; pressing ok/back while
        # a command is still running is a no-op.
        if self.run == len(self.cmdlist):
            self.close()
            self.container.appClosed.remove(self.runFinished)
            self.container.dataAvail.remove(self.dataAvail)

    def dataAvail(self, str):
        # Append new process output; stay pinned to the last page when the
        # user was already there.
        lastpage = self["text"].isAtLastPage()
        self["text"].setText(self["text"].getText() + str)
        if lastpage:
            self["text"].lastPage()
| gpl-2.0 |
loopCM/chromium | third_party/mesa/MesaLib/src/mapi/glapi/gen/extension_helper.py | 46 | 8261 | #!/usr/bin/env python
# (C) Copyright IBM Corporation 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import gl_XML
import license
import sys, getopt, string
# GL entry points dispatched through the driver's vertex-format table
# (GLvertexformat) rather than the regular _mesa_* dispatch functions.
# PrintInitDispatch.do_function_body below uses membership in this list to
# decide which of the two tables a function is emitted into.
vtxfmt = [
    "ArrayElement", \
    "Color3f", \
    "Color3fv", \
    "Color4f", \
    "Color4fv", \
    "EdgeFlag", \
    "EdgeFlagv", \
    "EvalCoord1f", \
    "EvalCoord1fv", \
    "EvalCoord2f", \
    "EvalCoord2fv", \
    "EvalPoint1", \
    "EvalPoint2", \
    "FogCoordfEXT", \
    "FogCoordfvEXT", \
    "Indexf", \
    "Indexfv", \
    "Materialfv", \
    "MultiTexCoord1fARB", \
    "MultiTexCoord1fvARB", \
    "MultiTexCoord2fARB", \
    "MultiTexCoord2fvARB", \
    "MultiTexCoord3fARB", \
    "MultiTexCoord3fvARB", \
    "MultiTexCoord4fARB", \
    "MultiTexCoord4fvARB", \
    "Normal3f", \
    "Normal3fv", \
    "SecondaryColor3fEXT", \
    "SecondaryColor3fvEXT", \
    "TexCoord1f", \
    "TexCoord1fv", \
    "TexCoord2f", \
    "TexCoord2fv", \
    "TexCoord3f", \
    "TexCoord3fv", \
    "TexCoord4f", \
    "TexCoord4fv", \
    "Vertex2f", \
    "Vertex2fv", \
    "Vertex3f", \
    "Vertex3fv", \
    "Vertex4f", \
    "Vertex4fv", \
    "CallList", \
    "CallLists", \
    "Begin", \
    "End", \
    "VertexAttrib1fNV", \
    "VertexAttrib1fvNV", \
    "VertexAttrib2fNV", \
    "VertexAttrib2fvNV", \
    "VertexAttrib3fNV", \
    "VertexAttrib3fvNV", \
    "VertexAttrib4fNV", \
    "VertexAttrib4fvNV", \
    "VertexAttrib1fARB", \
    "VertexAttrib1fvARB", \
    "VertexAttrib2fARB", \
    "VertexAttrib2fvARB", \
    "VertexAttrib3fARB", \
    "VertexAttrib3fvARB", \
    "VertexAttrib4fARB", \
    "VertexAttrib4fvARB", \
    "Rectf", \
    "DrawArrays", \
    "DrawElements", \
    "DrawRangeElements", \
    "EvalMesh1", \
    "EvalMesh2", \
    ]
def all_entrypoints_in_abi(f, abi, api):
    """Check whether every entry-point name of a function is in the ABI.

    Args:
        f: gl_XML function object; f.entry_points lists its GL names.
        abi: collection of category names that make up the ABI.
        api: gl_XML API object used to look up each name's category.

    Returns:
        True if all entry points fall into categories listed in abi.
        (Previously returned 0/1; bool is backward compatible since
        callers only use the result in boolean context.)
    """
    # get_category_for_name returns [category, number]; only the category
    # matters here.
    return all(api.get_category_for_name(n)[0] in abi
               for n in f.entry_points)
def any_entrypoints_in_abi(f, abi, api):
    """Check whether at least one entry-point name of a function is in the ABI.

    Args:
        f: gl_XML function object; f.entry_points lists its GL names.
        abi: collection of category names that make up the ABI.
        api: gl_XML API object used to look up each name's category.

    Returns:
        True if any entry point falls into a category listed in abi.
        (Previously returned 0/1; bool is backward compatible since
        callers only use the result in boolean context.)
    """
    return any(api.get_category_for_name(n)[0] in abi
               for n in f.entry_points)
def condition_for_function(f, abi, all_not_in_ABI, gl_api=None):
    """Create a C-preprocessor condition for the function.

    There are two modes of operation.  If all_not_in_ABI is set, a
    condition is only created if all of the entry-point names for f are
    not in the selected ABI.  If all_not_in_ABI is not set, a condition
    is created if any entry-point name is not in the selected ABI.

    Args:
        f: gl_XML function object.
        abi: collection of category names that make up the ABI.
        all_not_in_ABI: see above.
        gl_api: gl_XML API object used for category lookups.  Defaults to
            the module-global 'api' (set when this script runs as
            __main__) so existing positional callers keep working; pass it
            explicitly when using this function from an importing module.

    Returns:
        List of 'defined(need_<category>)' preprocessor terms (possibly
        empty).
    """
    if gl_api is None:
        # Historical behavior: fall back to the global created in __main__.
        # Without this parameter the function raised NameError when called
        # from code that imported this module.
        gl_api = api

    condition = []
    for n in f.entry_points:
        category, _num = gl_api.get_category_for_name(n)
        if category not in abi:
            condition.append('defined(need_%s)'
                             % (gl_XML.real_category_name(category)))
        elif all_not_in_ABI:
            # At least one entry point is in the ABI, so in this mode no
            # condition is needed at all.
            return []

    return condition
class PrintGlExtensionGlue(gl_XML.gl_print_base):
    """Code generator that prints the extension name tables and per-category
    dri_extension_function arrays consumed by the DRI drivers."""

    def __init__(self):
        gl_XML.gl_print_base.__init__(self)

        self.name = "extension_helper.py (from Mesa)"
        self.license = license.bsd_license_template % ("(C) Copyright IBM Corporation 2005", "IBM")
        return

    def printRealHeader(self):
        # Headers required by the generated C file.
        print '#include "utils.h"'
        print '#include "main/dispatch.h"'
        print ''
        return

    def printBody(self, api):
        """Emit, for every function outside the base ABI, a packed name
        string (parameter signature + entry-point names), then one
        dri_extension_function table per extension category."""
        abi = [ "1.0", "1.1", "1.2", "GL_ARB_multitexture" ]

        # Maps category name -> list of functions belonging to it; filled
        # while emitting the name strings, consumed for the tables below.
        category_list = {}

        print '#ifndef NULL'
        print '# define NULL 0'
        print '#endif'
        print ''

        for f in api.functionIterateAll():
            condition = condition_for_function(f, abi, 0)
            if len(condition):
                print '#if %s' % (string.join(condition, " || "))
                print 'static const char %s_names[] =' % (f.name)

                # Encode each parameter as a one-character type code:
                # p = pointer, i = integer, f = 4-byte float, d = other
                # (double).  Padding parameters are skipped.
                parameter_signature = ''
                for p in f.parameterIterator():
                    if p.is_padding:
                        continue

                    # FIXME: This is a *really* ugly hack. :(
                    tn = p.type_expr.get_base_type_node()
                    if p.is_pointer():
                        parameter_signature += 'p'
                    elif tn.integer:
                        parameter_signature += 'i'
                    elif tn.size == 4:
                        parameter_signature += 'f'
                    else:
                        parameter_signature += 'd'

                print ' "%s\\0" /* Parameter signature */' % (parameter_signature)

                for n in f.entry_points:
                    print ' "gl%s\\0"' % (n)

                    # Remember which non-ABI category this function belongs
                    # to so a table entry can be emitted for it below.
                    [category, num] = api.get_category_for_name( n )
                    if category not in abi:
                        c = gl_XML.real_category_name(category)
                        if not category_list.has_key(c):
                            category_list[ c ] = []

                        category_list[ c ].append( f )

                print ' "";'
                print '#endif'
                print ''

        # Emit the per-category tables in deterministic (sorted) order.
        keys = category_list.keys()
        keys.sort()

        for category in keys:
            print '#if defined(need_%s)' % (category)
            print 'static const struct dri_extension_function %s_functions[] = {' % (category)

            for f in category_list[ category ]:
                # A function either has an offset that is
                # assigned by the ABI, or it has a remap
                # index.
                if any_entrypoints_in_abi(f, abi, api):
                    index_name = "-1"
                    offset = f.offset
                else:
                    index_name = "%s_remap_index" % (f.name)
                    offset = -1

                print ' { %s_names, %s, %d },' % (f.name, index_name, offset)

            print ' { NULL, 0, 0 }'
            print '};'
            print '#endif'
            print ''

        return
class PrintInitDispatch(gl_XML.gl_print_base):
    """Code generator that prints driver_init_exec_table() and
    driver_install_vtxfmt(), guarding each dispatch-table assignment with
    the preprocessor condition of its extension category."""

    def __init__(self):
        gl_XML.gl_print_base.__init__(self)

        self.name = "extension_helper.py (from Mesa)"
        self.license = license.bsd_license_template % ("(C) Copyright IBM Corporation 2005", "IBM")
        return

    def do_function_body(self, api, abi, vtxfmt_only):
        """Emit one 'disp->X = ...;' line per function.

        vtxfmt_only selects between the two generated functions: truthy
        emits only the vtxfmt entries (assigned from vfmt->), falsy emits
        everything else (assigned from _mesa_*).  Consecutive functions
        sharing the same preprocessor condition are grouped under a single
        #if/#endif pair.
        """
        last_condition_string = None
        for f in api.functionIterateByOffset():
            # Partition functions by vtxfmt membership according to mode.
            if (f.name in vtxfmt) and not vtxfmt_only:
                continue

            if (f.name not in vtxfmt) and vtxfmt_only:
                continue

            condition = condition_for_function(f, abi, 1)
            condition_string = string.join(condition, " || ")

            # Close the previous #if group and open a new one only when the
            # condition actually changes.
            if condition_string != last_condition_string:
                if last_condition_string:
                    print '#endif /* %s */' % (last_condition_string)

                if condition_string:
                    print '#if %s' % (condition_string)

            if vtxfmt_only:
                print ' disp->%s = vfmt->%s;' % (f.name, f.name)
            else:
                print ' disp->%s = _mesa_%s;' % (f.name, f.name)

            last_condition_string = condition_string

        # Close the final group if one is still open.
        if last_condition_string:
            print '#endif /* %s */' % (last_condition_string)

    def printBody(self, api):
        abi = [ "1.0", "1.1", "1.2", "GL_ARB_multitexture" ]

        print 'void driver_init_exec_table(struct _glapi_table *disp)'
        print '{'
        self.do_function_body(api, abi, 0)
        print '}'
        print ''
        print 'void driver_install_vtxfmt(struct _glapi_table *disp, const GLvertexformat *vfmt)'
        print '{'
        self.do_function_body(api, abi, 1)
        print '}'

        return
def show_usage():
    """Print command-line usage information and exit with status 1."""
    print "Usage: %s [-f input_file_name] [-m output_mode]" % sys.argv[0]
    print " -m output_mode Output mode can be one of 'extensions' or 'exec_init'."
    sys.exit(1)
if __name__ == '__main__':
    # Parse command line: -f selects the GL API XML file, -m the generator.
    file_name = "gl_API.xml"

    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "f:m:")
    except Exception,e:
        show_usage()

    mode = "extensions"
    for (arg,val) in args:
        if arg == "-f":
            file_name = val
        if arg == '-m':
            mode = val

    # Parse the XML description of the GL API; the resulting 'api' object
    # is also relied on as a module global by condition_for_function.
    api = gl_XML.parse_GL_API( file_name )

    # Pick the generator matching the requested mode and emit the code.
    if mode == "extensions":
        printer = PrintGlExtensionGlue()
    elif mode == "exec_init":
        printer = PrintInitDispatch()
    else:
        show_usage()

    printer.Print( api )
| bsd-3-clause |
singlebrook/AWS-ElasticBeanstalk-CLI | eb/macosx/python2.7/lib/aws/requests/cookies.py | 34 | 13686 | # -*- coding: utf-8 -*-
"""
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import collections
from .compat import cookielib, urlparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
    """Adapts a `requests.Request` to the `urllib2.Request` interface.

    `cookielib.CookieJar` expects this interface in order to correctly
    manage cookie policies, i.e., to determine whether a cookie can be set
    given the domains of the request and the cookie.

    The wrapped request is treated as read-only.  Headers the jar wants to
    add are collected separately; clients read them back through
    `get_new_headers()` (see `get_cookie_header`, defined below).
    """

    def __init__(self, request):
        self._request = request
        self._added_headers = {}
        self.type = urlparse(self._request.url).scheme

    def get_type(self):
        return self.type

    def get_host(self):
        return urlparse(self._request.url).netloc

    def get_origin_req_host(self):
        return self.get_host()

    def get_full_url(self):
        return self._request.url

    def is_unverifiable(self):
        return True

    def has_header(self, name):
        return name in self._request.headers or name in self._added_headers

    def get_header(self, name, default=None):
        # Original request headers win; jar-added headers are the fallback.
        fallback = self._added_headers.get(name, default)
        return self._request.headers.get(name, fallback)

    def add_header(self, key, val):
        """cookielib has no legitimate use for this method; add it back if you find one."""
        raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")

    def add_unredirected_header(self, name, value):
        self._added_headers[name] = value

    def get_new_headers(self):
        return self._added_headers

    @property
    def unverifiable(self):
        return self.is_unverifiable()
class MockResponse(object):
    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

    ...what? Basically, expose the parsed HTTP headers from the server
    response the way `cookielib` expects to see them.
    """

    def __init__(self, headers):
        """Make a MockResponse for `cookielib` to read.

        :param headers: a httplib.HTTPMessage or analogous carrying the headers
        """
        self._headers = headers

    def info(self):
        return self._headers

    def getheaders(self, name):
        # Bug fix: the lookup result was previously computed but never
        # returned, so callers always received None.
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
    """Extract the cookies from the response into a CookieJar.

    :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
    :param request: our own requests.Request object
    :param response: urllib3.HTTPResponse object
    """
    # response._original_response is the wrapped httplib.HTTPResponse; its
    # .msg carries the parsed headers that cookielib wants to inspect.
    mock_response = MockResponse(response._original_response.msg)
    mock_request = MockRequest(request)
    jar.extract_cookies(mock_response, mock_request)
def get_cookie_header(jar, request):
    """Produce an appropriate Cookie header string to be sent with `request`, or None."""
    mock = MockRequest(request)
    # The jar writes its Cookie header into the mock's new-headers dict.
    jar.add_cookie_header(mock)
    headers = mock.get_new_headers()
    return headers.get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
    """Unsets a cookie by name, by default over all domains and paths.

    Wraps CookieJar.clear(), is O(n).
    """
    # Collect first, clear second: CookieJar.clear() mutates the jar, so we
    # must not call it while iterating.
    matches = [
        (c.domain, c.path, c.name)
        for c in cookiejar
        if c.name == name
        and (domain is None or domain == c.domain)
        and (path is None or path == c.path)
    ]
    for cookie_domain, cookie_path, cookie_name in matches:
        cookiejar.clear(cookie_domain, cookie_path, cookie_name)
class CookieConflictError(RuntimeError):
    """There are two cookies that meet the criteria specified in the cookie jar.

    Use .get and .set and include domain and path args in order to be more
    specific.
    """
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
    """Compatibility class; is a cookielib.CookieJar, but exposes a dict interface.

    This is the CookieJar we create by default for requests and sessions that
    don't specify one, since some clients may expect response.cookies and
    session.cookies to support dict operations.

    Don't use the dict interface internally; it's just for compatibility
    with external client code. All `requests` code should work out of the box
    with externally provided instances of CookieJar, e.g., LWPCookieJar and
    FileCookieJar.

    Caution: dictionary operations that are normally O(1) may be O(n).

    Unlike a regular CookieJar, this class is pickleable.
    """

    def get(self, name, default=None, domain=None, path=None):
        """Dict-like get() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains. Caution: operation is O(n), not O(1)."""
        try:
            return self._find_no_duplicates(name, domain, path)
        except KeyError:
            return default

    def set(self, name, value, **kwargs):
        """Dict-like set() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains."""
        # support client code that unsets cookies by assignment of a None value:
        if value is None:
            remove_cookie_by_name(self, name, domain=kwargs.get('domain'),
                                  path=kwargs.get('path'))
            return

        # A Morsel carries its own attributes; anything else is treated as a
        # plain value plus create_cookie() keyword arguments.
        if isinstance(value, Morsel):
            c = morsel_to_cookie(value)
        else:
            c = create_cookie(name, value, **kwargs)
        self.set_cookie(c)
        return c

    def keys(self):
        """Dict-like keys() that returns a list of names of cookies from the jar.
        See values() and items()."""
        keys = []
        for cookie in iter(self):
            keys.append(cookie.name)
        return keys

    def values(self):
        """Dict-like values() that returns a list of values of cookies from the jar.
        See keys() and items()."""
        values = []
        for cookie in iter(self):
            values.append(cookie.value)
        return values

    def items(self):
        """Dict-like items() that returns a list of name-value tuples from the jar.
        See keys() and values(). Allows client-code to call "dict(RequestsCookieJar)
        and get a vanilla python dict of key value pairs."""
        items = []
        for cookie in iter(self):
            items.append((cookie.name, cookie.value))
        return items

    def list_domains(self):
        """Utility method to list all the domains in the jar."""
        domains = []
        for cookie in iter(self):
            if cookie.domain not in domains:
                domains.append(cookie.domain)
        return domains

    def list_paths(self):
        """Utility method to list all the paths in the jar."""
        paths = []
        for cookie in iter(self):
            if cookie.path not in paths:
                paths.append(cookie.path)
        return paths

    def multiple_domains(self):
        """Returns True if there are multiple domains in the jar.
        Returns False otherwise."""
        domains = []
        for cookie in iter(self):
            if cookie.domain is not None and cookie.domain in domains:
                return True
            domains.append(cookie.domain)
        return False  # there is only one domain in jar

    def get_dict(self, domain=None, path=None):
        """Takes as an argument an optional domain and path and returns a plain old
        Python dict of name-value pairs of cookies that meet the requirements."""
        dictionary = {}
        for cookie in iter(self):
            if (domain is None or cookie.domain == domain) and (path is None
                or cookie.path == path):
                dictionary[cookie.name] = cookie.value
        return dictionary

    def __getitem__(self, name):
        """Dict-like __getitem__() for compatibility with client code. Throws exception
        if there are more than one cookie with name. In that case, use the more
        explicit get() method instead. Caution: operation is O(n), not O(1)."""
        return self._find_no_duplicates(name)

    def __setitem__(self, name, value):
        """Dict-like __setitem__ for compatibility with client code. Throws exception
        if there is already a cookie of that name in the jar. In that case, use the more
        explicit set() method instead."""
        self.set(name, value)

    def __delitem__(self, name):
        """Deletes a cookie given a name. Wraps cookielib.CookieJar's remove_cookie_by_name()."""
        remove_cookie_by_name(self, name)

    def _find(self, name, domain=None, path=None):
        """Requests uses this method internally to get cookie values. Takes as args name
        and optional domain and path. Returns a cookie.value. If there are conflicting cookies,
        _find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown
        if there are conflicting cookies."""
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        return cookie.value

        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))

    def _find_no_duplicates(self, name, domain=None, path=None):
        """__get_item__ and get call _find_no_duplicates -- never used in Requests internally.
        Takes as args name and optional domain and path. Returns a cookie.value.
        Throws KeyError if cookie is not found and CookieConflictError if there are
        multiple cookies that match name and optionally domain and path."""
        toReturn = None
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        if toReturn is not None: # if there are multiple cookies that meet passed in criteria
                            raise CookieConflictError('There are multiple cookies with name, %r' % (name))
                        toReturn = cookie.value # we will eventually return this as long as no cookie conflict

        # NOTE(review): this is a truthiness check, so a cookie that was
        # found but has a falsy value (e.g. '') still raises KeyError --
        # confirm whether that is intended.
        if toReturn:
            return toReturn
        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))

    def __getstate__(self):
        """Unlike a normal CookieJar, this class is pickleable."""
        state = self.__dict__.copy()
        # remove the unpickleable RLock object
        state.pop('_cookies_lock')
        return state

    def __setstate__(self, state):
        """Unlike a normal CookieJar, this class is pickleable."""
        self.__dict__.update(state)
        # Recreate the lock dropped by __getstate__ so the jar is usable
        # again after unpickling.
        if '_cookies_lock' not in self.__dict__:
            self._cookies_lock = threading.RLock()

    def copy(self):
        """This is not implemented. Calling this will throw an exception."""
        raise NotImplementedError
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.

    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    spec = {
        'version': 0,
        'name': name,
        'value': value,
        'port': None,
        'domain': '',
        'path': '/',
        'secure': False,
        'expires': None,
        'discard': True,
        'comment': None,
        'comment_url': None,
        'rest': {'HttpOnly': None},
        'rfc2109': False,
    }

    # Reject keyword arguments that are not recognized cookie attributes.
    unknown = set(kwargs) - set(spec)
    if unknown:
        err = 'create_cookie() got unexpected keyword arguments: %s'
        raise TypeError(err % list(unknown))

    spec.update(kwargs)
    # cookielib.Cookie also requires the *_specified flags; derive them
    # from whatever values we ended up with.
    spec['port_specified'] = bool(spec['port'])
    spec['domain_specified'] = bool(spec['domain'])
    spec['domain_initial_dot'] = spec['domain'].startswith('.')
    spec['path_specified'] = bool(spec['path'])

    return cookielib.Cookie(**spec)
def morsel_to_cookie(morsel):
    """Convert a Morsel object into a Cookie containing the one k/v pair.

    Bug fix: this previously also passed port_specified, domain_specified,
    domain_initial_dot and path_specified, which create_cookie() rejects
    with TypeError (they are not in its accepted-argument dict).
    create_cookie() derives exactly those flags itself from port/domain/
    path, so dropping them produces an identical Cookie and fixes the
    crash.
    """
    return create_cookie(
        name=morsel.key,
        value=morsel.value,
        version=morsel['version'] or 0,
        port=None,
        domain=morsel['domain'],
        path=morsel['path'],
        secure=bool(morsel['secure']),
        # NOTE(review): 'max-age' is a lifetime in seconds while 'expires'
        # is an absolute date; forwarding either one unchanged as `expires`
        # looks questionable for max-age -- TODO confirm intended semantics.
        expires=morsel['max-age'] or morsel['expires'],
        discard=False,
        comment=morsel['comment'],
        comment_url=bool(morsel['comment']),
        rest={'HttpOnly': morsel['httponly']},
        rfc2109=False)
def cookiejar_from_dict(cookie_dict, cookiejar=None):
    """Returns a CookieJar from a key/value dictionary.

    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :param cookiejar: (optional) existing jar to fill; a fresh
        RequestsCookieJar is created when omitted.
    """
    jar = RequestsCookieJar() if cookiejar is None else cookiejar

    if cookie_dict is not None:
        for cookie_name, cookie_value in cookie_dict.items():
            jar.set_cookie(create_cookie(cookie_name, cookie_value))
    return jar
| apache-2.0 |
AlphaSmartDog/DeepLearningNotes | Note-2 RNN处理非线性回归/sonnet/testing/parameterized/parameterized_test.py | 9 | 13550 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sonnet.testing.parameterized."""
import collections
import unittest
# Dependency imports
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.testing import parameterized
from tensorflow.python.platform import googletest
class MyOwnClass(object):
  """Trivial user-defined type; instances are used below (see
  ArgumentsWithAddresses) as opaque parameterized-test arguments."""
  pass
def DictDecorator(key, value):
  """Sample implementation of a chained decorator.

  Sets a single field in a dict on a test with a dict parameter.
  Uses the exposed '_ParameterizedTestIter.testcases' field to
  modify arguments from previous decorators to allow decorator chains.

  Args:
    key: key to map to
    value: value to set

  Returns:
    The test decorator
  """
  def Decorator(test_method):
    if not isinstance(test_method, collections.Iterable):
      # Innermost decoration: wrap the plain test method once with a
      # single-entry dict parameter.
      suffix = '_%s_%s' % (key, value)
      return parameterized.NamedParameters(
          (suffix, {key: value}))(test_method)

    # Chained decoration: rewrite the testcases produced by the previous
    # DictDecorator in place.  Each testcase is a ('suffix', dict) tuple.
    rewritten = []
    for old_suffix, old_dict in test_method.testcases:
      updated = old_dict.copy()
      updated[key] = value
      rewritten.append(('%s_%s_%s' % (old_suffix, key, value), updated))
    test_method.testcases = rewritten
    return test_method

  return Decorator
class ParameterizedTestsTest(googletest.TestCase):
  """Drives the nested parameterized test cases below by hand.

  Each nested class is run via unittest.makeSuite inside an outer test
  method, and the resulting TestResult is inspected, so the behavior of
  the @parameterized.* decorators can itself be asserted on.
  """
  # The test testcases are nested so they're not
  # picked up by the normal test case loader code.
  class GoodAdditionParams(parameterized.ParameterizedTestCase):

    @parameterized.Parameters(
        (1, 2, 3),
        (4, 5, 9))
    def testAddition(self, op1, op2, result):
      self.arguments = (op1, op2, result)
      self.assertEqual(result, op1 + op2)

  # This class does not inherit from ParameterizedTestCase.
  class BadAdditionParams(googletest.TestCase):

    @parameterized.Parameters(
        (1, 2, 3),
        (4, 5, 9))
    def testAddition(self, op1, op2, result):
      pass  # Always passes, but not called w/out ParameterizedTestCase.

  # One parameter tuple is deliberately wrong so a failure is recorded.
  class MixedAdditionParams(parameterized.ParameterizedTestCase):

    @parameterized.Parameters(
        (1, 2, 1),
        (4, 5, 9))
    def testAddition(self, op1, op2, result):
      self.arguments = (op1, op2, result)
      self.assertEqual(result, op1 + op2)

  class DictionaryArguments(parameterized.ParameterizedTestCase):

    @parameterized.Parameters(
        {'op1': 1, 'op2': 2, 'result': 3},
        {'op1': 4, 'op2': 5, 'result': 9})
    def testAddition(self, op1, op2, result):
      self.assertEqual(result, op1 + op2)

  class NoParameterizedTests(parameterized.ParameterizedTestCase):
    # iterable member with non-matching name
    a = 'BCD'
    # member with matching name, but not a generator
    testInstanceMember = None  # pylint: disable=invalid-name
    # member with a matching name and iterator, but not a generator
    testString = 'foo'  # pylint: disable=invalid-name

    # generator, but no matching name
    def someGenerator(self):  # pylint: disable=invalid-name
      yield
      yield
      yield

    # Generator function, but not a generator instance.
    def testGenerator(self):
      yield
      yield
      yield

    def testNormal(self):
      self.assertEqual(3, 1 + 2)

  class GeneratorTests(parameterized.ParameterizedTestCase):

    def generateTestCases():  # pylint: disable=no-method-argument,invalid-name
      for _ in xrange(10):
        yield lambda x: None

    testGeneratedTestCases = generateTestCases()  # pylint: disable=invalid-name

  class ArgumentsWithAddresses(parameterized.ParameterizedTestCase):

    @parameterized.Parameters(
        (object(),),
        (MyOwnClass(),),
    )
    def testSomething(self, unused_obj):
      pass

  class NamedTests(parameterized.ParameterizedTestCase):

    @parameterized.NamedParameters(
        ('Interesting', 0),
        ('Boring', 1),
    )
    def testSomething(self, unused_obj):
      pass

    def testWithoutParameters(self):
      pass

  class UnderscoreNamedTests(parameterized.ParameterizedTestCase):
    """Example tests using PEP-8 style names instead of camel-case."""

    @parameterized.NamedParameters(
        ('interesting', 0),
        ('boring', 1),
    )
    def test_something(self, unused_obj):
      pass

    def test_without_parameters(self):
      pass

  class ChainedTests(parameterized.ParameterizedTestCase):

    @DictDecorator('cone', 'waffle')
    @DictDecorator('flavor', 'strawberry')
    def testChained(self, dictionary):
      self.assertDictEqual(dictionary, {'cone': 'waffle',
                                        'flavor': 'strawberry'})

  class SingletonListExtraction(parameterized.ParameterizedTestCase):

    @parameterized.Parameters(
        (i, i * 2) for i in range(10))
    def testSomething(self, unused_1, unused_2):
      pass

  class SingletonArgumentExtraction(parameterized.ParameterizedTestCase):

    @parameterized.Parameters(1, 2, 3, 4, 5, 6)
    def testNumbers(self, unused_1):
      pass

    @parameterized.Parameters('foo', 'bar', 'baz')
    def testStrings(self, unused_1):
      pass

  @parameterized.Parameters(
      (1, 2, 3),
      (4, 5, 9))
  class DecoratedClass(parameterized.ParameterizedTestCase):

    def testAdd(self, arg1, arg2, arg3):
      self.assertEqual(arg1 + arg2, arg3)

    def testSubtractFail(self, arg1, arg2, arg3):
      self.assertEqual(arg3 + arg2, arg1)

  @parameterized.Parameters(
      (a, b, a+b) for a in range(1, 5) for b in range(1, 5))
  class GeneratorDecoratedClass(parameterized.ParameterizedTestCase):

    def testAdd(self, arg1, arg2, arg3):
      self.assertEqual(arg1 + arg2, arg3)

    def testSubtractFail(self, arg1, arg2, arg3):
      self.assertEqual(arg3 + arg2, arg1)

  @parameterized.Parameters(
      (1, 2, 3),
      (4, 5, 9),
  )
  class DecoratedBareClass(googletest.TestCase):

    def testAdd(self, arg1, arg2, arg3):
      self.assertEqual(arg1 + arg2, arg3)

  class OtherDecorator(parameterized.ParameterizedTestCase):

    @unittest.skip('wraps _ParameterizedTestIter')
    @parameterized.Parameters((1), (2))
    def testOtherThenParameterized(self, arg1):
      pass

    @parameterized.Parameters((1), (2))
    @unittest.skip('is wrapped by _ParameterizedTestIter')
    def testParameterizedThenOther(self, arg1):
      pass

  def testMissingInheritance(self):
    ts = unittest.makeSuite(self.BadAdditionParams)
    self.assertEqual(1, ts.countTestCases())
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(1, res.testsRun)
    self.assertFalse(res.wasSuccessful())
    self.assertIn('without having inherited', str(res.errors[0]))

  def testCorrectExtractionNumbers(self):
    ts = unittest.makeSuite(self.GoodAdditionParams)
    self.assertEqual(2, ts.countTestCases())

  def testSuccessfulExecution(self):
    ts = unittest.makeSuite(self.GoodAdditionParams)
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(2, res.testsRun)
    self.assertTrue(res.wasSuccessful())

  def testCorrectArguments(self):
    ts = unittest.makeSuite(self.GoodAdditionParams)
    res = unittest.TestResult()
    params = set([
        (1, 2, 3),
        (4, 5, 9)])
    for test in ts:
      test(res)
      self.assertIn(test.arguments, params)
      params.remove(test.arguments)
    self.assertEqual(0, len(params))

  def testRecordedFailures(self):
    ts = unittest.makeSuite(self.MixedAdditionParams)
    self.assertEqual(2, ts.countTestCases())
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(2, res.testsRun)
    self.assertFalse(res.wasSuccessful())
    self.assertEqual(1, len(res.failures))
    self.assertEqual(0, len(res.errors))

  def testId(self):
    ts = unittest.makeSuite(self.ArgumentsWithAddresses)
    self.assertEqual(
        '__main__.ArgumentsWithAddresses.testSomething(<object>)',
        list(ts)[0].id())
    ts = unittest.makeSuite(self.GoodAdditionParams)
    self.assertEqual(
        '__main__.GoodAdditionParams.testAddition(1, 2, 3)',
        list(ts)[0].id())

  def testDictParameters(self):
    ts = unittest.makeSuite(self.DictionaryArguments)
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(2, res.testsRun)
    self.assertTrue(res.wasSuccessful())

  def testGeneratorTests(self):
    ts = unittest.makeSuite(self.GeneratorTests)
    self.assertEqual(10, ts.countTestCases())

  def testNamedParametersRun(self):
    ts = unittest.makeSuite(self.NamedTests)
    self.assertEqual(3, ts.countTestCases())
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(3, res.testsRun)
    self.assertTrue(res.wasSuccessful())

  def testNamedParametersId(self):
    ts = sorted(unittest.makeSuite(self.NamedTests),
                key=lambda t: t.id())
    self.assertEqual(
        '__main__.NamedTests.testSomethingBoring',
        ts[0].id())
    self.assertEqual(
        '__main__.NamedTests.testSomethingInteresting',
        ts[1].id())

  def testNamedParametersIdWithUnderscoreCase(self):
    ts = sorted(unittest.makeSuite(self.UnderscoreNamedTests),
                key=lambda t: t.id())
    self.assertEqual(
        '__main__.UnderscoreNamedTests.test_something_boring',
        ts[0].id())
    self.assertEqual(
        '__main__.UnderscoreNamedTests.test_something_interesting',
        ts[1].id())

  def testLoadNamedTest(self):
    loader = unittest.TestLoader()
    ts = list(loader.loadTestsFromName('NamedTests.testSomethingInteresting',
                                       module=self))
    self.assertEqual(1, len(ts))
    self.assertTrue(ts[0].id().endswith('.testSomethingInteresting'))

  def testDuplicateNamedTestFails(self):
    with self.assertRaises(AssertionError):

      class _(parameterized.ParameterizedTestCase):

        @parameterized.NamedParameters(
            ('Interesting', 0),
            ('Interesting', 1),
        )
        def testSomething(self, unused_obj):
          pass

  def testParameterizedTestIterHasTestcasesProperty(self):
    @parameterized.Parameters(1, 2, 3, 4, 5, 6)
    def testSomething(unused_self, unused_obj):  # pylint: disable=invalid-name
      pass

    expected_testcases = [1, 2, 3, 4, 5, 6]
    self.assertTrue(hasattr(testSomething, 'testcases'))
    # assertItemsEqual was renamed assertCountEqual in Python 3.
    assert_items_equal = (self.assertCountEqual if six.PY3
                          else self.assertItemsEqual)
    assert_items_equal(expected_testcases, testSomething.testcases)

  def testChainedDecorator(self):
    ts = unittest.makeSuite(self.ChainedTests)
    self.assertEqual(1, ts.countTestCases())
    test = next(t for t in ts)
    self.assertTrue(hasattr(test, 'testChained_flavor_strawberry_cone_waffle'))
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(1, res.testsRun)
    self.assertTrue(res.wasSuccessful())

  def testSingletonListExtraction(self):
    ts = unittest.makeSuite(self.SingletonListExtraction)
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(10, res.testsRun)
    self.assertTrue(res.wasSuccessful())

  def testSingletonArgumentExtraction(self):
    ts = unittest.makeSuite(self.SingletonArgumentExtraction)
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(9, res.testsRun)
    self.assertTrue(res.wasSuccessful())

  def testDecoratedBareClass(self):
    ts = unittest.makeSuite(self.DecoratedBareClass)
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(2, res.testsRun)
    self.assertTrue(res.wasSuccessful(), msg=str(res.failures))

  def testDecoratedClass(self):
    ts = unittest.makeSuite(self.DecoratedClass)
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(4, res.testsRun)
    self.assertEqual(2, len(res.failures))

  def testGeneratorDecoratedClass(self):
    ts = unittest.makeSuite(self.GeneratorDecoratedClass)
    res = unittest.TestResult()
    ts.run(res)
    self.assertEqual(32, res.testsRun)
    self.assertEqual(16, len(res.failures))

  def testNoDuplicateDecorations(self):
    with self.assertRaises(AssertionError):

      @parameterized.Parameters(1, 2, 3, 4)
      class _(parameterized.ParameterizedTestCase):

        @parameterized.Parameters(5, 6, 7, 8)
        def testSomething(self, unused_obj):
          pass

  def testOtherDecoratorOrdering(self):
    ts = unittest.makeSuite(self.OtherDecorator)
    res = unittest.TestResult()
    ts.run(res)
    # Two for when the parameterized tests call the skip wrapper.
    # One for when the skip wrapper is called first and doesn't iterate.
    self.assertEqual(3, res.testsRun)
    self.assertTrue(res.wasSuccessful(), msg=str(res.failures))
def _DecorateWithSideEffects(func, self):
self.sideeffect = True
func(self)
# Run all tests in this module when executed as a script.
if __name__ == '__main__':
  unittest.main()
| mit |
kodabb/pjproject | pjsip-apps/src/swig/python/test.py | 44 | 3447 | import pjsua2 as pj
import sys
import time
#
# Basic data structure test, to make sure basic struct
# and array operations work
#
def ua_data_test():
    """Exercise basic SWIG-wrapped data types: scalar struct fields and
    std::vector wrappers (StringVector)."""
    #
    # AuthCredInfo
    #
    print "UA data types test.."
    the_realm = "pjsip.org"
    ci = pj.AuthCredInfo()
    ci.realm = the_realm
    ci.dataType = 20
    # Assignment copies the reference; both names must see the same data.
    ci2 = ci
    assert ci.dataType == 20
    assert ci2.realm == the_realm
    #
    # UaConfig
    # See here how we manipulate std::vector
    #
    uc = pj.UaConfig()
    uc.maxCalls = 10
    uc.userAgent = "Python"
    uc.nameserver = pj.StringVector(["10.0.0.1", "10.0.0.2"])
    uc.nameserver.append("NS1")
    uc2 = uc
    assert uc2.maxCalls == 10
    assert uc2.userAgent == "Python"
    assert len(uc2.nameserver) == 3
    assert uc2.nameserver[0] == "10.0.0.1"
    assert uc2.nameserver[1] == "10.0.0.2"
    assert uc2.nameserver[2] == "NS1"
    print " Dumping nameservers: ",
    for s in uc2.nameserver:
        print s,
    print ""
#
# Exception test
#
def ua_run_test_exception():
    """Verify that a failing library call surfaces as a pj.Error with the
    expected status/reason/title fields."""
    print "Exception test.."
    ep = pj.Endpoint()
    ep.libCreate()
    got_exception = False
    try:
        # Called before STUN is configured, so this is expected to fail.
        ep.natDetectType()
    except pj.Error, e:
        got_exception = True
        print " Got exception: status=%u, reason=%s,\n title=%s,\n srcFile=%s, srcLine=%d" % \
            (e.status, e.reason, e.title, e.srcFile, e.srcLine)
        assert e.status == 370050
        assert e.reason.find("PJNATH_ESTUNINSERVER") >= 0
        assert e.title == "pjsua_detect_nat_type()"
    assert got_exception
#
# Custom log writer
#
class MyLogWriter(pj.LogWriter):
    """Custom log writer callback: echoes each log entry from Python."""
    def write(self, entry):
        print "This is Python:", entry.msg
#
# Testing log writer callback
#
def ua_run_log_test():
    """Check that library log output is routed through a Python LogWriter."""
    print "Logging test.."
    ep_cfg = pj.EpConfig()
    lw = MyLogWriter()
    ep_cfg.logConfig.writer = lw
    # Clear the CR/NEWLINE decoration bits so entries arrive without
    # trailing line terminators.
    ep_cfg.logConfig.decor = ep_cfg.logConfig.decor & ~(pj.PJ_LOG_HAS_CR | pj.PJ_LOG_HAS_NEWLINE)
    ep = pj.Endpoint()
    ep.libCreate()
    ep.libInit(ep_cfg)
    ep.libDestroy()
#
# Simple create, init, start, and destroy sequence
#
def ua_run_ua_test():
    """Simple endpoint lifecycle: create, init, start, and destroy."""
    print "UA test run.."
    ep_cfg = pj.EpConfig()
    ep = pj.Endpoint()
    ep.libCreate()
    ep.libInit(ep_cfg)
    ep.libStart()
    print "************* Endpoint started ok, now shutting down... *************"
    ep.libDestroy()
#
# Tone generator
#
def ua_tonegen_test():
    """Exercise ToneGenerator: play a dual-frequency tone, DTMF digits,
    and manipulate the digit map, sending audio to the playback device."""
    print "UA tonegen test.."
    ep_cfg = pj.EpConfig()
    ep = pj.Endpoint()
    ep.libCreate()
    ep.libInit(ep_cfg)
    ep.libStart()
    tonegen = pj.ToneGenerator()
    tonegen.createToneGenerator()
    # One 400/600 Hz tone, 1s on / 1s off.
    tone = pj.ToneDesc()
    tone.freq1 = 400
    tone.freq2 = 600
    tone.on_msec = 1000
    tone.off_msec = 1000
    tones = pj.ToneDescVector()
    tones.append(tone)
    # A single DTMF digit '0', 1s on / 1s off.
    digit = pj.ToneDigit()
    digit.digit = '0'
    digit.on_msec = 1000
    digit.off_msec = 1000
    digits = pj.ToneDigitVector()
    digits.append(digit)
    adm = ep.audDevManager()
    spk = adm.getPlaybackDevMedia()
    tonegen.play(tones, True)
    tonegen.startTransmit(spk)
    time.sleep(5)
    tonegen.stop()
    tonegen.playDigits(digits, True)
    time.sleep(5)
    # Round-trip the digit map: read it, tweak the first entry, write back.
    dm = tonegen.getDigitMap()
    print dm[0].digit
    dm[0].freq1 = 400
    dm[0].freq2 = 600
    tonegen.setDigitMap(dm)
    tonegen.stop()
    tonegen.playDigits(digits, True)
    time.sleep(5)
    # Drop the generator before tearing down the library.
    tonegen = None
    ep.libDestroy()
#
# main()
#
# Run each smoke test in sequence; any failed assert aborts the run.
if __name__ == "__main__":
    ua_data_test()
    ua_run_test_exception()
    ua_run_log_test()
    ua_run_ua_test()
    ua_tonegen_test()
    sys.exit(0)
| gpl-2.0 |
anhaidgroup/py_entitymatching | py_entitymatching/dask/dask_extract_features.py | 1 | 9597 | import logging
import os
import pandas as pd
import multiprocessing
import numpy as np
import dask
from dask.diagnostics import ProgressBar
from dask import delayed
from cloudpickle import cloudpickle
import tempfile
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
import py_entitymatching.utils.generic_helper as gh
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.feature.extractfeatures import get_feature_vals_by_cand_split
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
def dask_extract_feature_vecs(candset, attrs_before=None, feature_table=None,
                              attrs_after=None, verbose=False,
                              show_progress=True, n_chunks=1):
    """
    WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK

    This function extracts feature vectors from a DataFrame (typically a
    labeled candidate set).

    Specifically, this function uses feature
    table, ltable and rtable (that is present in the `candset`'s
    metadata) to extract feature vectors.

    Args:
        candset (DataFrame): The input candidate set for which the features
            vectors should be extracted.
        attrs_before (list): The list of attributes from the input candset,
            that should be added before the feature vectors (defaults to None).
        feature_table (DataFrame): A DataFrame containing a list of
            features that should be used to compute the feature vectors (
            defaults to None).
        attrs_after (list): The list of attributes from the input candset
            that should be added after the feature vectors (defaults to None).
        verbose (boolean): A flag to indicate whether the debug information
            should be displayed (defaults to False).
        show_progress (boolean): A flag to indicate whether the progress of
            extracting feature vectors must be displayed (defaults to True).
        n_chunks (int): The number of partitions to split the candidate set. If
            it is set to -1, the number of partitions will be set to the
            number of cores in the machine.

    Returns:
        A pandas DataFrame containing feature vectors.

        The DataFrame will have metadata ltable and rtable, pointing
        to the same ltable and rtable as the input candset.

        Also, the output
        DataFrame will have three columns: key, foreign key ltable, foreign
        key rtable copied from input candset to the output DataFrame. These
        three columns precede the columns mentioned in `attrs_before`.

    Raises:
        AssertionError: If `candset` is not of type pandas
            DataFrame.
        AssertionError: If `attrs_before` has attributes that
            are not present in the input candset.
        AssertionError: If `attrs_after` has attribtues that
            are not present in the input candset.
        AssertionError: If `feature_table` is set to None.
        AssertionError: If `n_chunks` is not of type
            int.

    Examples:
        >>> import py_entitymatching as em
        >>> from py_entitymatching.dask.dask_extract_features import dask_extract_feature_vecs
        >>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
        >>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
        >>> match_f = em.get_features_for_matching(A, B)
        >>> # G is the labeled dataframe which should be converted into feature vectors
        >>> H = dask_extract_feature_vecs(G, feature_table=match_f, attrs_before=['title'], attrs_after=['gold_labels'])
    """
    logger.warning(
        "WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.")

    # Validate input parameters.
    # We expect the input candset to be of type pandas DataFrame.
    validate_object_type(candset, pd.DataFrame, error_prefix='Input cand.set')

    # If attrs_before is given, check that those attributes are present in
    # the input candset.  (Use "is not None" -- PEP 8 E711.)
    if attrs_before is not None:
        if not ch.check_attrs_present(candset, attrs_before):
            logger.error(
                'The attributes mentioned in attrs_before is not present '
                'in the input table')
            raise AssertionError(
                'The attributes mentioned in attrs_before is not present '
                'in the input table')

    # If attrs_after is given, check that those attributes are present in
    # the input candset.
    if attrs_after is not None:
        if not ch.check_attrs_present(candset, attrs_after):
            logger.error(
                'The attributes mentioned in attrs_after is not present '
                'in the input table')
            raise AssertionError(
                'The attributes mentioned in attrs_after is not present '
                'in the input table')

    # We expect the feature table to be a valid object.
    if feature_table is None:
        logger.error('Feature table cannot be null')
        raise AssertionError('The feature table cannot be null')

    # Do metadata checking.
    # Mention what metadata is required to the user.
    ch.log_info(logger, 'Required metadata: cand.set key, fk ltable, '
                        'fk rtable, '
                        'ltable, rtable, ltable key, rtable key', verbose)

    # Get metadata.
    ch.log_info(logger, 'Getting metadata from catalog', verbose)

    key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = \
        cm.get_metadata_for_candset(
            candset, logger, verbose)

    # Validate metadata.
    ch.log_info(logger, 'Validating metadata', verbose)
    cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable,
                                      ltable, rtable, l_key, r_key,
                                      logger, verbose)

    # Set index on the base tables for fast id-based lookup during feature
    # computation.
    l_df = ltable.set_index(l_key, drop=False)
    r_df = rtable.set_index(r_key, drop=False)

    # Apply feature functions.
    ch.log_info(logger, 'Applying feature functions', verbose)
    col_names = list(candset.columns)
    fk_ltable_idx = col_names.index(fk_ltable)
    fk_rtable_idx = col_names.index(fk_rtable)

    validate_object_type(n_chunks, int, 'Parameter n_chunks')
    validate_chunks(n_chunks)

    n_chunks = get_num_partitions(n_chunks, len(candset))
    c_splits = np.array_split(candset, n_chunks)

    # Pickle the feature table once so it can be shipped to worker processes.
    pickled_obj = cloudpickle.dumps(feature_table)

    feat_vals_by_splits = []
    for i in range(len(c_splits)):
        partial_result = delayed(get_feature_vals_by_cand_split)(pickled_obj,
                                                                 fk_ltable_idx,
                                                                 fk_rtable_idx,
                                                                 l_df, r_df,
                                                                 c_splits[i],
                                                                 False)
        feat_vals_by_splits.append(partial_result)

    feat_vals_by_splits = delayed(wrap)(feat_vals_by_splits)

    # Compute once; the progress bar is purely cosmetic, so only the context
    # differs between the two cases.
    progress_ctx = ProgressBar() if show_progress else None
    if progress_ctx is not None:
        with progress_ctx:
            feat_vals_by_splits = feat_vals_by_splits.compute(
                scheduler="processes", num_workers=get_num_cores())
    else:
        feat_vals_by_splits = feat_vals_by_splits.compute(
            scheduler="processes", num_workers=get_num_cores())

    # Flatten the per-partition lists into one list of feature dicts.
    feat_vals = sum(feat_vals_by_splits, [])

    # Construct the output table, preserving the candset's index.
    feature_vectors = pd.DataFrame(feat_vals, index=candset.index.values)

    # Rearrange the feature names in the input feature table order.
    feature_names = list(feature_table['feature_name'])
    feature_vectors = feature_vectors[feature_names]

    ch.log_info(logger, 'Constructing output table', verbose)

    # Insert attrs_before (reversed so the list order is preserved when
    # inserting each attribute at position 0).
    if attrs_before:
        if not isinstance(attrs_before, list):
            attrs_before = [attrs_before]
        attrs_before = gh.list_diff(attrs_before, [key, fk_ltable, fk_rtable])
        attrs_before.reverse()
        for a in attrs_before:
            feature_vectors.insert(0, a, candset[a])

    # Insert keys: after these inserts the column order is
    # key, fk_ltable, fk_rtable, attrs_before..., features...
    feature_vectors.insert(0, fk_rtable, candset[fk_rtable])
    feature_vectors.insert(0, fk_ltable, candset[fk_ltable])
    feature_vectors.insert(0, key, candset[key])

    # Insert attrs_after at the end, preserving the given order.
    if attrs_after:
        if not isinstance(attrs_after, list):
            attrs_after = [attrs_after]
        attrs_after = gh.list_diff(attrs_after, [key, fk_ltable, fk_rtable])
        attrs_after.reverse()
        col_pos = len(feature_vectors.columns)
        for a in attrs_after:
            feature_vectors.insert(col_pos, a, candset[a])
            col_pos += 1

    # Update the catalog: the output inherits the candset's metadata.
    cm.init_properties(feature_vectors)
    cm.copy_properties(candset, feature_vectors)

    # Finally, return the feature vectors.
    return feature_vectors
| bsd-3-clause |
HLFH/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/mechanize/_mechanize.py | 133 | 24916 | """Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <jjl@pobox.com>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import copy, re, os, urllib, urllib2
from _html import DefaultFactory
import _response
import _request
import _rfc3986
import _sockettimeout
import _urllib2_fork
from _useragent import UserAgentBase
# BrowserStateError: the Browser is in the wrong state for the requested
# operation (e.g. .back() on empty history).  LinkNotFoundError and
# FormNotFoundError signal failed lookups in the current document.
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
    """Convert a filesystem path into the path component of a file: URL."""
    url = urllib.pathname2url(path)
    # On Windows, pathname2url yields ///C|/...; drop the extra slashes.
    if os.name == "nt" and url.startswith("///"):
        url = url[2:]
    # XXX don't ask me about the mac...
    return url
class History:
    """LIFO record of (request, response) pairs already visited.

    Though this will become public, the implied interface is not yet
    stable.
    """

    def __init__(self):
        self._history = []  # most-recent entry last (LIFO)

    def add(self, request, response):
        self._history.append((request, response))

    def back(self, n, _response):
        # Walk back n steps, continuing to pop while the current
        # response is None so a real response is always returned.
        current = _response  # XXX move Browser._response into this class?
        while n > 0 or current is None:
            try:
                request, current = self._history.pop()
            except IndexError:
                raise BrowserStateError("already at start of history")
            n -= 1
        return request, current

    def clear(self):
        del self._history[:]

    def close(self):
        # Release every recorded response, then drop all entries.
        for request, response in self._history:
            if response is not None:
                response.close()
        del self._history[:]
class HTTPRefererProcessor(_urllib2_fork.BaseHandler):
    """Handler that fills in the Referer header on redirected requests."""

    def http_request(self, request):
        # See RFC 2616 14.36.  The only times we know the source of the
        # request URI has a URI associated with it are redirect, and
        # Browser.click() / Browser.submit() / Browser.follow_link().
        # Otherwise, it's the user's job to add any Referer header
        # before .open()ing.
        if hasattr(request, "redirect_dict"):
            request = self.parent._add_referer_header(
                request, origin_request=False)
        return request

    https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
complete the requested operation - e.g., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request)
form: currently selected form (see .select_form())
"""
# Extend the inherited handler table with the Referer processor and
# enable it by default (copies avoid mutating UserAgentBase's own lists).
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
             factory=None,
             history=None,
             request_class=None,
             ):
    """
    Only named arguments should be passed to this constructor.

    factory: object implementing the mechanize.Factory interface.
    history: object implementing the mechanize.History interface.  Note
     this interface is still experimental and may change in future.
    request_class: Request class to use.  Defaults to mechanize.Request

    The Factory and History objects passed in are 'owned' by the Browser,
    so they should not be shared across Browsers.  In particular,
    factory.set_response() should not be called except by the owning
    Browser itself.

    Note that the supplied factory's request_class is overridden by this
    constructor, to ensure only one Request class is used.
    """
    self._handle_referer = True
    if history is None:
        history = History()
    self._history = history
    if request_class is None:
        request_class = _request.Request
    if factory is None:
        factory = DefaultFactory()
    # Force a single Request class for the whole Browser.
    factory.set_request_class(request_class)
    self._factory = factory
    self.request_class = request_class
    self.request = None
    self._set_response(None, False)
    # do this last to avoid __getattr__ problems
    UserAgentBase.__init__(self)
def close(self):
    """Close handlers, response and history, then disable most public
    attributes so accidental use after .close() fails fast."""
    UserAgentBase.close(self)
    if self._response is not None:
        self._response.close()
    if self._history is not None:
        self._history.close()
        self._history = None
    # make use after .close easy to spot
    self.form = None
    self.request = self._response = None
    self.request = self.response = self.set_response = None
    self.geturl = self.reload = self.back = None
    self.clear_history = self.set_cookie = self.links = self.forms = None
    self.viewing_html = self.encoding = self.title = None
    self.select_form = self.click = self.submit = self.click_link = None
    self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
    """Enable or disable automatic addition of Referer headers."""
    self._set_handler("_referer", handle)
    self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
    """Return *request*, with a Referer header added when appropriate.

    A Referer is only added for http/https targets, and never in a way
    that would leak an https URL to a non-https target (RFC 2616 14.36).
    """
    if self.request is None:
        return request
    scheme = request.get_type()
    original_scheme = self.request.get_type()
    if scheme not in ["http", "https"]:
        return request
    if not origin_request and not self.request.has_header("Referer"):
        # Not an internally-generated request and the current request
        # carries no Referer of its own: propagate nothing.
        return request
    if (self._handle_referer and
        original_scheme in ["http", "https"] and
        not (original_scheme == "https" and scheme != "https")):
        # strip URL fragment (RFC 2616 14.36)
        parts = _rfc3986.urlsplit(self.request.get_full_url())
        parts = parts[:-1]+(None,)
        referer = _rfc3986.urlunsplit(parts)
        request.add_unredirected_header("Referer", referer)
    return request
def open_novisit(self, url, data=None,
                 timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
    """Fetch a URL without changing browser state.

    Request, response, history, forms and links are all left untouched,
    which makes this suitable for auxiliary fetches such as images.
    The interface is the same as for .open().  See also .retrieve().
    """
    return self._mech_open(url, data, visit=False, timeout=timeout)
def open(self, url, data=None,
         timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
    """Open a URL, making it the current (visited) document."""
    return self._mech_open(url, data, timeout=timeout)
def _mech_open(self, url, data=None, update_history=True, visit=None,
               timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
    """Shared implementation behind .open() and .open_novisit()."""
    try:
        # Duck-type check: anything with .get_full_url is already a Request.
        url.get_full_url
    except AttributeError:
        # string URL -- convert to absolute URL if required
        scheme, authority = _rfc3986.urlsplit(url)[:2]
        if scheme is None:
            # relative URL
            if self._response is None:
                raise BrowserStateError(
                    "can't fetch relative reference: "
                    "not viewing any document")
            url = _rfc3986.urljoin(self._response.geturl(), url)
    request = self._request(url, data, visit, timeout)
    visit = request.visit
    if visit is None:
        visit = True
    if visit:
        self._visit_request(request, update_history)
    success = True
    try:
        response = UserAgentBase.open(self, request, data)
    except urllib2.HTTPError, error:
        # HTTP errors carrying a body are still treated as responses;
        # they are re-raised below after state has been updated.
        success = False
        if error.fp is None:  # not a response
            raise
        response = error
    ##     except (IOError, socket.error, OSError), error:
    ##         # Yes, urllib2 really does raise all these :-((
    ##         # See test_urllib2.py for examples of socket.gaierror and OSError,
    ##         # plus note that FTPHandler raises IOError.
    ##         # XXX I don't seem to have an example of exactly socket.error being
    ##         #  raised, only socket.gaierror...
    ##         # I don't want to start fixing these here, though, since this is a
    ##         # subclass of OpenerDirector, and it would break old code.  Even in
    ##         # Python core, a fix would need some backwards-compat. hack to be
    ##         # acceptable.
    ##         raise
    if visit:
        self._set_response(response, False)
        response = copy.copy(self._response)
    elif response is not None:
        response = _response.upgrade_response(response)
    if not success:
        raise response
    return response
def __str__(self):
    """Return a short human-readable summary of the browser state."""
    if self._response:
        where = "visiting %s" % self._response.geturl()
    else:
        where = "(not viewing a URL)"
    parts = ["<%s " % self.__class__.__name__, where]
    if self.form:
        parts.append("\n selected form:\n %s\n" % str(self.form))
    parts.append(">")
    return "".join(parts)
def response(self):
    """Return a copy of the current response.

    The returned object supports the same interface as objects returned
    by .open() (or mechanize.urlopen()).
    """
    return copy.copy(self._response)
def open_local_file(self, filename):
    """Open a local file via a file: URL and return the response."""
    url = "file://" + sanepathname2url(os.path.abspath(filename))
    return self.open(url)
def set_response(self, response):
    """Replace the current response with (a copy of) *response*.

    *response* may be None.  This is intended mostly for
    HTML-preprocessing.
    """
    self._set_response(response, True)
def _set_response(self, response, close_current):
    """Install *response* as the current response, optionally closing
    the previous one, and notify the factory."""
    # sanity check, necessary but far from sufficient
    if not (response is None or
            (hasattr(response, "info") and hasattr(response, "geturl") and
             hasattr(response, "read")
             )
            ):
        raise ValueError("not a response object")
    # Any previously selected form belongs to the old document.
    self.form = None
    if response is not None:
        response = _response.upgrade_response(response)
    if close_current and self._response is not None:
        self._response.close()
    self._response = response
    self._factory.set_response(response)
def visit_response(self, response, request=None):
    """Visit *response* as if it had been .open()ed.

    Unlike .set_response(), this pushes the current state onto the
    history rather than replacing it.
    """
    if request is None:
        request = _request.Request(response.geturl())
    self._visit_request(request, True)
    self._set_response(response, False)
def _visit_request(self, request, update_history):
    """Push the current (request, response) pair onto history and make
    *request* the current request."""
    if self._response is not None:
        self._response.close()
    if self.request is not None and update_history:
        self._history.add(self.request, self._response)
    self._response = None
    # we want self.request to be assigned even if UserAgentBase.open
    # fails
    self.request = request
def geturl(self):
    """Return the URL of the document currently being viewed."""
    if self._response is None:
        raise BrowserStateError("not viewing any document")
    return self._response.geturl()
def reload(self):
    """Re-fetch the current document and return the response object."""
    if self.request is None:
        raise BrowserStateError("no URL has yet been .open()ed")
    if self._response is not None:
        self._response.close()
    # A reload must not push another history entry.
    return self._mech_open(self.request, update_history=False)
def back(self, n=1):
    """Go back *n* steps in history (default 1) and return the response."""
    if self._response is not None:
        self._response.close()
    self.request, response = self._history.back(n, self._response)
    self.set_response(response)
    # A partially-read response cannot simply be replayed; fetch it again.
    if not response.read_complete:
        return self.reload()
    return copy.copy(response)
def clear_history(self):
    """Forget all previously visited (request, response) pairs."""
    self._history.clear()
def set_cookie(self, cookie_string):
    """Request to set a cookie.

    Note that it is NOT necessary to call this method under ordinary
    circumstances: cookie handling is normally entirely automatic.  The
    intended use case is rather to simulate the setting of a cookie by
    client script in a web page (e.g. JavaScript).  In that case, use of
    this method is necessary because mechanize currently does not support
    JavaScript, VBScript, etc.

    The cookie is added in the same way as if it had arrived with the
    current response, as a result of the current request.  This means that,
    for example, if it is not appropriate to set the cookie based on the
    current request, no cookie will be set.

    The cookie will be returned automatically with subsequent responses
    made by the Browser instance whenever that's appropriate.

    cookie_string should be a valid value of the Set-Cookie header.

    For example:

    browser.set_cookie(
        "sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")

    Currently, this method does not allow for adding RFC 2986 cookies.
    This limitation will be lifted if anybody requests it.
    """
    if self._response is None:
        raise BrowserStateError("not viewing any document")
    if self.request.get_type() not in ["http", "https"]:
        raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
                                "transactions")
    cookiejar = self._ua_handlers["_cookies"].cookiejar
    response = self.response()  # copy
    headers = response.info()
    headers["Set-cookie"] = cookie_string
    # Feed the synthesised header through the normal extraction path so
    # the cookiejar's policy checks still apply.
    cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
    """Return an iterable over links (mechanize.Link objects), optionally
    filtered by the given keyword criteria."""
    if not self.viewing_html():
        raise BrowserStateError("not viewing HTML")
    all_links = self._factory.links()
    if not kwds:
        return all_links
    return self._filter_links(all_links, **kwds)
def forms(self):
    """Return an iterable over forms.

    The returned form objects implement the mechanize.HTMLForm
    interface.
    """
    if not self.viewing_html():
        raise BrowserStateError("not viewing HTML")
    return self._factory.forms()
    def global_form(self):
        """Return the global form object, or None if the factory implementation
        did not supply one.

        The "global" form object contains all controls that are not descendants
        of any FORM element.

        The returned form object implements the mechanize.HTMLForm interface.

        This is a separate method since the global form is not regarded as part
        of the sequence of forms in the document -- mostly for
        backwards-compatibility.
        """
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        # Note: attribute access, not a call -- the factory caches this object.
        return self._factory.global_form
    def viewing_html(self):
        """Return whether the current response contains HTML data."""
        if self._response is None:
            raise BrowserStateError("not viewing any document")
        return self._factory.is_html
    def encoding(self):
        """Return the character encoding of the current response, as reported
        by the response-parsing factory.

        Raises BrowserStateError if no document is being viewed.
        """
        if self._response is None:
            raise BrowserStateError("not viewing any document")
        return self._factory.encoding
    def title(self):
        r"""Return title, or None if there is no title element in the document.

        Treatment of any tag children of attempts to follow Firefox and IE
        (currently, tags are preserved).
        """
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        return self._factory.title
    def select_form(self, name=None, predicate=None, nr=None):
        """Select an HTML form for input.

        This is a bit like giving a form the "input focus" in a browser.

        If a form is selected, the Browser object supports the HTMLForm
        interface, so you can call methods like .set_value(), .set(), and
        .click().

        Another way to select a form is to assign to the .form attribute.  The
        form assigned should be one of the objects returned by the .forms()
        method.

        At least one of the name, predicate and nr arguments must be supplied.
        If no matching form is found, mechanize.FormNotFoundError is raised.

        If name is specified, then the form must have the indicated name.

        If predicate is specified, then the form must match that function.  The
        predicate function is passed the HTMLForm as its single argument, and
        should return a boolean value indicating whether the form matched.

        nr, if supplied, is the sequence number of the form (where 0 is the
        first).  Note that control 0 is the first form matching all the other
        arguments (if supplied); it is not necessarily the first control in the
        form.  The "global form" (consisting of all form controls not contained
        in any FORM element) is considered not to be part of this sequence and
        to have no name, so will not be matched unless both name and nr are
        None.
        """
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        if (name is None) and (predicate is None) and (nr is None):
            raise ValueError(
                "at least one argument must be supplied to specify form")
        # The global form can only be selected by predicate (it is nameless
        # and not part of the numbered form sequence).
        global_form = self._factory.global_form
        if nr is None and name is None and \
               predicate is not None and predicate(global_form):
            self.form = global_form
            return
        # Keep the original nr for the error message; nr itself is consumed
        # as a countdown over matching forms.
        orig_nr = nr
        for form in self.forms():
            if name is not None and name != form.name:
                continue
            if predicate is not None and not predicate(form):
                continue
            if nr:
                nr -= 1
                continue
            self.form = form
            break  # success
        else:
            # failure: loop completed without break -- build a description of
            # the criteria for the exception message
            description = []
            if name is not None: description.append("name '%s'" % name)
            if predicate is not None:
                description.append("predicate %s" % predicate)
            if orig_nr is not None: description.append("nr %d" % orig_nr)
            description = ", ".join(description)
            raise FormNotFoundError("no form matching "+description)
    def click(self, *args, **kwds):
        """See mechanize.HTMLForm.click for documentation."""
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        # Delegate to the currently selected form, then decorate the request
        # (see ._add_referer_header) before handing it back to the caller.
        request = self.form.click(*args, **kwds)
        return self._add_referer_header(request)
    def submit(self, *args, **kwds):
        """Submit current form.

        Arguments are as for mechanize.HTMLForm.click().

        Return value is same as for Browser.open().
        """
        return self.open(self.click(*args, **kwds))
    def click_link(self, link=None, **kwds):
        """Find a link and return a Request object for it.

        Arguments are as for .find_link(), except that a link may be supplied
        as the first argument.
        """
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        if not link:
            # No explicit Link given: locate one using the filter keywords.
            link = self.find_link(**kwds)
        else:
            # An explicit Link and filter keywords are mutually exclusive.
            if kwds:
                raise ValueError(
                    "either pass a Link, or keyword arguments, not both")
        request = self.request_class(link.absolute_url)
        return self._add_referer_header(request)
    def follow_link(self, link=None, **kwds):
        """Find a link and .open() it.

        Arguments are as for .click_link().

        Return value is same as for Browser.open().
        """
        return self.open(self.click_link(link, **kwds))
    def find_link(self, **kwds):
        """Find a link in current page.

        Links are returned as mechanize.Link objects.

        # Return third link that .search()-matches the regexp "python"
        # (by ".search()-matches", I mean that the regular expression method
        # .search() is used, rather than .match()).
        find_link(text_regex=re.compile("python"), nr=2)

        # Return first http link in the current page that points to somewhere
        # on python.org whose link text (after tags have been removed) is
        # exactly "monty python".
        find_link(text="monty python",
                  url_regex=re.compile("http.*python.org"))

        # Return first link with exactly three HTML attributes.
        find_link(predicate=lambda link: len(link.attrs) == 3)

        Links include anchors (<a>), image maps (<area>), and frames (<frame>,
        <iframe>).

        All arguments must be passed by keyword, not position.  Zero or more
        arguments may be supplied.  In order to find a link, all arguments
        supplied must match.

        If a matching link is not found, mechanize.LinkNotFoundError is raised.

        text: link text between link tags: e.g. <a href="blah">this bit</a> (as
        returned by pullparser.get_compressed_text(), ie. without tags but
        with opening tags "textified" as per the pullparser docs) must compare
        equal to this argument, if supplied

        text_regex: link text between tag (as defined above) must match the
        regular expression object or regular expression string passed as this
        argument, if supplied

        name, name_regex: as for text and text_regex, but matched against the
        name HTML attribute of the link tag

        url, url_regex: as for text and text_regex, but matched against the
        URL of the link tag (note this matches against Link.url, which is a
        relative or absolute URL according to how it was written in the HTML)

        tag: element name of opening tag, e.g. "a"

        predicate: a function taking a Link object as its single argument,
        returning a boolean result, indicating whether the links

        nr: matches the nth link that matches all other criteria (default 0)
        """
        try:
            # Python 2 iterator protocol: take the first link produced by the
            # lazy filter generator, if any.
            return self._filter_links(self._factory.links(), **kwds).next()
        except StopIteration:
            raise LinkNotFoundError()
    def __getattr__(self, name):
        # pass through _form.HTMLForm methods and attributes
        # Direct __dict__ access (rather than self.form) avoids re-entering
        # __getattr__ recursively when no form has been selected yet.
        form = self.__dict__.get("form")
        if form is None:
            raise AttributeError(
                "%s instance has no attribute %s (perhaps you forgot to "
                ".select_form()?)" % (self.__class__, name))
        return getattr(form, name)
    def _filter_links(self, links,
                      text=None, text_regex=None,
                      name=None, name_regex=None,
                      url=None, url_regex=None,
                      tag=None,
                      predicate=None,
                      nr=0
                      ):
        # Generator yielding links that satisfy every supplied criterion.
        # See .find_link() for the meaning of each keyword argument.
        if not self.viewing_html():
            raise BrowserStateError("not viewing HTML")
        orig_nr = nr
        for link in links:
            if url is not None and url != link.url:
                continue
            if url_regex is not None and not re.search(url_regex, link.url):
                continue
            # Text comparisons must guard against links with no text at all.
            if (text is not None and
                (link.text is None or text != link.text)):
                continue
            if (text_regex is not None and
                (link.text is None or not re.search(text_regex, link.text))):
                continue
            if name is not None and name != dict(link.attrs).get("name"):
                continue
            if name_regex is not None:
                link_name = dict(link.attrs).get("name")
                if link_name is None or not re.search(name_regex, link_name):
                    continue
            if tag is not None and tag != link.tag:
                continue
            if predicate is not None and not predicate(link):
                continue
            # nr counts down over matches; skip until it reaches zero.
            if nr:
                nr -= 1
                continue
            yield link
            # Reset the countdown after each yield, so every (orig_nr + 1)-th
            # matching link is produced on continued iteration.
            nr = orig_nr
| gpl-3.0 |
BoltzmannBrain/nupic.research | projects/union_pooling/experiments/variation_robustness/variation_robustness_experiment.py | 3 | 27896 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import numpy
from optparse import OptionParser
import os
import pprint
import random
import sys
import time
import yaml
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from nupic.research.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from union_temporal_pooling.experiments.union_temporal_pooler_experiment import (
UnionTemporalPoolerExperiment)
"""
Variation Robustness Experiment
Data: Sequences generated from an alphabet. Patterns do not overlap between
sequences.
Train Phase: Train network on sequences for some number of repetitions, then
train KNN Classifier on sequences learning the same category label for each
element in the sequence.
Test phase: Input sequence pattern by pattern. Sequence-to-sequence
progression is randomly selected. At each step there is a chance that the
next pattern in the sequence is not shown. Specifically the following
perturbations may occur:
1) random Jump to another sequence
2) substitution of some other pattern for the normal expected pattern
3) skipping expected pattern and presenting next pattern in sequence
4) addition of some other pattern putting off expected pattern one time step
Goal: Characterize the variation robustness of the UnionTemporalPooler to various
perturbations. Explore trade-off between remaining stable to variations yet
still changing when sequence actually changes.
"""
_SHOW_PROGRESS_INTERVAL = 3000
class PerturbationType(object):
  """An enum class defining the different types of perturbations used by this
  experiment.

  Values are plain ints so they can be stored in traces and tallied with
  list.count() / used as dict keys.
  """
  # No perturbation
  none = 0
  # Perturbation switching expected pattern for another random pattern
  substitution = 1
  # Perturbation skipping over expected pattern and continuing with next
  # expected pattern
  skip = 2
  # Perturbation adding in a random pattern delaying expected pattern
  add = 3
  # Perturbation switching to entirely different sequence from its beginning
  sequenceJump = 4
def runTestPhaseRandom(experiment, inputSequences, sequenceCount,
                       sequenceLength, testPresentations,
                       overallPerturbationChance, perturbationTypeChance,
                       consoleVerbosity):
  """
  Performs a number of presentations of sequences with resets afterwards.
  Sequence selection is random.
  At each step of sequence presentation there is a chance of a perturbation.
  Specifically the following perturbations may occur:
  1) Substitution of expected pattern with some other random pattern among
  the set of sequences
  2) Insertion of some other random pattern (among the set of sequences)
  that delays the expected pattern one time step
  3) Skipping of expected pattern and advancing to next pattern in the
  sequence
  4) Jump from current sequence to another randomly selected sequence
  @param experiment A UnionTemporalPoolerExperiment
  @param inputSequences List of sequences each terminated by None.
  @param sequenceCount The number of sequences in inputSequences
  @param sequenceLength Length of each sequence not counting Nones.
  @param testPresentations Number of sequences randomly selected and
                           presented during the test phase. Sequence
                           jumps do not count towards the number of
                           presentations, rather an entire sequence
                           must be presented (without sequence jumps)
                           before advancing to the next test
                           presentation.
  @param overallPerturbationChance Rate of perturbations during the test phase
  @param perturbationTypeChance A list of relative chances for each
                                perturbation type:
                                0 - substitution chance
                                1 - addition chance
                                2 - skip chance
                                3 - sequence jump chance
                                Note the chances do not need to sum to 1.0,
                                and the relative weight of each of each
                                type chance is what determines likelihood.
  @param consoleVerbosity Console output verbosity
  @return
  actualCategories - A list of the actual categories of the patterns
                     presented during the test phase
  classifiedCategores - A list of the classifications of the categories
                        of each pattern presented during the test phase
  perturbationTrace - A list of the perturbations that occurred during
                      the test phase
  """
  actualCategories = []
  classifiedCategories = []
  perturbationTrace = []
  substitutionChance = perturbationTypeChance[0]
  additionChance = perturbationTypeChance[1]
  skipChance = perturbationTypeChance[2]
  sequenceJumpChance = perturbationTypeChance[3]
  totalTypeChance = (substitutionChance + additionChance + skipChance +
                     sequenceJumpChance)
  # Compute the bounds for a wheel-of-fortune style roll
  # (normalized cumulative thresholds over the four perturbation types)
  patternSubChanceThreshold = float(substitutionChance) / totalTypeChance
  patternAddChanceThreshold = (float(substitutionChance + additionChance) /
                               totalTypeChance)
  patternSkipChanceThreshold = float(substitutionChance + additionChance +
                                     skipChance) / totalTypeChance
  if consoleVerbosity > 0:
    presentationString = "Presentation 0: "
  presentation = 0
  # Set when a sequence-jump perturbation occurs; the first pattern of the
  # next sequence is then recorded as PerturbationType.sequenceJump.
  isStartOfSequenceJump = False
  while presentation < testPresentations:
    # Randomly select the next sequence to present
    sequence = random.randint(0, sequenceCount - 1)
    if consoleVerbosity > 0:
      presentationString += "Seq-{0} ".format(sequence)
    # Present selected sequence to network
    # Each sequence occupies (sequenceLength + 1) slots in inputSequences
    # (the +1 is the trailing None reset element).
    sequenceStart = sequence + sequence * sequenceLength
    i = sequenceStart
    sequenceEnd = sequence + 1 + (sequence + 1) * sequenceLength
    while i < sequenceEnd:
      # Roll to determine if there will be a perturbation of next pattern
      if (inputSequences[i] is not None and
          random.random() < overallPerturbationChance):
        # Randomly select a perturbation type
        perturbationType = random.random()
        if perturbationType < patternSubChanceThreshold:
          # Substitute in a random pattern and move on to next pattern
          # in sequence
          currentPattern = getRandomPattern(inputSequences, sequenceStart,
                                            sequenceEnd)
          currentPerturbation = PerturbationType.substitution
          i += 1
        elif perturbationType < patternAddChanceThreshold:
          # Add in an extra random pattern
          # (i is NOT advanced: the expected pattern is delayed one step)
          currentPattern = getRandomPattern(inputSequences, sequenceStart,
                                            sequenceEnd)
          currentPerturbation = PerturbationType.add
        elif perturbationType < patternSkipChanceThreshold:
          # Skip to next pattern in sequence
          i += 1
          if i == sequenceEnd:
            # Skipped past the last pattern: feed the reset (None) directly
            experiment.runNetworkOnPattern(None,
                                           tmLearn=False,
                                           upLearn=False)
            break;
          currentPattern = inputSequences[i]
          currentPerturbation = PerturbationType.skip
          i += 1
        else:
          # Random jump to another sequence
          isStartOfSequenceJump = True
          break
      else:
        # Normal advancement of sequence
        currentPattern = inputSequences[i]
        if isStartOfSequenceJump:
          currentPerturbation = PerturbationType.sequenceJump
          isStartOfSequenceJump = False
        else:
          currentPerturbation = PerturbationType.none
        i += 1
      experiment.runNetworkOnPattern(currentPattern,
                                     tmLearn=False,
                                     upLearn=False)
      if currentPattern is not None:
        # Store classification
        unionSDR = experiment.up.getUnionSDR()
        denseUnionSDR = numpy.zeros(experiment.up.getNumColumns())
        denseUnionSDR[unionSDR] = 1.0
        classification, _, _, _ = experiment.classifier.infer(denseUnionSDR)
        # Assumes sequence number and sequence category is equivalent
        actualCategories.append(sequence)
        classifiedCategories.append(classification)
        perturbationTrace.append(currentPerturbation)
    # While presenting sequence
    else:
      # while/else: runs only when the inner loop finished without break.
      # Move to next presentation only if a sequence has been completed
      # without any sequence jumps
      presentation += 1
      isStartOfSequenceJump = False
    # Presentation finished; prepare for next one
    if consoleVerbosity > 0:
      print presentationString
      presentationString = "Presentation {0}: ".format(presentation)
  # Finished sequence presentation
  # While running test presentations
  if consoleVerbosity > 0:
    patternSubCount = perturbationTrace.count(PerturbationType.substitution)
    patternSkipCount = perturbationTrace.count(PerturbationType.skip)
    patternAddCount = perturbationTrace.count(PerturbationType.add)
    sequenceJumpCount = perturbationTrace.count(PerturbationType.sequenceJump)
    print ("\nPerturbation Counts: "
           "\nPatternSub: {0} "
           "\nPatternSkip: {1} "
           "\nPatternAdd: {2} "
           "\nSequenceJump {3}").format(patternSubCount, patternSkipCount,
                                        patternAddCount, sequenceJumpCount)
  return actualCategories, classifiedCategories, perturbationTrace
def getRandomPattern(patterns, ignoreStart, ignoreEnd):
  """Draw a random non-None entry of patterns whose index lies outside the
  inclusive range [ignoreStart, ignoreEnd].

  Re-draws until an acceptable index comes up, so the caller must guarantee
  that at least one eligible pattern exists.
  """
  lastIndex = len(patterns) - 1
  while True:
    candidate = random.randint(0, lastIndex)
    outsideIgnored = candidate < ignoreStart or candidate > ignoreEnd
    if outsideIgnored and patterns[candidate] is not None:
      return patterns[candidate]
def getPerturbedSequences(inputSequences, sequenceCount, sequenceLength,
                          exactSubstitutions):
  """Return a copy of inputSequences where, in each sequence, exactly
  exactSubstitutions randomly chosen patterns have been replaced by random
  patterns drawn from *other* sequences, together with a perturbation trace
  (one entry per non-None pattern slot).
  """
  perturbationTrace = [0] * (sequenceCount * sequenceLength)
  perturbedSequences = list(inputSequences)
  for i in xrange(sequenceCount):
    # Sequence i occupies slots [start, end) where slot end-1 holds the None
    # reset element.
    start = i + i * sequenceLength
    end = i + 1 + (i + 1) * sequenceLength
    # end - 1 because we don't want the None
    sequenceIndices = range(start, end - 1)
    subsample = random.sample(sequenceIndices, exactSubstitutions)
    for j in subsample:
      perturbedSequences[j] = getRandomPattern(inputSequences, start, end - 2)
      # Must subtract number of Nones
      # (trace has no None slots, so global index j shifts down by the i
      # reset elements that precede it)
      perturbationTrace[j - i] = PerturbationType.substitution
  return perturbedSequences, perturbationTrace
def runTestPhaseFixed(experiment, inputSequences, sequenceCount, sequenceLength,
                      exactSubstitutions, consoleVerbosity):
  """
  Runs a test phase where a fixed number of substitutions perturbations are
  performed, i.e. chance does not affect the number of substitutions that occur.
  Random chance does still affect where these perturbation occur in each
  sequence.
  @param experiment A UnionTemporalPoolerExperiment
  @param inputSequences List of sequences each terminated by None.
  @param sequenceCount The number of sequences in inputSequences
  @param sequenceLength Length of each sequence not counting Nones.
  @param exactSubstitutions The number of substitution perturbations
                            guaranteed to be made in each sequence.
  @param consoleVerbosity Console output verbosity
  @return
  actualCategories - A list of the actual categories of the patterns
                     presented during the test phase
  classifiedCategores - A list of the classifications of the categories
                        of each pattern presented during the test phase
  perturbationTrace - A list of the perturbations that occurred during
                      the test phase
  """
  actualCategories = []
  classifiedCategories = []
  perturbedSequences, perturbationTrace = getPerturbedSequences(inputSequences,
                                                                sequenceCount,
                                                                sequenceLength,
                                                                exactSubstitutions)
  for i in xrange(len(perturbedSequences)):
    experiment.runNetworkOnPattern(perturbedSequences[i],
                                   tmLearn=False,
                                   upLearn=False)
    if perturbedSequences[i] is not None:
      # Store classification
      unionSDR = experiment.up.getUnionSDR()
      denseUnionSDR = numpy.zeros(experiment.up.getNumColumns())
      denseUnionSDR[unionSDR] = 1.0
      classification, _, _, _ = experiment.classifier.infer(denseUnionSDR)
      # Assumes sequence number and sequence category is equivalent
      # Py2 integer division: each sequence spans (sequenceLength + 1) slots
      # (including the trailing None), so this maps slot -> sequence number.
      actualCategories.append(i / (sequenceLength + 1))
      classifiedCategories.append(classification)
  return actualCategories, classifiedCategories, perturbationTrace
def trainTemporalMemory(experiment, inputSequences, inputCategories,
                        trainingPasses, consoleVerbosity):
  """Run trainingPasses passes over the input sequences with Temporal Memory
  learning enabled (Union Pooler and classifier learning off), collecting
  bursting-column statistics after each pass.
  """
  burstingColsString = ""
  for i in xrange(trainingPasses):
    experiment.runNetworkOnSequences(inputSequences,
                                     inputCategories,
                                     tmLearn=True,
                                     upLearn=None,
                                     classifierLearn=False,
                                     verbosity=consoleVerbosity,
                                     progressInterval=_SHOW_PROGRESS_INTERVAL)
    if consoleVerbosity > 1:
      print
      print MonitorMixinBase.mmPrettyPrintMetrics(
        experiment.tm.mmGetDefaultMetrics())
      print
    # stats: (mean, stddev, max) of bursting columns for this pass
    stats = experiment.getBurstingColumnsStats()
    burstingColsString += "{0}\t{1}\t{2}\t{3}\n".format(i, stats[0], stats[1],
                                                        stats[2])
    # Clear the monitor-mixin traces so each pass is measured independently.
    experiment.tm.mmClearHistory()
    experiment.up.mmClearHistory()
  if consoleVerbosity > 0:
    print "\nTemporal Memory Bursting Columns stats..."
    print "Pass\tMean\t\tStdDev\t\tMax"
    print burstingColsString
def trainClassifier(experiment, inputSequences, inputCategories,
                    numberOfSequences, trainingPasses, consoleVerbosity):
  """Run trainingPasses passes over the input sequences with only the KNN
  classifier learning (TM and UP learning off), so the classifier associates
  each Union Pooler output SDR with its sequence's category label.
  """
  classifResString = ""
  for i in xrange(trainingPasses):
    experiment.runNetworkOnSequences(inputSequences,
                                     inputCategories,
                                     tmLearn=False,
                                     upLearn=False,
                                     classifierLearn=True,
                                     verbosity=consoleVerbosity,
                                     progressInterval=_SHOW_PROGRESS_INTERVAL)
    classifResString += "{0}\t\t{1}\t\t{2}\n".format(i,
                                                     experiment.classifier._numPatterns,
                                                     numberOfSequences)
    experiment.tm.mmClearHistory()
    experiment.up.mmClearHistory()
  if consoleVerbosity > 1:
    print "Pass\tClassifier Patterns\tUnique Sequences"
    print classifResString
def generateSequences(patternCardinality, patternDimensionality,
                      numberOfSequences, sequenceLength, consoleVerbosity):
  """Generate numberOfSequences random sequences of sequenceLength patterns
  each (None-terminated), plus a parallel category list giving each pattern
  its sequence index (None at sequence boundaries).

  The pattern alphabet is sized so that patterns do not repeat across
  sequences.
  """
  patternAlphabetSize = sequenceLength * numberOfSequences
  patternMachine = PatternMachine(patternDimensionality, patternCardinality,
                                  patternAlphabetSize)
  sequenceMachine = SequenceMachine(patternMachine)
  numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
  inputSequences = sequenceMachine.generateFromNumbers(numbers)
  # Category label == sequence index; None marks the reset between sequences.
  inputCategories = []
  for i in xrange(numberOfSequences):
    for _ in xrange(sequenceLength):
      inputCategories.append(i)
    inputCategories.append(None)
  if consoleVerbosity > 1:
    for i in xrange(len(inputSequences)):
      if inputSequences[i] is None:
        print
      else:
        print "{0} {1}".format(inputSequences[i], inputCategories[i])
  return inputSequences, inputCategories
def run(params, paramDir, outputDir, consoleVerbosity=0, plotVerbosity=0):
  """
  Runs the variation robustness experiment.
  :param params: A dict containing the following experiment parameters:
        patternDimensionality - Dimensionality of sequence patterns
        patternCardinality - Cardinality (# ON bits) of sequence
                             patterns
        sequenceLength - Length of sequences shown to network
        numberOfSequences - Number of unique sequences used
        trainingPasses - Number of times Temporal Memory is trained
                         on each sequence
        testPresentations - Number of sequences presented in test
                            phase
        perturbationChance - Chance of sequence perturbations during
                             test phase
        sequenceJumpPerturbationChance - Chance of a jump-sequence perturbation
                                         type
        temporalMemoryParams - A dict of Temporal Memory parameter
                               overrides
        unionPoolerParams - A dict of Union Pooler parameter overrides
        classifierParams - A dict of KNNClassifer parameter overrides
  :param paramDir: Path of parameter file
  :param outputDir: Output will be written to this path
  :param consoleVerbosity: Console output verbosity
  """
  startTime = time.time()
  print "Running Variation robustness experiment...\n"
  print "Params dir: {0}".format(os.path.join(os.path.dirname(__file__),
                                              paramDir))
  print "Output dir: {0}\n".format(os.path.join(os.path.dirname(__file__),
                                                outputDir))
  patternDimensionality = params["patternDimensionality"]
  patternCardinality = params["patternCardinality"]
  sequenceLength = params["sequenceLength"]
  numberOfSequences = params["numberOfSequences"]
  trainingPasses = params["trainingPasses"]
  testPresentations = params["testPresentations"]
  # Optional: when present, a fixed-substitution test phase is run instead of
  # the random-perturbation one (see the branch below).
  exactSubstitutions = (params["exactSubstitutions"] if "exactSubstitutions" in
                        params else None)
  perturbationChance = params["perturbationChance"]
  sequenceJumpChance = params["sequenceJumpPerturbationChance"]
  # These if-else/s are for backwards compatibility with older param files that
  # didn't specify these chances
  if "substitutionPerturbationChance" in params:
    substitutionChance = params["substitutionPerturbationChance"]
  else:
    substitutionChance = (1 - sequenceJumpChance) / 3.0
  if "addPerturbationChance" in params:
    addChance = params["addPerturbationChance"]
  else:
    addChance = (1 - sequenceJumpChance) / 3.0
  # NOTE(review): the key "skipChance" is inconsistent with the
  # "*PerturbationChance" keys above -- confirm against the YAML param files
  # before renaming.
  if "skipChance" in params:
    skipChance = params["skipChance"]
  else:
    skipChance = (1 - sequenceJumpChance) / 3.0
  perturbationTypeChance = [substitutionChance, addChance, skipChance,
                            sequenceJumpChance]
  tmParamOverrides = params["temporalMemoryParams"]
  upParamOverrides = params["unionPoolerParams"]
  classifierOverrides = params["classifierParams"]
  # Generate a sequence list and an associated labeled list (both containing a
  # set of sequences separated by None)
  print "Generating sequences..."
  inputSequences, inputCategories = generateSequences(patternCardinality,
                                                      patternDimensionality,
                                                      numberOfSequences,
                                                      sequenceLength,
                                                      consoleVerbosity)
  # Set up the Temporal Memory and Union Pooler network
  print "\nCreating network..."
  experiment = UnionTemporalPoolerExperiment(tmOverrides=tmParamOverrides,
                                             upOverrides=upParamOverrides,
                                             classifierOverrides=classifierOverrides,
                                             consoleVerbosity=0)
  # Training only the Temporal Memory on the generated sequences
  print "\nTraining Temporal Memory..."
  trainTemporalMemory(experiment, inputSequences, inputCategories,
                      trainingPasses, consoleVerbosity)
  # With learning off, but TM and UP running, train the classifier.
  print "\nTraining Classifier..."
  trainClassifier(experiment, inputSequences, inputCategories,
                  numberOfSequences, trainingPasses, consoleVerbosity)
  print "\nRunning Test Phase..."
  if exactSubstitutions is None:
    (actualCategories,
     classifiedCategories,
     perturbationTrace) = runTestPhaseRandom(experiment,
                                             inputSequences,
                                             numberOfSequences,
                                             sequenceLength,
                                             testPresentations,
                                             perturbationChance,
                                             perturbationTypeChance,
                                             consoleVerbosity)
  else:
    (actualCategories,
     classifiedCategories,
     perturbationTrace) = runTestPhaseFixed(experiment,
                                            inputSequences,
                                            numberOfSequences,
                                            sequenceLength,
                                            exactSubstitutions,
                                            consoleVerbosity)
  assert len(actualCategories) == len(classifiedCategories)
  assert len(actualCategories) == len(perturbationTrace)
  correctClassificationTrace = [1 if (actualCategories[i] ==
                                      classifiedCategories[i]) else 0
                                for i in xrange(len(actualCategories))]
  correctClassifications = correctClassificationTrace.count(1)
  classificationRate = 100.0 * correctClassifications / len(actualCategories)
  # Classification results
  print "\n*Results*"
  pprint.pprint("Actual Category {0}".format(actualCategories))
  pprint.pprint("Classification {0}".format(classifiedCategories))
  pprint.pprint("Class. Correct {0}".format(
    correctClassificationTrace))
  pprint.pprint("Perturb Type {0}".format(perturbationTrace))
  numPerturbations = (len(perturbationTrace) -
                      perturbationTrace.count(PerturbationType.none))
  # Tally misclassifications by the perturbation type active at that step.
  errorDict = {PerturbationType.none: 0,
               PerturbationType.substitution: 0,
               PerturbationType.skip: 0,
               PerturbationType.add: 0,
               PerturbationType.sequenceJump: 0}
  incorrect = 0
  for i in xrange(len(actualCategories)):
    if actualCategories[i] != classifiedCategories[i]:
      errorDict[perturbationTrace[i]] += 1.0
      incorrect += 1
  print "\n*** Correct Classification Rate: {0:.2f}%".format(classificationRate)
  print "*** Correct / Total: \t{0} / {1}".format(correctClassifications,
                                                  len(correctClassificationTrace))
  if exactSubstitutions is None:
    actualPerturbationRate = 100.0 * numPerturbations / len(perturbationTrace)
    print "\nActual perturbation rate: {0:.2f}%".format(actualPerturbationRate)
  # Guard against divide-by-zero when there were no misclassifications.
  substitutionErrorRate = (0 if incorrect == 0 else
                           100.0 * errorDict[PerturbationType.substitution] / incorrect)
  skipErrorRate = (0 if incorrect == 0 else
                   100.0 * errorDict[PerturbationType.skip] / incorrect)
  addErrorRate = (0 if incorrect == 0 else
                  100.0 * errorDict[PerturbationType.add] / incorrect)
  sequenceJumpErrorRate = (0 if incorrect == 0 else
                           100.0 * errorDict[PerturbationType.sequenceJump] / incorrect)
  noPerturbationErrorRate = (0 if incorrect == 0 else
                             100.0 * errorDict[PerturbationType.none] / incorrect)
  print "\nError Rate by Perturbation:"
  print ( "Substitution: \t{0:.2f}% "
          "\nSkip Pattern: \t{1:.2f}% "
          "\nAdd Pattern: \t{2:.2f}% "
          "\nSequence Jump: \t{3:.2f}% "
          "\nNo Perturbation: \t{4:.2f}%").format(substitutionErrorRate,
                                                  skipErrorRate,
                                                  addErrorRate,
                                                  sequenceJumpErrorRate,
                                                  noPerturbationErrorRate)
  outputFileName = ("testPresentations{0:0>3}_perturbationRate{"
                    "1:0>3}_exactSubstitutions{2:0>3}.txt").format(
    testPresentations, perturbationChance, exactSubstitutions)
  print "\nWriting results to {0}/{1}".format(outputDir, outputFileName)
  elapsedTime = (time.time() - startTime) / 60.0
  print "\nFinished in {0:.2f} minutes.".format(elapsedTime)
  writeClassificationTrace(outputDir, outputFileName, classificationRate)
def writeClassificationTrace(outputDir, outputFileName, mean):
  """
  Write classification trace to output file.
  :param outputDir: dir where output file will be written
  :param outputFileName: filename of output file
  :param mean: Mean classification performance
  """
  if not os.path.exists(outputDir):
    os.makedirs(outputDir)
  filePath = os.path.join(outputDir, outputFileName)
  # "wb" is the Python 2 csv convention; under Python 3 this would need
  # open(filePath, "w", newline="") instead.
  with open(filePath, "wb") as outputFile:
    csvWriter = csv.writer(outputFile)
    csvWriter.writerow(["Classification Statistics"])
    csvWriter.writerow([mean])
    outputFile.flush()
def _getArgs():
  """Parse command-line arguments and load the YAML experiment params.

  Returns (options, positional_args, params); exits with usage help if fewer
  than two positional arguments (PARAMS_DIR, OUTPUT_DIR) were given.
  """
  parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
                              "\n\nRun variation robustness experiment using "
                              "params in PARAMS_DIR (relative to this file) "
                              "and outputting results to OUTPUT_DIR.")
  parser.add_option("-c",
                    "--console",
                    type=int,
                    default=0,
                    dest="consoleVerbosity",
                    help="Console message verbosity: 0 => none")
  (options, args) = parser.parse_args(sys.argv[1:])
  if len(args) < 2:
    parser.print_help(sys.stderr)
    sys.exit()
  # Param path is resolved relative to this script's directory.
  absPath = os.path.join(os.path.dirname(__file__), args[0])
  with open(absPath) as paramsFile:
    params = yaml.safe_load(paramsFile)
  return options, args, params
if __name__ == "__main__":
  # Script entry point: parse CLI options + YAML params, then run experiment.
  (_options, _args, _params) = _getArgs()
  run(_params, _args[0], _args[1], _options.consoleVerbosity)
| agpl-3.0 |
jswxdzc/shadowsocks | shadowsocks/local.py | 1015 | 2248 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns
def main():
    """Entry point for the local (client-side) shadowsocks proxy.

    Loads config, optionally daemonizes, then runs DNS resolver plus TCP and
    UDP relays on a single event loop until interrupted.
    """
    shell.check_python()
    # fix py2exe
    if hasattr(sys, "frozen") and sys.frozen in \
            ("windows_exe", "console_exe"):
        p = os.path.dirname(os.path.abspath(sys.executable))
        os.chdir(p)
    config = shell.get_config(True)
    daemon.daemon_exec(config)
    try:
        logging.info("starting local at %s:%d" %
                     (config['local_address'], config['local_port']))
        dns_resolver = asyncdns.DNSResolver()
        # True => run in local (client) mode for both relays.
        tcp_server = tcprelay.TCPRelay(config, dns_resolver, True)
        udp_server = udprelay.UDPRelay(config, dns_resolver, True)
        loop = eventloop.EventLoop()
        dns_resolver.add_to_loop(loop)
        tcp_server.add_to_loop(loop)
        udp_server.add_to_loop(loop)
        # SIGQUIT (SIGTERM where SIGQUIT is unavailable, e.g. Windows)
        # triggers a graceful shutdown of both relays.
        def handler(signum, _):
            logging.warn('received SIGQUIT, doing graceful shutting down..')
            tcp_server.close(next_tick=True)
            udp_server.close(next_tick=True)
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler)
        # SIGINT (Ctrl-C) exits immediately.
        def int_handler(signum, _):
            sys.exit(1)
        signal.signal(signal.SIGINT, int_handler)
        # Drop privileges (if a user is configured) before entering the loop.
        daemon.set_user(config.get('user', None))
        loop.run()
    except Exception as e:
        shell.print_exception(e)
        sys.exit(1)
if __name__ == '__main__':
    # Run the local proxy when executed as a script.
    main()
| apache-2.0 |
ahmedaljazzar/edx-platform | manage.py | 11 | 4180 | #!/usr/bin/env python
"""
Usage: manage.py {lms|cms} [--settings env] ...
Run django management commands. Because edx-platform contains multiple django projects,
the first argument specifies which project to run (cms [Studio] or lms [Learning Management System]).
By default, those systems run in with a settings file appropriate for development. However,
by passing the --settings flag, you can specify what environment specific settings file to use.
Any arguments not understood by this manage.py will be passed to django-admin.py
"""
# pylint: disable=wrong-import-order, wrong-import-position
from __future__ import absolute_import, print_function
from openedx.core.lib.logsettings import log_python_warnings
log_python_warnings()
# Patch the xml libs before anything else.
from safe_lxml import defuse_xml_libs
defuse_xml_libs()
import importlib
import os
import sys
from argparse import ArgumentParser
import contracts
def parse_args():
    """Parse edx specific arguments to manage.py.

    Returns a tuple ``(edx_args, django_args)`` where ``edx_args`` holds the
    options understood here (system, --settings, --contracts, ...) and
    ``django_args`` is everything left over, to be forwarded to django-admin.
    Note: ``-h/--help`` is intercepted (``add_help=False``) so that both the
    edX help and, later, the Django help can be printed by the caller.
    """
    parser = ArgumentParser()
    subparsers = parser.add_subparsers(title='system', description='edX service to run')

    lms = subparsers.add_parser(
        'lms',
        help='Learning Management System',
        add_help=False,
        usage='%(prog)s [options] ...'
    )
    lms.add_argument('-h', '--help', action='store_true', help='show this help message and exit')
    lms.add_argument(
        '--settings',
        help="Which django settings module to use under lms.envs. If not provided, the DJANGO_SETTINGS_MODULE "
             "environment variable will be used if it is set, otherwise it will default to lms.envs.devstack_docker")
    lms.add_argument(
        '--service-variant',
        choices=['lms', 'lms-xml', 'lms-preview'],
        default='lms',
        help='Which service variant to run, when using the aws environment')
    lms.add_argument(
        '--contracts',
        action='store_true',
        default=False,
        help='Turn on pycontracts for local development')
    # set_defaults carries per-system constants through to the caller.
    lms.set_defaults(
        help_string=lms.format_help(),
        settings_base='lms/envs',
        default_settings='lms.envs.devstack_docker',
        startup='lms.startup',
    )

    cms = subparsers.add_parser(
        'cms',
        help='Studio',
        add_help=False,
        usage='%(prog)s [options] ...'
    )
    cms.add_argument(
        '--settings',
        help="Which django settings module to use under cms.envs. If not provided, the DJANGO_SETTINGS_MODULE "
             "environment variable will be used if it is set, otherwise it will default to cms.envs.devstack_docker")
    cms.add_argument('-h', '--help', action='store_true', help='show this help message and exit')
    cms.add_argument(
        '--contracts',
        action='store_true',
        default=False,
        help='Turn on pycontracts for local development')
    cms.set_defaults(
        help_string=cms.format_help(),
        settings_base='cms/envs',
        default_settings='cms.envs.devstack_docker',
        service_variant='cms',
        startup='cms.startup',
    )

    edx_args, django_args = parser.parse_known_args()

    if edx_args.help:
        # Print only the edX half here; Django's help is appended later
        # by the __main__ block via a forwarded '--help'.
        print("edX:")
        print(edx_args.help_string)

    return edx_args, django_args
if __name__ == "__main__":
    edx_args, django_args = parse_args()

    # Choose the Django settings module: explicit flag wins, otherwise an
    # already-set DJANGO_SETTINGS_MODULE, otherwise the per-system default.
    if edx_args.settings:
        os.environ["DJANGO_SETTINGS_MODULE"] = edx_args.settings_base.replace('/', '.') + "." + edx_args.settings
    else:
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", edx_args.default_settings)

    os.environ.setdefault("SERVICE_VARIANT", edx_args.service_variant)

    enable_contracts = os.environ.get('ENABLE_CONTRACTS', False)
    # can override with '--contracts' argument
    if not enable_contracts and not edx_args.contracts:
        contracts.disable_all()

    if edx_args.help:
        print("Django:")
        # This will trigger django-admin.py to print out its help
        django_args.append('--help')

    # Run the system-specific startup hooks before handing off to Django.
    startup = importlib.import_module(edx_args.startup)
    startup.run()

    from django.core.management import execute_from_command_line
    execute_from_command_line([sys.argv[0]] + django_args)
| agpl-3.0 |
hollabaq86/haikuna-matata | env/lib/python2.7/site-packages/numpy/distutils/fcompiler/ibm.py | 184 | 3408 | from __future__ import division, absolute_import, print_function
import os
import re
import sys
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command, find_executable
from numpy.distutils.misc_util import make_temp_file
from distutils import log
compilers = ['IBMFCompiler']
class IBMFCompiler(FCompiler):
    # numpy.distutils Fortran compiler definition for IBM XL Fortran (xlf).
    compiler_type = 'ibm'
    description = 'IBM XL Fortran Compiler'
    version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
    #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004

    executables = {
        'version_cmd'  : ["<F77>", "-qversion"],
        'compiler_f77' : ["xlf"],
        'compiler_fix' : ["xlf90", "-qfixed"],
        'compiler_f90' : ["xlf90"],
        'linker_so'    : ["xlf95"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    def get_version(self,*args,**kwds):
        # Try the generic version_pattern match first; fall back to
        # platform-specific probes when it yields nothing.
        version = FCompiler.get_version(self,*args,**kwds)

        if version is None and sys.platform.startswith('aix'):
            # use lslpp to find out xlf version
            lslpp = find_executable('lslpp')
            xlf = find_executable('xlf')
            if os.path.exists(xlf) and os.path.exists(lslpp):
                s, o = exec_command(lslpp + ' -Lc xlfcmp')
                m = re.search('xlfcmp:(?P<version>\d+([.]\d+)+)', o)
                if m: version = m.group('version')

        xlf_dir = '/etc/opt/ibmcmp/xlf'
        if version is None and os.path.isdir(xlf_dir):
            # linux:
            # If the output of xlf does not contain version info
            # (that's the case with xlf 8.1, for instance) then
            # let's try another method:
            l = sorted(os.listdir(xlf_dir))
            l.reverse()
            l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
            if l:
                # The newest directory containing an xlf.cfg names the version.
                from distutils.version import LooseVersion
                self.version = version = LooseVersion(l[0])
        return version

    def get_flags(self):
        # -qextname appends an underscore to external names (f77 linkage).
        return ['-qextname']

    def get_flags_debug(self):
        return ['-g']

    def get_flags_linker_so(self):
        opt = []
        if sys.platform=='darwin':
            opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
        else:
            opt.append('-bshared')
        # Exit status 40 is tolerated: xlf returns it for -qversion runs.
        version = self.get_version(ok_status=[0, 40])
        if version is not None:
            if sys.platform.startswith('aix'):
                xlf_cfg = '/etc/xlf.cfg'
            else:
                xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
            fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
            log.info('Creating '+new_cfg)
            fi = open(xlf_cfg, 'r')
            # Rewrite the crt entry to bundle1.o so xlf produces a loadable
            # shared object instead of linking a program entry point.
            crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
            for line in fi:
                m = crt1_match(line)
                if m:
                    fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
                else:
                    fo.write(line)
            fi.close()
            fo.close()
            opt.append('-F'+new_cfg)
        return opt

    def get_flags_opt(self):
        return ['-O3']
if __name__ == '__main__':
    # Manual smoke test: customize the compiler and print the detected version.
    log.set_verbosity(2)
    compiler = IBMFCompiler()
    compiler.customize()
    print(compiler.get_version())
| mit |
arista-eosplus/ansible | test/units/modules/network/eos/test_eos_system.py | 62 | 4530 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_system
from .eos_module import TestEosModule, load_fixture, set_module_args
class TestEosSystemModule(TestEosModule):
    """Unit tests for the eos_system Ansible module: each test sets module
    args, runs against a canned device config fixture, and asserts the exact
    CLI commands the module would push."""

    module = eos_system

    def setUp(self):
        # Patch out device I/O: get_config returns the fixture, load_config
        # records what would be sent.
        self.mock_get_config = patch('ansible.modules.network.eos.eos_system.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.eos.eos_system.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        self.get_config.return_value = load_fixture('eos_system_config.cfg')
        self.load_config.return_value = dict(diff=None, session='session')

    def test_eos_system_hostname_changed(self):
        set_module_args(dict(hostname='foo'))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_domain_name(self):
        set_module_args(dict(domain_name='test.com'))
        commands = ['ip domain-name test.com']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_domain_list(self):
        # The fixture contains ops.ansible.com; it must be removed and the
        # missing redhat.com added (ansible.com is already present).
        set_module_args(dict(domain_list=['ansible.com', 'redhat.com']))
        commands = ['no ip domain-list ops.ansible.com',
                    'ip domain-list redhat.com']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_lookup_source(self):
        set_module_args(dict(lookup_source=['Ethernet1']))
        commands = ['no ip domain lookup source-interface Management1',
                    'ip domain lookup source-interface Ethernet1']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_lookup_source_complex(self):
        # lookup_source entries may be dicts carrying an optional vrf.
        lookup_source = [{'interface': 'Management1', 'vrf': 'mgmt'},
                         {'interface': 'Ethernet1'}]
        set_module_args(dict(lookup_source=lookup_source))
        commands = ['no ip domain lookup source-interface Management1',
                    'ip domain lookup vrf mgmt source-interface Management1',
                    'ip domain lookup source-interface Ethernet1']
        self.execute_module(changed=True, commands=commands)

    # def test_eos_system_name_servers(self):
    #    name_servers = ['8.8.8.8', '8.8.4.4']
    #    set_module_args(dict(name_servers=name_servers))
    #    commands = ['ip name-server 8.8.4.4',
    #                'no ip name-server vrf mgmt 8.8.4.4']
    #    self.execute_module(changed=True, commands=commands)

    # def rest_eos_system_name_servers_complex(self):
    #    name_servers = dict(server='8.8.8.8', vrf='test')
    #    set_module_args(dict(name_servers=name_servers))
    #    commands = ['ip name-server vrf test 8.8.8.8',
    #                'no ip name-server vrf default 8.8.8.8',
    #                'no ip name-server vrf mgmt 8.8.4.4']
    #    self.execute_module(changed=True, commands=commands)

    def test_eos_system_state_absent(self):
        set_module_args(dict(state='absent'))
        commands = ['no ip domain-name', 'no hostname']
        self.execute_module(changed=True, commands=commands)

    def test_eos_system_no_change(self):
        # Args matching the fixture must be a no-op (changed=False).
        set_module_args(dict(hostname='switch01', domain_name='eng.ansible.com'))
        commands = []
        self.execute_module(commands=commands)

    def test_eos_system_missing_vrf(self):
        # Referencing a VRF absent from the device config must fail.
        name_servers = dict(server='8.8.8.8', vrf='missing')
        set_module_args(dict(name_servers=name_servers))
        result = self.execute_module(failed=True)
| gpl-3.0 |
# (dataset row: giggsey/SickRage | lib/unidecode/x01d.py)
# ASCII transliterations for the U+1D00..U+1DFF block (Phonetic Extensions).
# Only the subscript/superscript Latin letters map to an ASCII letter; every
# other code point transliterates to the empty string.
_PHONETIC_ASCII = {
    0x6c: 'b', 0x6d: 'd', 0x6e: 'f', 0x6f: 'm', 0x70: 'n', 0x71: 'p',
    0x72: 'r', 0x73: 'r', 0x74: 's', 0x75: 't', 0x76: 'z', 0x77: 'g',
    0x7d: 'p',
    0x80: 'b', 0x81: 'd', 0x82: 'f', 0x83: 'g', 0x84: 'k', 0x85: 'l',
    0x86: 'm', 0x87: 'n', 0x88: 'p', 0x89: 'r', 0x8a: 's', 0x8c: 'v',
    0x8d: 'x', 0x8e: 'z',
}

# Generate the full 256-entry table.  The previous spelled-out literal
# stopped at 0xfe (255 entries), so looking up offset 0xff (U+1DFF) raised
# IndexError; generating guarantees complete coverage of the block.
data = tuple(_PHONETIC_ASCII.get(i, '') for i in range(256))
| gpl-3.0 |
GarySparrow/mFlaskWeb | venv/Lib/site-packages/werkzeug/_reloader.py | 116 | 7938 | import os
import sys
import time
import subprocess
import threading
from itertools import chain
from werkzeug._internal import _log
from werkzeug._compat import PY2, iteritems, text_type
def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _find_observable_paths(extra_files=None):
    """Return the set of directory roots the reloader needs to watch."""
    # Start from every sys.path entry plus the directory of each extra file.
    paths = set(os.path.abspath(entry) for entry in sys.path)
    for extra in extra_files or ():
        paths.add(os.path.dirname(os.path.abspath(extra)))
    # Add the directory of every module loaded so far.
    for module in list(sys.modules.values()):
        module_file = getattr(module, '__file__', None)
        if module_file is None:
            continue
        paths.add(os.path.dirname(os.path.abspath(module_file)))
    return _find_common_roots(paths)
def _find_common_roots(paths):
    """Out of some paths it finds the common roots that need monitoring."""
    # Build a prefix tree of path components.  Longer paths are inserted
    # first so that a shorter (ancestor) path can prune everything below
    # it simply by clearing its node.
    root = {}
    ordered = sorted((p.split(os.path.sep) for p in paths),
                     key=len, reverse=True)
    for parts in ordered:
        node = root
        for part in parts:
            node = node.setdefault(part, {})
        node.clear()

    rv = set()

    def _collect(node, prefix):
        # Leaves of the pruned tree are exactly the common roots.
        for name, child in iteritems(node):
            _collect(child, prefix + (name,))
        if not node:
            rv.add('/'.join(prefix))

    _collect(root, ())
    return rv
class ReloaderLoop(object):
    """Base class for reloader strategies.  Subclasses override ``run`` to
    watch the filesystem; the parent process loops in
    ``restart_with_reloader``, re-spawning the child whenever it exits with
    the reserved status code 3 (set by ``trigger_reload``)."""

    name = None

    # monkeypatched by testsuite. wrapping with `staticmethod` is required in
    # case time.sleep has been replaced by a non-c function (e.g. by
    # `eventlet.monkey_patch`) before we get here
    _sleep = staticmethod(time.sleep)

    def __init__(self, extra_files=None, interval=1):
        # extra_files: additional files to watch beyond loaded modules.
        # interval: polling delay in seconds between filesystem checks.
        self.extra_files = set(os.path.abspath(x)
                               for x in extra_files or ())
        self.interval = interval

    def run(self):
        # Subclasses implement the actual watch loop.
        pass

    def restart_with_reloader(self):
        """Spawn a new Python interpreter with the same arguments as this one,
        but running the reloader thread.
        """
        while 1:
            _log('info', ' * Restarting with %s' % self.name)
            args = [sys.executable] + sys.argv
            new_environ = os.environ.copy()
            # The child recognizes this variable and runs the app + watcher.
            new_environ['WERKZEUG_RUN_MAIN'] = 'true'

            # a weird bug on windows. sometimes unicode strings end up in the
            # environment and subprocess.call does not like this, encode them
            # to latin1 and continue.
            if os.name == 'nt' and PY2:
                for key, value in iteritems(new_environ):
                    if isinstance(value, text_type):
                        new_environ[key] = value.encode('iso-8859-1')

            exit_code = subprocess.call(args, env=new_environ)
            # Exit code 3 means "a file changed, restart me"; anything else
            # is a real exit and is propagated to the caller.
            if exit_code != 3:
                return exit_code

    def trigger_reload(self, filename):
        # Log the change and leave the child with the restart status code.
        filename = os.path.abspath(filename)
        _log('info', ' * Detected change in %r, reloading' % filename)
        sys.exit(3)
class StatReloaderLoop(ReloaderLoop):
    """Polling reloader: stat()s every watched file each interval and
    restarts when an mtime increases.  Never returns normally — it exits
    the process via ``trigger_reload``."""

    name = 'stat'

    def run(self):
        mtimes = {}  # filename -> last seen st_mtime
        while 1:
            for filename in chain(_iter_module_files(), self.extra_files):
                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # File vanished or is unreadable; skip silently.
                    continue

                old_time = mtimes.get(filename)
                if old_time is None:
                    # First sighting: record baseline, don't reload.
                    mtimes[filename] = mtime
                    continue
                elif mtime > old_time:
                    self.trigger_reload(filename)
            self._sleep(self.interval)
class WatchdogReloaderLoop(ReloaderLoop):
    """Event-driven reloader built on the ``watchdog`` package: reacts to
    create/modify notifications instead of polling mtimes."""

    def __init__(self, *args, **kwargs):
        ReloaderLoop.__init__(self, *args, **kwargs)
        # Imported lazily so the module can be loaded without watchdog.
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
        self.observable_paths = set()

        def _check_modification(filename):
            # Reload for explicitly watched files, or for Python sources
            # under any currently observed root.
            if filename in self.extra_files:
                self.trigger_reload(filename)
            dirname = os.path.dirname(filename)
            if dirname.startswith(tuple(self.observable_paths)):
                if filename.endswith(('.pyc', '.pyo')):
                    # Map compiled file back to its .py source.
                    self.trigger_reload(filename[:-1])
                elif filename.endswith('.py'):
                    self.trigger_reload(filename)

        class _CustomHandler(FileSystemEventHandler):

            def on_created(self, event):
                _check_modification(event.src_path)

            def on_modified(self, event):
                _check_modification(event.src_path)

        # Derive a human-readable name from the active observer backend,
        # e.g. InotifyObserver -> "inotify reloader".
        reloader_name = Observer.__name__.lower()
        if reloader_name.endswith('observer'):
            reloader_name = reloader_name[:-8]
        reloader_name += ' reloader'

        self.name = reloader_name

        self.observer_class = Observer
        self.event_handler = _CustomHandler()
        self.should_reload = False

    def trigger_reload(self, filename):
        # This is called inside an event handler, which means we can't throw
        # SystemExit here. https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        ReloaderLoop.trigger_reload(self, filename)

    def run(self):
        watches = {}  # root path -> watchdog watch handle (or None)
        observer = self.observer_class()
        observer.start()

        while not self.should_reload:
            # Re-sync the set of scheduled watches with the roots that are
            # currently observable (modules may have been imported since).
            to_delete = set(watches)
            paths = _find_observable_paths(self.extra_files)
            for path in paths:
                if path not in watches:
                    try:
                        watches[path] = observer.schedule(
                            self.event_handler, path, recursive=True)
                    except OSError:
                        # "Path is not a directory". We could filter out
                        # those paths beforehand, but that would cause
                        # additional stat calls.
                        watches[path] = None
                to_delete.discard(path)
            for path in to_delete:
                watch = watches.pop(path, None)
                if watch is not None:
                    observer.unschedule(watch)
            self.observable_paths = paths
            self._sleep(self.interval)

        # Match the stat reloader's protocol: status 3 = restart me.
        sys.exit(3)
# Registry of available reloader strategies, keyed by the reloader_type
# argument of run_with_reloader.
reloader_loops = {
    'stat': StatReloaderLoop,
    'watchdog': WatchdogReloaderLoop,
}

# 'auto' prefers the event-driven watchdog backend when the package is
# importable and falls back to mtime polling otherwise.
try:
    __import__('watchdog.observers')
except ImportError:
    reloader_loops['auto'] = reloader_loops['stat']
else:
    reloader_loops['auto'] = reloader_loops['watchdog']
def run_with_reloader(main_func, extra_files=None, interval=1,
                      reloader_type='auto'):
    """Run the given function in an independent python interpreter.

    Parent process: re-spawns itself with WERKZEUG_RUN_MAIN set and waits.
    Child process: runs *main_func* on a daemon thread while the reloader
    loop watches the filesystem on the main thread; a change exits the
    child with code 3, which makes the parent restart it.
    """
    import signal
    reloader = reloader_loops[reloader_type](extra_files, interval)
    # Make SIGTERM exit cleanly instead of leaving the child orphaned.
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
            # Child: app on a daemon thread, watcher on the main thread.
            t = threading.Thread(target=main_func, args=())
            t.setDaemon(True)
            t.start()
            reloader.run()
        else:
            # Parent: loop spawning children until a real exit code arrives.
            sys.exit(reloader.restart_with_reloader())
    except KeyboardInterrupt:
        pass
| mit |
ctk3b/mdtraj | mdtraj/tests/test_rmsd_memmap.py | 5 | 1405 | import os
import os.path
import numpy as np
import tempfile
import mdtraj as md
from mdtraj.testing import get_fn, assert_raises
def test_1():
    # Regression test for https://github.com/mdtraj/mdtraj/issues/438:
    # md.rmsd must reject read-only memory-mapped coordinates (centering
    # mutates xyz in place) but accept copy-on-write maps.
    import shutil
    # Create the workspace before the try block: if mkdtemp fails there is
    # nothing to clean up yet.
    dir = tempfile.mkdtemp()
    fn = os.path.join(dir, 'temp.npy')
    traj = None  # ensure the finally block never hits an unbound name
    try:
        traj = md.load(get_fn('frame0.h5'))
        np.save(fn, traj.xyz)
        traj.xyz = np.load(fn, mmap_mode='r')
        # since traj isn't precentered, this requires centering
        # the coordinates which is done inplace. but that's not possible
        # with mmap_mode = 'r'
        with assert_raises(ValueError):
            md.rmsd(traj, traj, 0)
        # this should work
        traj.xyz = np.load(fn, mmap_mode='c')
        md.rmsd(traj, traj, 0)
    finally:
        # Drop the trajectory first so its mmap releases the file, then
        # remove the whole temp tree even if np.save never ran.
        del traj
        shutil.rmtree(dir, ignore_errors=True)
def test_2():
    # Regression test for https://github.com/mdtraj/mdtraj/issues/438:
    # even with precentered=True, read-only memory-mapped coordinates must
    # be rejected by md.rmsd.
    import shutil
    dir = tempfile.mkdtemp()
    fn = os.path.join(dir, 'temp.npy')
    traj = None  # ensure the finally block never hits an unbound name
    try:
        traj = md.load(get_fn('frame0.h5'))
        # precenter the coordinates
        traj.center_coordinates()
        traces = traj._rmsd_traces
        np.save(fn, traj.xyz)
        traj.xyz = np.load(fn, mmap_mode='r')
        # restore the traces computed before xyz was swapped for the mmap
        traj._rmsd_traces = traces
        with assert_raises(ValueError):
            md.rmsd(traj, traj, 0, precentered=True)
    finally:
        # Release the mmap before deleting the temp tree; rmtree also
        # covers the case where np.save never created the file.
        del traj
        shutil.rmtree(dir, ignore_errors=True)
| lgpl-2.1 |
NuAoA/mopidy-alcd | mopidy_AdafruitLCD/Adafruit_LCD_frontend.py | 1 | 3199 | #!/usr/bin/env python
import logging
import traceback
import pykka
import mopidy
import sys
import re #todo: remove
import threading
from time import sleep
from mopidy import core
from .Adafruit_player import AdafruitPlayer
logger = logging.getLogger(__name__)
class AdafruitLCD(pykka.ThreadingActor, core.CoreListener):
	"""Mopidy frontend actor that mirrors playback state onto an Adafruit
	character LCD plate via AdafruitPlayer, and scans the library for
	available media backends at startup."""

	def __init__(self,config,core):
		super(AdafruitLCD,self).__init__()
		self.core = core
		self.player = AdafruitPlayer(core)
		# Background scan thread; started from on_start, not here.
		self.startup = threading.Thread(target=self.media_scan)
		#self.player.run()

	def media_scan(self):
		# Poll the library for top-level media sources and show their names
		# on line 1 of the LCD until a timeout elapses or the user enters
		# the menus.
		media_list = []
		timeout = 0
		self.player.plate.smessage("Loading Media...")
		sleep(2)
		while self.player.running:
			if timeout>=50 or self.player.inMenus:
				if not self.player.inMenus:
					if len(media_list)==0:
						self.player.plate.smessage("No Media Found",line=1)
					elif self.player.track!=None:
						self.player.displaySongInfo()
				break
			update = False
			# NOTE(review): 'list' shadows the builtin; kept as-is.
			list = self.core.library.browse(None).get()
			for media in list:
				if media.name in media_list:
					pass
				else:
					# One new source per pass; redisplay and keep polling.
					media_list.append(media.name)
					update = True
					break
			if not self.player.inMenus:
				if len(media_list) > 0:
					if update:
						# NOTE(review): 'str' shadows the builtin; kept as-is.
						# Builds "newest, older, oldest" by prepending.
						str = ""
						for item in media_list:
							if str != "":
								str = item+", "+str
							else:
								str = item
						self.player.plate.smessage(str.ljust(16),line=1)
						sleep(1)
					else:
						sleep(5)
				else:
					sleep(5)
			timeout+=1

	def on_start(self):
		# Actor lifecycle hook: start the player loop and the media scan.
		logger.info("[ALCD] Starting AdafruitLCD")
		self.player.start()
		self.startup.start()

	def on_stop(self):
		logger.info("[ALCD] Stopping AdafruitLCD")
		self.player.stop()

	def track_playback_ended(self,tl_track, time_position):
		logger.info("[ALCD] track playback ended")
		self.player.track_playback_ended(tl_track.track)

	def track_playback_started(self,tl_track):
		try:
			logger.info("[ALCD] Now playing:")
			try:
				for artist in tl_track.track.artists:
					logger.info("[ALCD] >"+tl_track.track.name+ " by " +artist.name)
			except:
				traceback.print_exc()
			self.player.updateCurrentTrack(tl_track.track)
		except:
			traceback.print_exc()

	def playback_state_changed(self,old_state,new_state):
		try:
			#logger.info("[ALCD] Playback state changed from " + old_state + " to " + new_state)
			self.player.updatePlaybackState(old_state,new_state)
		except:
			traceback.print_exc()

	def print_tracks(self,tl_track_list):
		# Debug helper: log every track name in a tracklist.
		for tltrack in tl_track_list:
			logger.info("[ALCD] " + tltrack.track.name)

	"""
	def playlists_loaded(self):
		logger.info("[ALCD] Playlists:")
		try:
			for playlist in self.core.playlists.playlists.get():
				if re.search("spotify:user:spotify",playlist.uri):
					self.core.tracklist.add(tracks=playlist.tracks)
					self.core.playback.play()
		except:
			traceback.print_exc()

	def tracklist_changed(self):
		logger.info("[ALCD] Tracklist updated")
		print(" Total: "+str(len(self.core.tracklist.tl_tracks.get())))
		#self.print_tracks(self.core.tracklist.tl_tracks.get())

	def track_playback_ended(self,tl_track,time_position):
		logger.info("[ALCD] Playback Ended")
	"""
| apache-2.0 |
xiangel/hue | desktop/core/ext-py/boto-2.38.0/boto/configservice/layer1.py | 56 | 17092 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.configservice import exceptions
class ConfigServiceConnection(AWSQueryConnection):
    """
    AWS Config

    AWS Config provides a way to keep track of the configurations of
    all the AWS resources associated with your AWS account. You can
    use AWS Config to get the current and historical configurations of
    each AWS resource and also to get information about the
    relationship between the resources. An AWS resource can be an
    Amazon Compute Cloud (Amazon EC2) instance, an Elastic Block Store
    (EBS) volume, an Elastic network Interface (ENI), or a security
    group. For a complete list of resources currently supported by AWS
    Config, see `Supported AWS Resources`_.

    You can access and manage AWS Config through the AWS Management
    Console, the AWS Command Line Interface (AWS CLI), the AWS Config
    API, or the AWS SDKs for AWS Config

    This reference guide contains documentation for the AWS Config API
    and the AWS CLI commands that you can use to manage AWS Config.

    The AWS Config API uses the Signature Version 4 protocol for
    signing requests. For more information about how to sign a request
    with this protocol, see `Signature Version 4 Signing Process`_.

    For detailed information about AWS Config features and their
    associated actions or commands, as well as how to work with AWS
    Management Console, see `What Is AWS Config?`_ in the AWS Config
    Developer Guide .
    """
    # Service metadata used by AWSQueryConnection for endpoint selection
    # and the X-Amz-Target request header.
    APIVersion = "2014-11-12"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "config.us-east-1.amazonaws.com"
    ServiceName = "ConfigService"
    TargetPrefix = "StarlingDoveService"
    ResponseError = JSONResponseError

    # Maps service error codes in JSON responses to boto exception classes.
    _faults = {
        "InvalidLimitException": exceptions.InvalidLimitException,
        "NoSuchBucketException": exceptions.NoSuchBucketException,
        "InvalidSNSTopicARNException": exceptions.InvalidSNSTopicARNException,
        "ResourceNotDiscoveredException": exceptions.ResourceNotDiscoveredException,
        "MaxNumberOfDeliveryChannelsExceededException": exceptions.MaxNumberOfDeliveryChannelsExceededException,
        "LastDeliveryChannelDeleteFailedException": exceptions.LastDeliveryChannelDeleteFailedException,
        "InsufficientDeliveryPolicyException": exceptions.InsufficientDeliveryPolicyException,
        "InvalidRoleException": exceptions.InvalidRoleException,
        "InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
        "NoSuchDeliveryChannelException": exceptions.NoSuchDeliveryChannelException,
        "NoSuchConfigurationRecorderException": exceptions.NoSuchConfigurationRecorderException,
        "InvalidS3KeyPrefixException": exceptions.InvalidS3KeyPrefixException,
        "InvalidDeliveryChannelNameException": exceptions.InvalidDeliveryChannelNameException,
        "NoRunningConfigurationRecorderException": exceptions.NoRunningConfigurationRecorderException,
        "ValidationException": exceptions.ValidationException,
        "NoAvailableConfigurationRecorderException": exceptions.NoAvailableConfigurationRecorderException,
        "InvalidNextTokenException": exceptions.InvalidNextTokenException,
        "InvalidConfigurationRecorderNameException": exceptions.InvalidConfigurationRecorderNameException,
        "NoAvailableDeliveryChannelException": exceptions.NoAvailableDeliveryChannelException,
        "MaxNumberOfConfigurationRecordersExceededException": exceptions.MaxNumberOfConfigurationRecordersExceededException,
    }
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(ConfigServiceConnection, self).__init__(**kwargs)
self.region = region
    def _required_auth_capability(self):
        # AWS Config requires Signature Version 4 request signing.
        return ['hmac-v4']
def delete_delivery_channel(self, delivery_channel_name):
"""
Deletes the specified delivery channel.
The delivery channel cannot be deleted if it is the only
delivery channel and the configuration recorder is still
running. To delete the delivery channel, stop the running
configuration recorder using the StopConfigurationRecorder
action.
:type delivery_channel_name: string
:param delivery_channel_name: The name of the delivery channel to
delete.
"""
params = {'DeliveryChannelName': delivery_channel_name, }
return self.make_request(action='DeleteDeliveryChannel',
body=json.dumps(params))
def deliver_config_snapshot(self, delivery_channel_name):
"""
Schedules delivery of a configuration snapshot to the Amazon
S3 bucket in the specified delivery channel. After the
delivery has started, AWS Config sends following notifications
using an Amazon SNS topic that you have specified.
+ Notification of starting the delivery.
+ Notification of delivery completed, if the delivery was
successfully completed.
+ Notification of delivery failure, if the delivery failed to
complete.
:type delivery_channel_name: string
:param delivery_channel_name: The name of the delivery channel through
which the snapshot is delivered.
"""
params = {'deliveryChannelName': delivery_channel_name, }
return self.make_request(action='DeliverConfigSnapshot',
body=json.dumps(params))
def describe_configuration_recorder_status(self,
configuration_recorder_names=None):
"""
Returns the current status of the specified configuration
recorder. If a configuration recorder is not specified, this
action returns the status of all configuration recorder
associated with the account.
:type configuration_recorder_names: list
:param configuration_recorder_names: The name(s) of the configuration
recorder. If the name is not specified, the action returns the
current status of all the configuration recorders associated with
the account.
"""
params = {}
if configuration_recorder_names is not None:
params['ConfigurationRecorderNames'] = configuration_recorder_names
return self.make_request(action='DescribeConfigurationRecorderStatus',
body=json.dumps(params))
def describe_configuration_recorders(self,
configuration_recorder_names=None):
"""
Returns the name of one or more specified configuration
recorders. If the recorder name is not specified, this action
returns the names of all the configuration recorders
associated with the account.
:type configuration_recorder_names: list
:param configuration_recorder_names: A list of configuration recorder
names.
"""
params = {}
if configuration_recorder_names is not None:
params['ConfigurationRecorderNames'] = configuration_recorder_names
return self.make_request(action='DescribeConfigurationRecorders',
body=json.dumps(params))
def describe_delivery_channel_status(self, delivery_channel_names=None):
"""
Returns the current status of the specified delivery channel.
If a delivery channel is not specified, this action returns
the current status of all delivery channels associated with
the account.
:type delivery_channel_names: list
:param delivery_channel_names: A list of delivery channel names.
"""
params = {}
if delivery_channel_names is not None:
params['DeliveryChannelNames'] = delivery_channel_names
return self.make_request(action='DescribeDeliveryChannelStatus',
body=json.dumps(params))
def describe_delivery_channels(self, delivery_channel_names=None):
"""
Returns details about the specified delivery channel. If a
delivery channel is not specified, this action returns the
details of all delivery channels associated with the account.
:type delivery_channel_names: list
:param delivery_channel_names: A list of delivery channel names.
"""
params = {}
if delivery_channel_names is not None:
params['DeliveryChannelNames'] = delivery_channel_names
return self.make_request(action='DescribeDeliveryChannels',
body=json.dumps(params))
def get_resource_config_history(self, resource_type, resource_id,
later_time=None, earlier_time=None,
chronological_order=None, limit=None,
next_token=None):
"""
Returns a list of configuration items for the specified
resource. The list contains details about each state of the
resource during the specified time interval. You can specify a
`limit` on the number of results returned on the page. If a
limit is specified, a `nextToken` is returned as part of the
result that you can use to continue this request.
:type resource_type: string
:param resource_type: The resource type.
:type resource_id: string
:param resource_id: The ID of the resource (for example., `sg-xxxxxx`).
:type later_time: timestamp
:param later_time: The time stamp that indicates a later time. If not
specified, current time is taken.
:type earlier_time: timestamp
:param earlier_time: The time stamp that indicates an earlier time. If
not specified, the action returns paginated results that contain
configuration items that start from when the first configuration
item was recorded.
:type chronological_order: string
:param chronological_order: The chronological order for configuration
items listed. By default the results are listed in reverse
chronological order.
:type limit: integer
:param limit: The maximum number of configuration items returned in
each page. The default is 10. You cannot specify a limit greater
than 100.
:type next_token: string
:param next_token: An optional parameter used for pagination of the
results.
"""
params = {
'resourceType': resource_type,
'resourceId': resource_id,
}
if later_time is not None:
params['laterTime'] = later_time
if earlier_time is not None:
params['earlierTime'] = earlier_time
if chronological_order is not None:
params['chronologicalOrder'] = chronological_order
if limit is not None:
params['limit'] = limit
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='GetResourceConfigHistory',
body=json.dumps(params))
def put_configuration_recorder(self, configuration_recorder):
"""
Creates a new configuration recorder to record the resource
configurations.
You can use this action to change the role ( `roleARN`) of an
existing recorder. To change the role, call the action on the
existing configuration recorder and specify a role.
:type configuration_recorder: dict
:param configuration_recorder: The configuration recorder object that
records each configuration change made to the resources.
"""
params = {'ConfigurationRecorder': configuration_recorder, }
return self.make_request(action='PutConfigurationRecorder',
body=json.dumps(params))
def put_delivery_channel(self, delivery_channel):
"""
Creates a new delivery channel object to deliver the
configuration information to an Amazon S3 bucket, and to an
Amazon SNS topic.
You can use this action to change the Amazon S3 bucket or an
Amazon SNS topic of the existing delivery channel. To change
the Amazon S3 bucket or an Amazon SNS topic, call this action
and specify the changed values for the S3 bucket and the SNS
topic. If you specify a different value for either the S3
bucket or the SNS topic, this action will keep the existing
value for the parameter that is not changed.
:type delivery_channel: dict
:param delivery_channel: The configuration delivery channel object that
delivers the configuration information to an Amazon S3 bucket, and
to an Amazon SNS topic.
"""
params = {'DeliveryChannel': delivery_channel, }
return self.make_request(action='PutDeliveryChannel',
body=json.dumps(params))
def start_configuration_recorder(self, configuration_recorder_name):
"""
Starts recording configurations of all the resources
associated with the account.
You must have created at least one delivery channel to
successfully start the configuration recorder.
:type configuration_recorder_name: string
:param configuration_recorder_name: The name of the recorder object
that records each configuration change made to the resources.
"""
params = {
'ConfigurationRecorderName': configuration_recorder_name,
}
return self.make_request(action='StartConfigurationRecorder',
body=json.dumps(params))
def stop_configuration_recorder(self, configuration_recorder_name):
"""
Stops recording configurations of all the resources associated
with the account.
:type configuration_recorder_name: string
:param configuration_recorder_name: The name of the recorder object
that records each configuration change made to the resources.
"""
params = {
'ConfigurationRecorderName': configuration_recorder_name,
}
return self.make_request(action='StopConfigurationRecorder',
body=json.dumps(params))
    def make_request(self, action, body):
        """
        POST a JSON request to the AWS Config endpoint and decode the reply.

        :type action: string
        :param action: API action name, sent as ``<TargetPrefix>.<action>``
            in the ``X-Amz-Target`` header.
        :type body: string
        :param body: JSON-encoded request payload.

        Returns the decoded JSON response, or None when the service
        answers 200 with an empty body.  On any non-200 status, raises
        the fault class mapped from the response's ``__type`` field (or
        ``self.ResponseError`` when the fault is unknown).
        """
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        http_request = self.build_base_http_request(
            method='POST', path='/', auth_path='/', params={},
            headers=headers, data=body)
        # _mexe executes the request with retries (up to 10 attempts here).
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
                return json.loads(response_body)
        else:
            # Error responses carry a JSON document whose '__type' names the
            # fault; map it to a specific exception class when we know it.
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
| apache-2.0 |
holmes/intellij-community | python/helpers/pydev/pydevd_breakpoints.py | 19 | 5217 | from pydevd_constants import *
import pydevd_tracing
import sys
import pydev_log
import pydevd_import_class
_original_excepthook = None
_handle_exceptions = None
import _pydev_threading as threading
threadingCurrentThread = threading.currentThread
from pydevd_comm import GetGlobalDebugger
class ExceptionBreakpoint:
    """An exception the debugger should break on, resolved from the
    exception's fully qualified name via _get_class."""

    def __init__(self, qname, notify_always, notify_on_terminate,
                 notify_on_first_raise_only, ignore_libraries):
        exctype = _get_class(qname)
        self.qname = qname
        # name is the bare class name, or None when the class could not
        # be resolved.
        self.name = None if exctype is None else exctype.__name__
        self.notify_on_terminate = notify_on_terminate
        self.notify_always = notify_always
        self.notify_on_first_raise_only = notify_on_first_raise_only
        self.ignore_libraries = ignore_libraries
        self.type = exctype

    def __str__(self):
        # Displayed by its fully qualified exception name.
        return self.qname
class LineBreakpoint(object):
    """A breakpoint on a source line, with an optional condition,
    function-name filter and logging expression."""

    def __init__(self, line, condition, func_name, expression):
        # Store the location and the user-supplied qualifiers verbatim.
        self.line = line
        self.condition = condition
        self.func_name = func_name
        self.expression = expression
def get_exception_full_qname(exctype):
    """Return "module.ClassName" for *exctype*, or None when it is falsy."""
    if exctype:
        return '%s.%s' % (exctype.__module__, exctype.__name__)
    return None
def get_exception_name(exctype):
    """Return the bare class name of *exctype*, or None when it is falsy."""
    return exctype.__name__ if exctype else None
def get_exception_breakpoint(exctype, exceptions):
    """Find the breakpoint registered for *exctype* in *exceptions*
    (a dict mapping fully-qualified names to breakpoints).

    An exact qualified-name match wins; otherwise the most specific
    registered base class of *exctype* is returned, or None.
    """
    if exceptions is None:
        return None
    # Inlined fully-qualified-name computation (module.ClassName).
    if exctype:
        qname = str(exctype.__module__) + '.' + exctype.__name__
    else:
        qname = None
    try:
        return exceptions[qname]
    except KeyError:
        pass
    best = None
    for candidate in DictIterValues(exceptions):
        if candidate.type is None or not issubclass(exctype, candidate.type):
            continue
        # Prefer the most derived matching base class.
        if best is None or issubclass(candidate.type, best.type):
            best = candidate
    return best
#=======================================================================================================================
# _excepthook
#=======================================================================================================================
def _excepthook(exctype, value, tb):
    """sys.excepthook replacement: after delegating to the original hook,
    trigger a post-mortem debugger stop when an exception breakpoint
    matches the uncaught exception."""
    global _handle_exceptions
    if _handle_exceptions:
        exception_breakpoint = get_exception_breakpoint(exctype, _handle_exceptions)
    else:
        exception_breakpoint = None
    #Always call the original excepthook before going on to call the debugger post mortem to show it.
    _original_excepthook(exctype, value, tb)
    if not exception_breakpoint:
        return
    if tb is None: #sometimes it can be None, e.g. with GTK
        return
    frames = []
    debugger = GetGlobalDebugger()
    user_frames = []
    # Walk the traceback collecting every frame; when the breakpoint asks
    # to ignore library code, also keep a filtered list of user-code frames.
    while tb:
        frame = tb.tb_frame
        if exception_breakpoint.ignore_libraries and not debugger.not_in_scope(frame.f_code.co_filename):
            user_frames.append(tb.tb_frame)
        frames.append(tb.tb_frame)
        tb = tb.tb_next
    thread = threadingCurrentThread()
    frames_byid = dict([(id(frame),frame) for frame in frames])
    # Stop at the innermost relevant frame.
    # NOTE(review): when ignore_libraries is set and no user frame matched,
    # user_frames[-1] raises IndexError -- confirm that callers guarantee
    # at least one in-scope frame.
    if exception_breakpoint.ignore_libraries:
        frame = user_frames[-1]
    else:
        frame = frames[-1]
    # NOTE(review): tb is exhausted (None) after the loop above, so the
    # traceback stored here is always None -- confirm whether the original
    # traceback was meant to be preserved.
    thread.additionalInfo.exception = (exctype, value, tb)
    thread.additionalInfo.pydev_force_stop_at_exception = (frame, frames_byid)
    thread.additionalInfo.message = exception_breakpoint.qname
    pydevd_tracing.SetTrace(None) #no tracing from here
    pydev_log.debug('Handling post-mortem stop on exception breakpoint %s'% exception_breakpoint.qname)
    debugger.handle_post_mortem_stop(thread.additionalInfo, thread)
#=======================================================================================================================
# _set_pm_excepthook
#=======================================================================================================================
def _set_pm_excepthook(handle_exceptions_dict=None):
    '''
    Should be called to register the excepthook to be used.

    It's only useful for uncaught exceptions. I.e.: exceptions that go up to the excepthook.

    @param handle_exceptions: dict(exception -> ExceptionBreakpoint)
        The exceptions that should be handled.
    '''
    global _handle_exceptions
    global _original_excepthook
    if sys.excepthook != _excepthook:
        #Only keep the original if it's not our own _excepthook (if called many times).
        _original_excepthook = sys.excepthook
    # Record the breakpoint table and install our hook in its place.
    _handle_exceptions = handle_exceptions_dict
    sys.excepthook = _excepthook
def _restore_pm_excepthook():
    # Undo _set_pm_excepthook: reinstate whatever hook was active before
    # ours was installed, then forget it so a repeated call is a no-op.
    global _original_excepthook
    if _original_excepthook:
        sys.excepthook = _original_excepthook
        _original_excepthook = None
def update_exception_hook(dbg):
    # Keep sys.excepthook in sync with the debugger configuration:
    # install the post-mortem hook while break-on-uncaught breakpoints
    # exist, otherwise restore the previously active hook.
    if dbg.break_on_uncaught_exceptions:
        _set_pm_excepthook(dbg.break_on_uncaught_exceptions)
    else:
        _restore_pm_excepthook()
def _get_class( kls ):
    # Resolve an exception name (possibly dotted) to a class object.
    # Python 2.4 has no BaseException, so substitute Exception there.
    if IS_PY24 and "BaseException" == kls:
        kls = "Exception"
    try:
        return eval(kls)
    except:
        # Intentionally broad: any failure to eval the name (undefined,
        # dotted path, bad syntax) falls back to an import-based lookup.
        # NOTE(review): eval of the breakpoint name assumes it comes from
        # the trusted debugger front-end, not arbitrary user input.
        return pydevd_import_class.ImportName(kls)
| apache-2.0 |
Lekensteyn/buildbot | master/buildbot/test/unit/test_db_schedulers.py | 10 | 17020 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.db import schedulers
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import connector_component
from buildbot.test.util import db
from buildbot.test.util import interfaces
from buildbot.test.util import validation
class Tests(interfaces.InterfaceTests):
    """Interface tests for the schedulers DB connector component.

    Shared by TestFakeDB and TestRealDB below; subclasses supply
    ``insertTestData`` and ``addClassifications``.
    """
    # test data: rows the individual tests insert into the (fake or real) DB.
    ss92 = fakedb.SourceStamp(id=92)
    change3 = fakedb.Change(changeid=3)
    change4 = fakedb.Change(changeid=4)
    change5 = fakedb.Change(changeid=5)
    change6 = fakedb.Change(changeid=6, branch='sql')
    scheduler24 = fakedb.Scheduler(id=24, name='schname')
    master13 = fakedb.Master(id=13, name='m1', active=1)
    scheduler24master = fakedb.SchedulerMaster(schedulerid=24, masterid=13)
    scheduler25 = fakedb.Scheduler(id=25, name='schname2')
    master14 = fakedb.Master(id=14, name='m2', active=0)
    scheduler25master = fakedb.SchedulerMaster(schedulerid=25, masterid=14)
    # tests
    def test_signature_enable(self):
        @self.assertArgSpecMatches(self.db.schedulers.enable)
        def enable(self, schedulerid, v):
            pass
    @defer.inlineCallbacks
    def test_enable(self):
        # toggling 'enabled' off and back on must round-trip through the DB
        yield self.insertTestData([self.scheduler24, self.master13,
                                   self.scheduler24master])
        sch = yield self.db.schedulers.getScheduler(24)
        validation.verifyDbDict(self, 'schedulerdict', sch)
        self.assertEqual(sch, dict(
            id=24,
            name='schname',
            enabled=True,
            masterid=13))
        yield self.db.schedulers.enable(24, False)
        sch = yield self.db.schedulers.getScheduler(24)
        validation.verifyDbDict(self, 'schedulerdict', sch)
        self.assertEqual(sch, dict(
            id=24,
            name='schname',
            enabled=False,
            masterid=13))
        yield self.db.schedulers.enable(24, True)
        sch = yield self.db.schedulers.getScheduler(24)
        validation.verifyDbDict(self, 'schedulerdict', sch)
        self.assertEqual(sch, dict(
            id=24,
            name='schname',
            enabled=True,
            masterid=13))
    def test_signature_classifyChanges(self):
        @self.assertArgSpecMatches(self.db.schedulers.classifyChanges)
        def classifyChanges(self, schedulerid, classifications):
            pass
    @defer.inlineCallbacks
    def test_classifyChanges(self):
        yield self.insertTestData([self.ss92, self.change3, self.change4,
                                   self.scheduler24])
        yield self.db.schedulers.classifyChanges(24,
                                                 {3: False, 4: True})
        res = yield self.db.schedulers.getChangeClassifications(24)
        self.assertEqual(res, {3: False, 4: True})
    @defer.inlineCallbacks
    def test_classifyChanges_again(self):
        # test reclassifying changes, which may happen during some timing
        # conditions. It's important that this test uses multiple changes,
        # only one of which already exists
        yield self.insertTestData([
            self.ss92,
            self.change3,
            self.change4,
            self.change5,
            self.change6,
            self.scheduler24,
            fakedb.SchedulerChange(schedulerid=24, changeid=5, important=0),
        ])
        yield self.db.schedulers.classifyChanges(
            24, {3: True, 4: False, 5: True, 6: False})
        res = yield self.db.schedulers.getChangeClassifications(24)
        self.assertEqual(res, {3: True, 4: False, 5: True, 6: False})
    def test_signature_flushChangeClassifications(self):
        @self.assertArgSpecMatches(
            self.db.schedulers.flushChangeClassifications)
        def flushChangeClassifications(self, schedulerid, less_than=None):
            pass
    @defer.inlineCallbacks
    def test_flushChangeClassifications(self):
        yield self.insertTestData([self.ss92, self.change3, self.change4,
                                   self.change5, self.scheduler24])
        yield self.addClassifications(24,
                                      (3, 1), (4, 0), (5, 1))
        res = yield self.db.schedulers.getChangeClassifications(24)
        self.assertEqual(res, {3: True, 4: False, 5: True})
        yield self.db.schedulers.flushChangeClassifications(24)
        res = yield self.db.schedulers.getChangeClassifications(24)
        self.assertEqual(res, {})
    @defer.inlineCallbacks
    def test_flushChangeClassifications_less_than(self):
        # only classifications with changeid < less_than are flushed
        yield self.insertTestData([self.ss92, self.change3,
                                   self.change4, self.change5, self.scheduler24])
        yield self.addClassifications(24,
                                      (3, 1), (4, 0), (5, 1))
        yield self.db.schedulers.flushChangeClassifications(24, less_than=5)
        res = yield self.db.schedulers.getChangeClassifications(24)
        self.assertEqual(res, {5: True})
    def test_signature_getChangeClassifications(self):
        @self.assertArgSpecMatches(self.db.schedulers.getChangeClassifications)
        def getChangeClassifications(self, schedulerid, branch=-1,
                                     repository=-1, project=-1, codebase=-1):
            pass
    @defer.inlineCallbacks
    def test_getChangeClassifications(self):
        yield self.insertTestData([self.ss92, self.change3, self.change4,
                                   self.change5, self.change6, self.scheduler24])
        yield self.addClassifications(24,
                                      (3, 1), (4, 0), (5, 1), (6, 1))
        res = yield self.db.schedulers.getChangeClassifications(24)
        self.assertEqual(res, {3: True, 4: False, 5: True, 6: True})
    @defer.inlineCallbacks
    def test_getChangeClassifications_branch(self):
        # change6 is the only change on branch 'sql'
        yield self.insertTestData([self.ss92, self.change3, self.change4,
                                   self.change5, self.change6, self.scheduler24])
        yield self.addClassifications(24,
                                      (3, 1), (4, 0), (5, 1), (6, 1))
        res = yield self.db.schedulers.getChangeClassifications(24,
                                                                branch='sql')
        self.assertEqual(res, {6: True})
    def test_signature_findSchedulerId(self):
        @self.assertArgSpecMatches(self.db.schedulers.findSchedulerId)
        def findSchedulerId(self, name):
            pass
    @defer.inlineCallbacks
    def test_findSchedulerId_new(self):
        id = yield self.db.schedulers.findSchedulerId('schname')
        sch = yield self.db.schedulers.getScheduler(id)
        self.assertEqual(sch['name'], 'schname')
    @defer.inlineCallbacks
    def test_findSchedulerId_existing(self):
        # finding the same name twice must return the same id
        id = yield self.db.schedulers.findSchedulerId('schname')
        id2 = yield self.db.schedulers.findSchedulerId('schname')
        self.assertEqual(id, id2)
    def test_signature_setSchedulerMaster(self):
        @self.assertArgSpecMatches(self.db.schedulers.setSchedulerMaster)
        def setSchedulerMaster(self, schedulerid, masterid):
            pass
    @defer.inlineCallbacks
    def test_setSchedulerMaster_fresh(self):
        yield self.insertTestData([self.scheduler24, self.master13])
        yield self.db.schedulers.setSchedulerMaster(24, 13)
        sch = yield self.db.schedulers.getScheduler(24)
        self.assertEqual(sch['masterid'], 13)
    def test_setSchedulerMaster_inactive_but_linked(self):
        # claiming a scheduler linked to another master must fail
        d = self.insertTestData([
            self.master13,
            self.scheduler25, self.master14, self.scheduler25master,
        ])
        d.addCallback(lambda _:
                      self.db.schedulers.setSchedulerMaster(25, 13))
        self.assertFailure(d, schedulers.SchedulerAlreadyClaimedError)
        return d
    def test_setSchedulerMaster_inactive_but_linked_to_this_master(self):
        # re-claiming for the already-linked master is allowed
        d = self.insertTestData([
            self.scheduler25, self.master14, self.scheduler25master,
        ])
        d.addCallback(lambda _:
                      self.db.schedulers.setSchedulerMaster(25, 14))
        return d
    def test_setSchedulerMaster_active(self):
        d = self.insertTestData([
            self.scheduler24, self.master13, self.scheduler24master,
        ])
        d.addCallback(lambda _:
                      self.db.schedulers.setSchedulerMaster(24, 14))
        self.assertFailure(d, schedulers.SchedulerAlreadyClaimedError)
        return d
    @defer.inlineCallbacks
    def test_setSchedulerMaster_None(self):
        # setting masterid=None releases the scheduler
        yield self.insertTestData([
            self.scheduler25, self.master14, self.scheduler25master,
        ])
        yield self.db.schedulers.setSchedulerMaster(25, None)
        sch = yield self.db.schedulers.getScheduler(25)
        self.assertEqual(sch['masterid'], None)
    @defer.inlineCallbacks
    def test_setSchedulerMaster_None_unowned(self):
        yield self.insertTestData([self.scheduler25])
        yield self.db.schedulers.setSchedulerMaster(25, None)
        sch = yield self.db.schedulers.getScheduler(25)
        self.assertEqual(sch['masterid'], None)
    def test_signature_getScheduler(self):
        @self.assertArgSpecMatches(self.db.schedulers.getScheduler)
        def getScheduler(self, schedulerid):
            pass
    @defer.inlineCallbacks
    def test_getScheduler(self):
        yield self.insertTestData([self.scheduler24])
        sch = yield self.db.schedulers.getScheduler(24)
        validation.verifyDbDict(self, 'schedulerdict', sch)
        self.assertEqual(sch, dict(
            id=24,
            name='schname',
            enabled=True,
            masterid=None))
    @defer.inlineCallbacks
    def test_getScheduler_missing(self):
        sch = yield self.db.schedulers.getScheduler(24)
        self.assertEqual(sch, None)
    @defer.inlineCallbacks
    def test_getScheduler_active(self):
        yield self.insertTestData([self.scheduler24, self.master13,
                                   self.scheduler24master])
        sch = yield self.db.schedulers.getScheduler(24)
        validation.verifyDbDict(self, 'schedulerdict', sch)
        self.assertEqual(sch, dict(
            id=24,
            name='schname',
            enabled=True,
            masterid=13))
    @defer.inlineCallbacks
    def test_getScheduler_inactive_but_linked(self):
        yield self.insertTestData([self.scheduler25, self.master14,
                                   self.scheduler25master])
        sch = yield self.db.schedulers.getScheduler(25)
        validation.verifyDbDict(self, 'schedulerdict', sch)
        self.assertEqual(sch, dict(
            id=25,
            name='schname2',
            enabled=True,
            masterid=14)) # row exists, but marked inactive
    def test_signature_getSchedulers(self):
        @self.assertArgSpecMatches(self.db.schedulers.getSchedulers)
        def getSchedulers(self, active=None, masterid=None):
            pass
    @defer.inlineCallbacks
    def test_getSchedulers(self):
        yield self.insertTestData([
            self.scheduler24, self.master13, self.scheduler24master,
            self.scheduler25,
        ])
        def schKey(sch):
            return sch['id']
        schlist = yield self.db.schedulers.getSchedulers()
        [validation.verifyDbDict(self, 'schedulerdict', sch)
         for sch in schlist]
        self.assertEqual(sorted(schlist, key=schKey), sorted([
            dict(id=24, name='schname', enabled=True, masterid=13),
            dict(id=25, name='schname2', enabled=True, masterid=None),
        ], key=schKey))
    @defer.inlineCallbacks
    def test_getSchedulers_masterid(self):
        yield self.insertTestData([
            self.scheduler24, self.master13, self.scheduler24master,
            self.scheduler25,
        ])
        schlist = yield self.db.schedulers.getSchedulers(masterid=13)
        [validation.verifyDbDict(self, 'schedulerdict', sch)
         for sch in schlist]
        self.assertEqual(sorted(schlist), sorted([
            dict(id=24, name='schname', enabled=True, masterid=13),
        ]))
    @defer.inlineCallbacks
    def test_getSchedulers_active(self):
        yield self.insertTestData([
            self.scheduler24, self.master13, self.scheduler24master,
            self.scheduler25
        ])
        schlist = yield self.db.schedulers.getSchedulers(active=True)
        [validation.verifyDbDict(self, 'schedulerdict', sch)
         for sch in schlist]
        self.assertEqual(sorted(schlist), sorted([
            dict(id=24, name='schname', enabled=True, masterid=13),
        ]))
    @defer.inlineCallbacks
    def test_getSchedulers_active_masterid(self):
        yield self.insertTestData([
            self.scheduler24, self.master13, self.scheduler24master,
            self.scheduler25
        ])
        schlist = yield self.db.schedulers.getSchedulers(
            active=True, masterid=13)
        [validation.verifyDbDict(self, 'schedulerdict', sch)
         for sch in schlist]
        self.assertEqual(sorted(schlist), sorted([
            dict(id=24, name='schname', enabled=True, masterid=13),
        ]))
        schlist = yield self.db.schedulers.getSchedulers(
            active=True, masterid=14)
        [validation.verifyDbDict(self, 'schedulerdict', sch)
         for sch in schlist]
        self.assertEqual(sorted(schlist), [])
    @defer.inlineCallbacks
    def test_getSchedulers_inactive(self):
        yield self.insertTestData([
            self.scheduler24, self.master13, self.scheduler24master,
            self.scheduler25
        ])
        schlist = yield self.db.schedulers.getSchedulers(active=False)
        [validation.verifyDbDict(self, 'schedulerdict', sch)
         for sch in schlist]
        self.assertEqual(sorted(schlist), sorted([
            dict(id=25, name='schname2', enabled=True, masterid=None),
        ]))
    @defer.inlineCallbacks
    def test_getSchedulers_inactive_masterid(self):
        yield self.insertTestData([
            self.scheduler24, self.master13, self.scheduler24master,
            self.scheduler25
        ])
        schlist = yield self.db.schedulers.getSchedulers(
            active=False, masterid=13)
        [validation.verifyDbDict(self, 'schedulerdict', sch)
         for sch in schlist]
        self.assertEqual(sorted(schlist), [])
        schlist = yield self.db.schedulers.getSchedulers(
            active=False, masterid=14)
        [validation.verifyDbDict(self, 'schedulerdict', sch)
         for sch in schlist]
        self.assertEqual(sorted(schlist), []) # always returns [] by spec!
class RealTests(Tests):
    # tests that only "real" implementations will pass
    # (currently none beyond the shared interface tests above)
    pass
class TestFakeDB(unittest.TestCase, Tests):
    """Run the shared interface tests against the fake DB implementation."""
    def setUp(self):
        # build a fake master whose .db attribute is the fake connector
        self.master = fakemaster.make_master(testcase=self, wantDb=True)
        self.db = self.master.db
        self.db.checkForeignKeys = True
        self.insertTestData = self.db.insertTestData
    def addClassifications(self, schedulerid, *classifications):
        # classifications are (changeid, important) pairs
        self.db.schedulers.fakeClassifications(schedulerid,
                                               dict(classifications))
        return defer.succeed(None)
class TestRealDB(db.TestCase,
                 connector_component.ConnectorComponentMixin,
                 RealTests):
    """Run the shared interface tests against the real DB implementation."""
    def setUp(self):
        # create only the tables the schedulers component touches
        d = self.setUpConnectorComponent(
            table_names=['changes', 'schedulers', 'masters',
                         'sourcestamps', 'patches', 'scheduler_masters',
                         'scheduler_changes'])
        def finish_setup(_):
            self.db.schedulers = \
                schedulers.SchedulersConnectorComponent(self.db)
        d.addCallback(finish_setup)
        return d
    def tearDown(self):
        return self.tearDownConnectorComponent()
    def addClassifications(self, schedulerid, *classifications):
        # insert (changeid, important) pairs directly via SQLAlchemy
        def thd(conn):
            q = self.db.model.scheduler_changes.insert()
            conn.execute(q, [
                dict(changeid=c[0], schedulerid=schedulerid, important=c[1])
                for c in classifications])
        return self.db.pool.do(thd)
| gpl-2.0 |
bottompawn/kbengine | kbe/src/lib/python/Lib/test/test_devpoll.py | 87 | 4637 | # Test case for the select.devpoll() function
# Initial tests are copied as is from "test_poll.py"
import os
import random
import select
import sys
import unittest
from test.support import TESTFN, run_unittest, cpython_only
# /dev/poll only exists on Solaris-derived systems; skip everywhere else.
if not hasattr(select, 'devpoll') :
    raise unittest.SkipTest('test works only on Solaris OS family')
def find_ready_matching(ready, flag):
    """Return the fds from *ready* -- a list of (fd, event-mask) pairs as
    produced by devpoll.poll() -- whose event mask has *flag* set.

    Order of *ready* is preserved.
    """
    # Idiomatic comprehension instead of the manual append loop.
    return [fd for fd, mode in ready if mode & flag]
class DevPollTests(unittest.TestCase):
    """Tests for select.devpoll objects (the Solaris /dev/poll interface)."""
    def test_devpoll1(self):
        # Basic functional test of poll object
        # Create a bunch of pipe and test that poll works with them.
        p = select.devpoll()
        NUM_PIPES = 12
        MSG = b" This is a test."
        MSG_LEN = len(MSG)
        readers = []
        writers = []
        r2w = {}
        w2r = {}
        for i in range(NUM_PIPES):
            rd, wr = os.pipe()
            p.register(rd)
            p.modify(rd, select.POLLIN)
            p.register(wr, select.POLLOUT)
            readers.append(rd)
            writers.append(wr)
            r2w[rd] = wr
            w2r[wr] = rd
        bufs = []
        # Repeatedly pick a writable pipe, write, then verify exactly the
        # matching read end becomes readable; close pairs as they complete.
        while writers:
            ready = p.poll()
            ready_writers = find_ready_matching(ready, select.POLLOUT)
            if not ready_writers:
                self.fail("no pipes ready for writing")
            wr = random.choice(ready_writers)
            os.write(wr, MSG)
            ready = p.poll()
            ready_readers = find_ready_matching(ready, select.POLLIN)
            if not ready_readers:
                self.fail("no pipes ready for reading")
            self.assertEqual([w2r[wr]], ready_readers)
            rd = ready_readers[0]
            buf = os.read(rd, MSG_LEN)
            self.assertEqual(len(buf), MSG_LEN)
            bufs.append(buf)
            os.close(r2w[rd]) ; os.close(rd)
            p.unregister(r2w[rd])
            p.unregister(rd)
            writers.remove(r2w[rd])
        self.assertEqual(bufs, [MSG] * NUM_PIPES)
    def test_timeout_overflow(self):
        # Timeouts must fit in a C int; out-of-range values raise
        # OverflowError instead of being silently truncated.
        pollster = select.devpoll()
        w, r = os.pipe()
        pollster.register(w)
        pollster.poll(-1)
        self.assertRaises(OverflowError, pollster.poll, -2)
        self.assertRaises(OverflowError, pollster.poll, -1 << 31)
        self.assertRaises(OverflowError, pollster.poll, -1 << 64)
        pollster.poll(0)
        pollster.poll(1)
        pollster.poll(1 << 30)
        self.assertRaises(OverflowError, pollster.poll, 1 << 31)
        self.assertRaises(OverflowError, pollster.poll, 1 << 63)
        self.assertRaises(OverflowError, pollster.poll, 1 << 64)
    def test_close(self):
        open_file = open(__file__, "rb")
        self.addCleanup(open_file.close)
        fd = open_file.fileno()
        devpoll = select.devpoll()
        # test fileno() method and closed attribute
        self.assertIsInstance(devpoll.fileno(), int)
        self.assertFalse(devpoll.closed)
        # test close()
        devpoll.close()
        self.assertTrue(devpoll.closed)
        self.assertRaises(ValueError, devpoll.fileno)
        # close() can be called more than once
        devpoll.close()
        # operations must fail with ValueError("I/O operation on closed ...")
        self.assertRaises(ValueError, devpoll.modify, fd, select.POLLIN)
        self.assertRaises(ValueError, devpoll.poll)
        self.assertRaises(ValueError, devpoll.register, fd, fd, select.POLLIN)
        self.assertRaises(ValueError, devpoll.unregister, fd)
    def test_fd_non_inheritable(self):
        # the devpoll control fd must not be inherited across exec (PEP 446)
        devpoll = select.devpoll()
        self.addCleanup(devpoll.close)
        self.assertEqual(os.get_inheritable(devpoll.fileno()), False)
    def test_events_mask_overflow(self):
        pollster = select.devpoll()
        w, r = os.pipe()
        pollster.register(w)
        # Issue #17919
        self.assertRaises(OverflowError, pollster.register, 0, -1)
        self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
        self.assertRaises(OverflowError, pollster.modify, 1, -1)
        self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)
    @cpython_only
    def test_events_mask_overflow_c_limits(self):
        # event masks are unsigned shorts at the C level
        from _testcapi import USHRT_MAX
        pollster = select.devpoll()
        w, r = os.pipe()
        pollster.register(w)
        # Issue #17919
        self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
        self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
def test_main():
    # Entry point used by the regrtest driver.
    run_unittest(DevPollTests)
if __name__ == '__main__':
    test_main()
| lgpl-3.0 |
chilitechno/barrioSquare | setup.py | 1 | 1178 | from py2deb import Py2deb
from glob import glob
# Build a Debian package for the barrioSquare Maemo app via py2deb.
version = "0.1.20"
p=Py2deb("barriosquare")
p.author="Chris J. Burris"
p.mail="chris@chlitechno.com"
p.description="Maemo application to access foursquare.com api functionality"
# Py2deb maps install paths to file lists; "src|dest" renames on install.
p["/opt/barrioSquare"] = ["barriosq.py|barriosq","barrioConfig.py","barrioStyles.py","get-location.py","oauth.py","oauthclient.py","loading.gif","loading2.gif","loading.html","refreshing.gif","friendsIcon.png","myInfoIcon.png","placesIcon.png","refreshIcon.png","searchIcon.png","settingsIcon.png","signOutIcon.png","CHANGELOG","README","LICENSE.txt","SignInFourSquare.png","powerbyfoursquare2.png","historyIcon.png",]
p["/usr/share/applications/hildon"] = ["barrioSquare.desktop",]
p["/usr/share/icons/hicolor/48x48/apps"] = ["barrioSquare.png",]
p["/usr/share/icons/hicolor/64x64/apps"] = ["barrioSquare64.png",]
p.url = "http://www.chilitechno.com/fster"
p.depends="python2.5, python-osso, python2.5-qt4-common, python2.5-qt4-core, python2.5-qt4-gui, python2.5-qt4-network, python2.5-qt4-webkit, python-location"
p.license="gpl"
p.arch="all"
p.section="net"
# p.postinstall="gtk-update-icon-cache -f /usr/share/icons/hicolor"
p.generate(version)
| gpl-3.0 |
luo66/scikit-learn | sklearn/isotonic.py | 206 | 12307 | # Authors: Fabian Pedregosa <fabian@fseoane.net>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import interpolate
from scipy.stats import spearmanr
from .base import BaseEstimator, TransformerMixin, RegressorMixin
from .utils import as_float_array, check_array, check_consistent_length
from .utils.fixes import astype
from ._isotonic import _isotonic_regression, _make_unique
import warnings
import math
__all__ = ['check_increasing', 'isotonic_regression',
'IsotonicRegression']
def check_increasing(x, y):
    """Determine whether y is monotonically correlated with x.

    The sign of the Spearman correlation estimate between x and y
    decides the answer: a non-negative estimate means increasing.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        Training data.

    y : array-like, shape=(n_samples,)
        Training target.

    Returns
    -------
    `increasing_bool` : boolean
        Whether the relationship is increasing or decreasing.

    Notes
    -----
    A warning is raised when the 95% confidence interval of the
    estimate, obtained through the Fisher transform, spans zero --
    the direction is then uncertain.

    References
    ----------
    Fisher transformation. Wikipedia.
    http://en.wikipedia.org/w/index.php?title=Fisher_transformation
    """
    rho, _ = spearmanr(x, y)
    increasing_bool = rho >= 0
    # A confidence interval only makes sense for |rho| < 1; the Fisher
    # transform diverges at exactly +/-1.
    if rho not in [-1.0, 1.0]:
        fisher = 0.5 * math.log((1. + rho) / (1. - rho))
        stderr = 1 / math.sqrt(len(x) - 3)
        # 95% CI: +/-1.96 standard errors around the transformed estimate.
        # http://en.wikipedia.org/wiki/Fisher_transformation
        ci_low = math.tanh(fisher - 1.96 * stderr)
        ci_high = math.tanh(fisher + 1.96 * stderr)
        if np.sign(ci_low) != np.sign(ci_high):
            warnings.warn("Confidence interval of the Spearman "
                          "correlation coefficient spans zero. "
                          "Determination of ``increasing`` may be "
                          "suspect.")
    return increasing_bool
def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None,
                        increasing=True):
    """Solve the isotonic regression model::

        min sum w[i] (y[i] - y_[i]) ** 2

        subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max

    where:
        - y[i] are inputs (real numbers)
        - y_[i] are fitted
        - w[i] are optional strictly positive weights (default to 1.0)

    Read more in the :ref:`User Guide <isotonic>`.

    Parameters
    ----------
    y : iterable of floating-point values
        The data.

    sample_weight : iterable of floating-point values, optional, default: None
        Weights on each point of the regression.
        If None, weight is set to 1 (equal weights).

    y_min : optional, default: None
        If not None, set the lowest value of the fit to y_min.

    y_max : optional, default: None
        If not None, set the highest value of the fit to y_max.

    increasing : boolean, optional, default: True
        Whether to compute ``y_`` is increasing (if set to True) or decreasing
        (if set to False)

    Returns
    -------
    y_ : list of floating-point values
        Isotonic fit of y.

    References
    ----------
    "Active set algorithms for isotonic regression; A unifying framework"
    by Michael J. Best and Nilotpal Chakravarti, section 3.
    """
    # np.float64 instead of the deprecated np.float alias (an alias for the
    # builtin float, removed in NumPy 1.24); semantics are unchanged.
    y = np.asarray(y, dtype=np.float64)
    if sample_weight is None:
        sample_weight = np.ones(len(y), dtype=y.dtype)
    else:
        sample_weight = np.asarray(sample_weight, dtype=np.float64)
    if not increasing:
        # Solve the decreasing problem by reversing the inputs, fitting the
        # increasing model, and reversing the solution back at the end.
        y = y[::-1]
        sample_weight = sample_weight[::-1]
    if y_min is not None or y_max is not None:
        y = np.copy(y)
        sample_weight = np.copy(sample_weight)
        # upper bound on the cost function: pinning an endpoint with a weight
        # larger than the total achievable cost forces the fit through it
        C = np.dot(sample_weight, y * y) * 10
        if y_min is not None:
            y[0] = y_min
            sample_weight[0] = C
        if y_max is not None:
            y[-1] = y_max
            sample_weight[-1] = C
    solution = np.empty(len(y))
    y_ = _isotonic_regression(y, sample_weight, solution)
    if increasing:
        return y_
    else:
        return y_[::-1]
class IsotonicRegression(BaseEstimator, TransformerMixin, RegressorMixin):
    """Isotonic regression model.
    The isotonic regression optimization problem is defined by::
        min sum w_i (y[i] - y_[i]) ** 2
        subject to y_[i] <= y_[j] whenever X[i] <= X[j]
        and min(y_) = y_min, max(y_) = y_max
    where:
        - ``y[i]`` are inputs (real numbers)
        - ``y_[i]`` are fitted
        - ``X`` specifies the order.
          If ``X`` is non-decreasing then ``y_`` is non-decreasing.
        - ``w[i]`` are optional strictly positive weights (default to 1.0)
    Read more in the :ref:`User Guide <isotonic>`.
    Parameters
    ----------
    y_min : optional, default: None
        If not None, set the lowest value of the fit to y_min.
    y_max : optional, default: None
        If not None, set the highest value of the fit to y_max.
    increasing : boolean or string, optional, default: True
        If boolean, whether or not to fit the isotonic regression with y
        increasing or decreasing.
        The string value "auto" determines whether y should
        increase or decrease based on the Spearman correlation estimate's
        sign.
    out_of_bounds : string, optional, default: "nan"
        The ``out_of_bounds`` parameter handles how x-values outside of the
        training domain are handled.  When set to "nan", predicted y-values
        will be NaN.  When set to "clip", predicted y-values will be
        set to the value corresponding to the nearest train interval endpoint.
        When set to "raise", allow ``interp1d`` to throw ValueError.
    Attributes
    ----------
    X_ : ndarray (n_samples, )
        A copy of the input X.
    y_ : ndarray (n_samples, )
        Isotonic fit of y.
    X_min_ : float
        Minimum value of input array `X_` for left bound.
    X_max_ : float
        Maximum value of input array `X_` for right bound.
    f_ : function
        The stepwise interpolating function that covers the domain `X_`.
    Notes
    -----
    Ties are broken using the secondary method from Leeuw, 1977.
    References
    ----------
    Isotonic Median Regression: A Linear Programming Approach
    Nilotpal Chakravarti
    Mathematics of Operations Research
    Vol. 14, No. 2 (May, 1989), pp. 303-308
    Isotone Optimization in R : Pool-Adjacent-Violators
    Algorithm (PAVA) and Active Set Methods
    Leeuw, Hornik, Mair
    Journal of Statistical Software 2009
    Correctness of Kruskal's algorithms for monotone regression with ties
    Leeuw, Psychometrica, 1977
    """
    def __init__(self, y_min=None, y_max=None, increasing=True,
                 out_of_bounds='nan'):
        # Hyper-parameters are stored verbatim (sklearn convention); all
        # validation happens at fit time.
        self.y_min = y_min
        self.y_max = y_max
        self.increasing = increasing
        self.out_of_bounds = out_of_bounds
    def _check_fit_data(self, X, y, sample_weight=None):
        # Only the dimensionality of X is validated here; length consistency
        # is checked by check_consistent_length in _build_y.
        if len(X.shape) != 1:
            raise ValueError("X should be a 1d array")
    def _build_f(self, X, y):
        """Build the f_ interp1d function."""
        # Handle the out_of_bounds argument by setting bounds_error
        if self.out_of_bounds not in ["raise", "nan", "clip"]:
            raise ValueError("The argument ``out_of_bounds`` must be in "
                             "'nan', 'clip', 'raise'; got {0}"
                             .format(self.out_of_bounds))
        bounds_error = self.out_of_bounds == "raise"
        if len(y) == 1:
            # single y, constant prediction
            self.f_ = lambda x: y.repeat(x.shape)
        else:
            # 'slinear' gives piecewise-linear first-order spline
            # interpolation between the isotonic knots.
            self.f_ = interpolate.interp1d(X, y, kind='slinear',
                                           bounds_error=bounds_error)
    def _build_y(self, X, y, sample_weight):
        """Build the y_ IsotonicRegression."""
        check_consistent_length(X, y, sample_weight)
        X, y = [check_array(x, ensure_2d=False) for x in [X, y]]
        y = as_float_array(y)
        self._check_fit_data(X, y, sample_weight)
        # Determine increasing if auto-determination requested
        if self.increasing == 'auto':
            self.increasing_ = check_increasing(X, y)
        else:
            self.increasing_ = self.increasing
        # If sample_weights is passed, removed zero-weight values and clean order
        if sample_weight is not None:
            sample_weight = check_array(sample_weight, ensure_2d=False)
            mask = sample_weight > 0
            X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
        else:
            sample_weight = np.ones(len(y))
        # Sort primarily by X, breaking X-ties by y; keep the inverse
        # permutation so callers could map back to the original order.
        order = np.lexsort((y, X))
        order_inv = np.argsort(order)
        X, y, sample_weight = [astype(array[order], np.float64, copy=False)
                               for array in [X, y, sample_weight]]
        # Collapse duplicate X values (weighted) before solving.
        unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight)
        self.X_ = unique_X
        self.y_ = isotonic_regression(unique_y, unique_sample_weight, self.y_min,
                                      self.y_max, increasing=self.increasing_)
        return order_inv
    def fit(self, X, y, sample_weight=None):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape=(n_samples,)
            Training data.
        y : array-like, shape=(n_samples,)
            Training target.
        sample_weight : array-like, shape=(n_samples,), optional, default: None
            Weights. If set to None, all weights will be set to 1 (equal
            weights).
        Returns
        -------
        self : object
            Returns an instance of self.
        Notes
        -----
        X is stored for future use, as `transform` needs X to interpolate
        new input data.
        """
        # Build y_
        self._build_y(X, y, sample_weight)
        # Handle the left and right bounds on X
        self.X_min_ = np.min(self.X_)
        self.X_max_ = np.max(self.X_)
        # Build f_
        self._build_f(self.X_, self.y_)
        return self
    def transform(self, T):
        """Transform new data by linear interpolation
        Parameters
        ----------
        T : array-like, shape=(n_samples,)
            Data to transform.
        Returns
        -------
        T_ : array, shape=(n_samples,)
            The transformed data
        """
        T = as_float_array(T)
        if len(T.shape) != 1:
            raise ValueError("Isotonic regression input should be a 1d array")
        # Handle the out_of_bounds argument by clipping if needed
        if self.out_of_bounds not in ["raise", "nan", "clip"]:
            raise ValueError("The argument ``out_of_bounds`` must be in "
                             "'nan', 'clip', 'raise'; got {0}"
                             .format(self.out_of_bounds))
        if self.out_of_bounds == "clip":
            T = np.clip(T, self.X_min_, self.X_max_)
        return self.f_(T)
    def predict(self, T):
        """Predict new data by linear interpolation.
        Parameters
        ----------
        T : array-like, shape=(n_samples,)
            Data to transform.
        Returns
        -------
        T_ : array, shape=(n_samples,)
            Transformed data.
        """
        # Prediction is identical to transformation for this model.
        return self.transform(T)
    def __getstate__(self):
        """Pickle-protocol - return state of the estimator. """
        # copy __dict__
        state = dict(self.__dict__)
        # remove interpolation method: interp1d objects are not reliably
        # picklable, so f_ is rebuilt on unpickling instead.
        state.pop('f_', None)
        return state
    def __setstate__(self, state):
        """Pickle-protocol - set state of the estimator.
        We need to rebuild the interpolation function.
        """
        self.__dict__.update(state)
        self._build_f(self.X_, self.y_)
| bsd-3-clause |
dcroc16/skunk_works | google_appengine/lib/django-1.2/django/utils/dates.py | 120 | 1193 | "Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _
WEEKDAYS = {
0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'),
5:_('Saturday'), 6:_('Sunday')
}
WEEKDAYS_ABBR = {
0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'),
5:_('Sat'), 6:_('Sun')
}
WEEKDAYS_REV = {
'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4,
'saturday':5, 'sunday':6
}
MONTHS = {
1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'),
7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'),
12:_('December')
}
MONTHS_3 = {
1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'),
7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec')
}
MONTHS_3_REV = {
'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8,
'sep':9, 'oct':10, 'nov':11, 'dec':12
}
MONTHS_AP = { # month names in Associated Press style
1:_('Jan.'), 2:_('Feb.'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'), 7:_('July'),
8:_('Aug.'), 9:_('Sept.'), 10:_('Oct.'), 11:_('Nov.'), 12:_('Dec.')
}
| mit |
loulich/Couchpotato | libs/subliminal/services/subswiki.py | 105 | 5182 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords, split_keyword
from ..videos import Episode, Movie
from bs4 import BeautifulSoup
import logging
import urllib
logger = logging.getLogger(__name__)
class SubsWiki(ServiceBase):
    """Subtitle service that screen-scrapes subswiki.com (no API exists).

    NOTE(review): Python 2 code (uses ``unicode``); results are filtered by
    release keywords, wanted languages, and the site's completion status.
    """
    server_url = 'http://www.subswiki.com'
    # The site has no web API; pages are fetched and parsed with BeautifulSoup.
    api_based = False
    languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'por-BR', 'por', 'spa-ES', u'spa', u'ita', u'cat'])
    # Map the site's human-readable language labels to Language objects.
    language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'),
                    u'Català': Language('cat'), u'Brazilian': Language('por-BR'), u'English (US)': Language('eng-US'),
                    u'English (UK)': Language('eng-GB')}
    language_code = 'name'
    videos = [Episode, Movie]
    require_video = False
    # 'permissive' selects a lenient HTML parser backend for BeautifulSoup.
    required_features = ['permissive']
    def list_checked(self, video, languages):
        """Dispatch to query() with episode- or movie-specific arguments.

        Movies without a known year are skipped (the site URL needs it).
        """
        results = []
        if isinstance(video, Episode):
            results = self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode)
        elif isinstance(video, Movie) and video.year:
            results = self.query(video.path or video.release, languages, get_keywords(video.guess), movie=video.title, year=video.year)
        return results
    def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None, movie=None, year=None):
        """Fetch and parse the subswiki page for an episode or a movie.

        Either (series, season, episode) or (movie, year) must be provided;
        otherwise ServiceError is raised.  Returns a list of ResultSubtitle.
        """
        if series and season and episode:
            # Site URLs use lowercase, underscore-separated, UTF-8 names.
            request_series = series.lower().replace(' ', '_')
            if isinstance(request_series, unicode):
                request_series = request_series.encode('utf-8')
            logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
            r = self.session.get('%s/serie/%s/%s/%s/' % (self.server_url, urllib.quote(request_series), season, episode))
            if r.status_code == 404:
                logger.debug(u'Could not find subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
                return []
        elif movie and year:
            request_movie = movie.title().replace(' ', '_')
            if isinstance(request_movie, unicode):
                request_movie = request_movie.encode('utf-8')
            logger.debug(u'Getting subtitles for %s (%d) with languages %r' % (movie, year, languages))
            r = self.session.get('%s/film/%s_(%d)' % (self.server_url, urllib.quote(request_movie), year))
            if r.status_code == 404:
                logger.debug(u'Could not find subtitles for %s (%d) with languages %r' % (movie, year, languages))
                return []
        else:
            raise ServiceError('One or more parameter missing')
        if r.status_code != 200:
            logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
            return []
        soup = BeautifulSoup(r.content, self.required_features)
        subtitles = []
        # Each 'NewsTitle' cell holds one release's keyword string; its
        # sibling rows list the available languages and their status.
        for sub in soup('td', {'class': 'NewsTitle'}):
            sub_keywords = split_keyword(sub.b.string.lower())
            if not keywords & sub_keywords:
                logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
                continue
            for html_language in sub.parent.parent.find_all('td', {'class': 'language'}):
                language = self.get_language(html_language.string.strip())
                if language not in languages:
                    logger.debug(u'Language %r not in wanted languages %r' % (language, languages))
                    continue
                html_status = html_language.find_next_sibling('td')
                status = html_status.strong.string.strip()
                # 'Completado' is the site's (Spanish) label for a finished
                # subtitle; anything else is still in progress.
                if status != 'Completado':
                    logger.debug(u'Wrong subtitle status %s' % status)
                    continue
                path = get_subtitle_path(filepath, language, self.config.multi)
                subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s%s' % (self.server_url, html_status.find_next('td').find('a')['href']))
                subtitles.append(subtitle)
        return subtitles
| gpl-3.0 |
nishad-jobsglobal/odoo-marriot | addons/stock_account/wizard/stock_return_picking.py | 342 | 2715 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_return_picking(osv.osv_memory):
    """Extend the stock return wizard with invoicing/refund options."""
    _inherit = 'stock.return.picking'
    _columns = {
        # Whether the returned goods should trigger a refund/invoice.
        'invoice_state': fields.selection([('2binvoiced', 'To be refunded/invoiced'), ('none', 'No invoicing')], 'Invoicing',required=True),
    }
    def default_get(self, cr, uid, fields, context=None):
        """Default ``invoice_state`` from the picking being returned.

        If the original picking was invoiced, propose a refund
        ('2binvoiced'); otherwise default to no invoicing.
        """
        res = super(stock_return_picking, self).default_get(cr, uid, fields, context=context)
        record_id = context and context.get('active_id', False) or False
        pick_obj = self.pool.get('stock.picking')
        pick = pick_obj.browse(cr, uid, record_id, context=context)
        if pick:
            if 'invoice_state' in fields:
                if pick.invoice_state=='invoiced':
                    res.update({'invoice_state': '2binvoiced'})
                else:
                    res.update({'invoice_state': 'none'})
        return res
    def _create_returns(self, cr, uid, ids, context=None):
        """Create the return picking, then flag its moves for invoicing.

        Delegates picking creation to the parent wizard; if the user chose
        '2binvoiced', every move of the new picking is marked accordingly.
        """
        if context is None:
            context = {}
        data = self.browse(cr, uid, ids[0], context=context)
        new_picking, picking_type_id = super(stock_return_picking, self)._create_returns(cr, uid, ids, context=context)
        if data.invoice_state == '2binvoiced':
            pick_obj = self.pool.get("stock.picking")
            move_obj = self.pool.get("stock.move")
            move_ids = [x.id for x in pick_obj.browse(cr, uid, new_picking, context=context).move_lines]
            move_obj.write(cr, uid, move_ids, {'invoice_state': '2binvoiced'})
        return new_picking, picking_type_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
skiz/warzone2100 | tools/tile_texpage2mipmap_tileset/texpage2mipmap.py | 13 | 12883 | #!/usr/bin/env python
""" Splits and scales tertile texpages into mipmaps.
Finds and filters files based on the 'extension' configuration setting, and
of those, groups where available to allow mipmap output to be created from
as few as one input texpage and as many texpages as there are output
resolutions.
Filenames are grouped together if, after all cased letters are lowered and
file extensions are stripped, they have identical names. if, after the
dotted is stripped they still have at least one period in the filename, and
only numeric digits proceed that period, then all text up to, but not
including the last remaining period is used in the comparison. Thus, the
following filenames are grouped together to represent different resolutions
of the same conceptual texpage:
tertilec1hw.tga
tertilec1hw.53.pcx
tertilec1hw.128.pcx
However, 'tertilec1hw.a23.pcx' will create a new 'tertilec1hw.a23' group.
Files are handled on a first-come basis, if two files that are considered
to be part of the same group also have the same resolution, the first one
found will be used, and all subsequent ones with the same resolution in
that group will be discarded. Since this behavior is unpredictable and
operating-system-dependent, it should not be relied upon, and the user
should take care to have have no more than one texpage of each resolution
per group.
"""
__version__ = "1.2"
__author__ = "Kevin Gillette"
# --------------------------------------------------------------------------
# texpage_convert.py v1.2 by Kevin Gillette (kage)
# --------------------------------------------------------------------------
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
import os, sys, shutil
from subprocess import Popen, PIPE
def ds_ppm_parser(file):
  """Generator that parses a PPM image (ASCII 'P3' or binary 'P6').

  Yields the header as [magic, width, height, maxval] first, then one
  [r, g, b] sample list per pixel.  Raises RuntimeError for any other
  PPM variant or a truncated header.
  NOTE(review): Python 2 only (uses gen.next(), xrange, byte strings).
  """
  tokens = list()
  gen = iter(file)
  # Accumulate whitespace-separated header tokens, stripping '#' comments,
  # until magic number, width, height and maxval have all been seen.
  for line in gen:
    comment = line.find('#')
    if comment != -1:
      line = line[:comment]
    tokens.extend(line.split())
    if len(tokens) >= 4: break
  else:
    # File ended before a complete header was read.
    raise RuntimeError('Invalid PPM file')
  header = [tokens[0]] + map(int, tokens[1:4])
  yield header
  del tokens[:4]
  magicnum, w, h, maxval = header
  magicnum = magicnum.lower()
  if magicnum == "p3":
    # ASCII variant: samples are whitespace-separated decimal numbers;
    # emit them three at a time as RGB triples.
    while True:
      while len(tokens) >= 3:
        yield map(int, tokens[:3])
        del tokens[:3]
      try:
        tokens.extend(gen.next().split())
      except StopIteration:
        break
  elif magicnum == "p6":
    # Binary variant: raw samples, one or two bytes each depending on
    # maxval (two-byte samples are big-endian).
    # NOTE(review): if header tokens and raster shared a line, the rest of
    # the file is never read here ('tokens = list(gen)' is skipped) -- this
    # looks like a latent bug; confirm against real ImageMagick output.
    if len(tokens) < 1:
      tokens = list(gen)
    if maxval < 256: chunksize = 1
    else: chunksize = 2
    raster, tokens = ''.join(tokens), list()
    for i in xrange(0, len(raster), chunksize):
      num = ord(raster[i])
      if chunksize > 1: num = (num << 8) + ord(raster[i + 1])
      tokens.append(num)
      if len(tokens) >= 3:
        yield tokens[:3]
        del tokens[:3]
  else:
    raise RuntimeError('Parser can only handle P3 or P6 files')
def dump_to_radar(gen, outfile):
  """Write one lowercase hex 'rrggbb' line per pixel to *outfile*.

  gen - generator in the shape produced by ds_ppm_parser(): yields a
    header entry first, then [r, g, b] integer triples.
  outfile - writable text file object; receives lines like 'ff00aa\\n'.
  """
  # Discard the header entry; only the pixel triples matter here.
  # next() works on Python 2.6+ and 3.x, unlike the 2.x-only gen.next().
  next(gen)
  for rgb in gen:
    # Format each channel as a zero-padded two-digit lowercase hex byte.
    rgb = [hex(int(c)).split('x')[-1].zfill(2) for c in rgb]
    outfile.write(''.join(rgb) + '\n')
def handle_conf(iterable, config):
  """ Simple configuration loader.
  When provided with an iterable datatype, be it a file object (from
  which it will extract one directive per line), or arguments from the
  command line (each distinct "word" will represent a full directive),
  each token will either be ignored (blank lines and comments), unset a
  directive (nothing after the equal-sign), or set a directive.
  Unsetting a directive will cause scripted defaults to be used, and not
  setting a given directive will have the same effect.
  """
  for text in iterable:
    # Strip leading whitespace and any trailing newline (file input).
    text = text.lstrip().rstrip('\n')
    # '#' at the start marks a comment line; skip it.
    if text.startswith('#'): continue
    # Directives look like 'key=value'; split only on the first '='
    # so values may themselves contain '='.
    parts = text.split('=', 1)
    if len(parts) == 2:
      parts[0] = parts[0].rstrip()
      if parts[1]:
        print 'setting "%s" to "%s"' % tuple(parts)
        config[parts[0]] = parts[1]
      else:
        # 'key=' with no value removes the directive so that the
        # script's built-in default applies again.
        # NOTE(review): raises KeyError if the key was never set.
        print 'defaulting "%s"' % parts[0]
        del config[parts[0]]
def makedir(exportpath, dirname):
  """ Given a basename, create a directory in exportpath
  If the directory already exists, remove all png's contained therein.
  If it exists but is not a directory, rename the file to something
  similar but unused. If no exception is raised, then a directory by
  the specified name will have been created and its absolute path will
  have been returned as a string.
  """
  dirpath = os.path.join(exportpath, dirname)
  if os.path.exists(dirpath):
    if os.path.isdir(dirpath):
      # Reuse the existing directory, but clear out stale tile output.
      print dirname, "already exists: removing contained png files"
      for fn in os.listdir(dirpath):
        if fn.lower().endswith('.png'): os.remove(os.path.join(dirpath, fn))
      return dirpath
    else:
      # A non-directory file is squatting on the name: move it aside to
      # the first unused '<name>.<n>' path rather than deleting it.
      count = 0
      while os.path.exists("%s.%i" % (dirpath, count)):
        count += 1
      new_path = "%s.%i" % (dirpath, count)
      shutil.move(dirpath, new_path)
      print "error:", dirname, "is not a directory. moving file to", new_path
  print "creating directory:", dirname
  os.mkdir(dirpath)
  return dirpath
def nearest_resolution(initial_index, arr):
  """ Find the best resolution from which to scale.
  initial_index - the position in the global variable 'resolutions' of
    the desired output resolution.
  arr - same length as 'resolutions' and contains only True and False
    values. True represents a full-quality texpage for use in scaling,
    while False represents resolutions for which scaling will be
    needed to generate.
  Resolutions greater than the desired one are always favored above
  smaller resolutions, with priority being given to resolutions closer
  to the desired output resolution.
  Returns an (index, scale_down) tuple where 'index' is the position of
  the chosen source resolution and 'scale_down' is True when the source
  is larger than the desired output.
  Raises ValueError if 'arr' contains no True entry at all; callers are
  expected to guarantee at least one input resolution exists.
  """
  # Prefer the closest larger resolution: downscaling keeps more detail.
  for i in range(initial_index + 1, len(arr)):
    if arr[i]: return (i, True)
  # Otherwise fall back to the closest smaller resolution.
  for i in reversed(range(initial_index)):
    if arr[i]: return (i, False)
  # The original 'assert False' here is stripped under 'python -O', which
  # would make the function silently return None; raise explicitly instead.
  raise ValueError("no input resolution available to scale from")
def process():
  """Top-level driver for the texpage-to-mipmap conversion.

  Reads configuration (file + command line), then in two passes:
  1. splits each matching input texpage into per-resolution tile
     directories via ImageMagick 'convert', collecting radar files;
  2. synthesizes every still-missing resolution by rescaling the
     nearest available one, and generates missing .radar files.
  NOTE(review): Python 2 script (print statements, dict.iteritems).
  """
  # Windows needs shell=True so Popen resolves the ImageMagick binaries.
  is_windows = use_shell = os.name is 'nt' or sys.platform is 'win32'
  conf = dict()
  # Load '<scriptname>.conf' from the script's own directory if present,
  # then let command-line arguments override those settings.
  scriptloc = os.path.abspath(os.path.dirname(sys.argv[0]))
  f = os.path.splitext(os.path.basename(sys.argv[0]))[0] + ".conf"
  f = os.path.join(scriptloc, f)
  if os.path.exists(f):
    print "reading config data from", f
    handle_conf(open(f, "rU"), conf)
    print
  print "scanning script arguments"
  handle_conf(sys.argv[1:], conf)
  print
  log = conf.get('log')
  if log:
    print "using log:", log
    # Redirect all further stdout/stderr output into the log file.
    sys.stdout = sys.stderr = open(log, 'wt')
  del log
  # Resolve the ImageMagick 'identify' and 'convert' executables.
  impath = conf.get('imagemagick-path', '')
  identify_path = os.path.join(impath, 'identify')
  convert_path = os.path.join(impath, 'convert')
  exportpath = os.path.abspath(conf.get('export-path', '.'))
  importpath = os.path.abspath(conf.get('import-path', '.'))
  dir_contents = os.listdir(importpath)
  extensions = conf.get('extensions', '.png .pcx .tga').lower().split()
  try:
    resolutions = set(map(int, conf.get('resolutions', '16 32 64 128').split()))
  except ValueError:
    # NOTE(review): sys.exit() takes a single argument; this multi-argument
    # call raises TypeError instead of printing the intended message.
    sys.exit("error: the 'resolutions' directive in", f, "must contain only base-ten integers")
  try:
    columns = int(conf.get('columns', 9))
  except ValueError:
    # NOTE(review): same multi-argument sys.exit() problem as above.
    sys.exit("error: the 'columns' directive in", f, "must contain only base-ten integers")
  resolutions = list(resolutions)
  resolutions.sort()
  # 'default' means: let ImageMagick choose its own resampling filter.
  filter_increase = conf.get('filter-increase')
  if filter_increase == 'default': filter_increase = None
  filter_decrease = conf.get('filter-decrease')
  if filter_decrease == 'default': filter_decrease = None
  # names maps a texpage group name to a flag list: one bool per entry in
  # 'resolutions' (tiles already split at that size), then a bool at [-2]
  # for "radar file generated", then the group's tile row count at [-1].
  names = dict()
  # ---- Pass 1: split every matching input texpage into tile directories.
  for f in dir_contents:
    name, ext = os.path.splitext(f.lower())
    if ext not in extensions: continue
    fpath = os.path.join(importpath, f)
    print "fpath:", fpath
    # A '<name>.<digits>' suffix marks a resolution variant of the same
    # group; '<name>.radar' marks a pre-made radar colour image.
    pieces = name.rsplit('.', 1)
    is_radar = False
    if len(pieces) == 2:
      if pieces[1].isdigit():
        name = pieces[0]
      if pieces[1] == 'radar':
        is_radar = True
        name = pieces[0]
    # Ask ImageMagick for the image's pixel dimensions.
    args = [identify_path, '-format', '%w %h', fpath]
    p = Popen(args, stdout=PIPE, stderr=PIPE, shell=use_shell)
    o = p.communicate()
    if p.returncode:
      print "args for identify:"
      print args
      print "ignoring %s: %s" % (f, o[1])
      continue
    w, h = map(int, o[0].split())
    if is_radar:
      # Radar images must be exactly one pixel per tile column.
      if w != columns:
        # NOTE(review): broken format expression -- applying '% f' to a
        # string without a placeholder raises TypeError when reached.
        print "ignoring %s: radar image is not", columns, "pixels wide." % f
        continue
      res = names.setdefault(name, [False] * (len(resolutions) + 1) + [h])
      if res[-2]:
        print "ignoring %s: radar image already found for %s" % (f, name)
        continue
      if res[-1] != h:
        print "ignoring %s: group has %i tile rows. this has %i rows" % \
          (f, res[-1], h)
        continue
      # Convert the radar image to PPM and dump it as hex colour lines.
      args = [convert_path, fpath, '-depth', '8', 'ppm:-']
      p = Popen(args, stdout=PIPE, stderr=PIPE, shell=use_shell)
      out = open(os.path.join(exportpath, name + '.radar'), 'wb')
      dump_to_radar(ds_ppm_parser(p.stdout), out)
      out.close()
      if p.wait():
        print "args for convert:"
        print args
        sys.exit("error while running convert on %s: %s" % \
          (f, p.stderr.read()))
      res[-2] = True
      print "using %s to generate the radar file for %s" % (f, name)
      continue
    # Regular texpage: tile size is the width divided by the column count.
    tilesize, extra = divmod(w, columns)
    print f + ": tiles determined to be", tilesize, "pixels per dimension"
    if tilesize not in resolutions:
      print "ignoring %s: does not use one of the listed tile resolutions" % f
      continue
    # Reject images whose dimensions are not an exact tile grid.
    fixed_w = tilesize * columns
    rows, extra = divmod(h, tilesize)
    fixed_h = tilesize * rows
    if fixed_h != h or fixed_w != w:
      print "ignoring %s: expected dimensions of %ix%i, but found %ix%i" % \
        (f, fixed_w, fixed_h, w, h)
      continue
    index = resolutions.index(tilesize)
    res = names.setdefault(name, [False] * (len(resolutions) + 1) + [rows])
    if rows != res[-1]:
      print "ignoring %s: group has %i tile rows. this has %i rows" % \
        (f, res[-1], h)
      continue
    if res[index]:
      print "ignoring %s: resolution already filled" % f
      continue
    # Crop the texpage into individual tile PNGs in '<name>-<size>/'.
    dirpath = makedir(exportpath, "%s-%i" % (name, tilesize))
    print "splitting tiles from", f, "into", dirpath, "at", tilesize, "resolution"
    args = [convert_path, '-depth', '8']
    args.extend(['-crop', '%ix%i' % (tilesize, tilesize)])
    args.extend([fpath, os.path.join(dirpath, 'tile-%02d.png')])
    p = Popen(args, stderr=PIPE, shell=use_shell)
    o = p.communicate()
    if p.returncode:
      print "args for convert:"
      print args
      # NOTE(review): uses the 'exit' builtin here but sys.exit elsewhere.
      exit("error while running convert on %s: %s" % (f, o[1]))
    res[index] = True
  def filesortkey(name):
    # Sort 'tile-NN.png' filenames by their numeric NN component; anything
    # unparsable sorts first.
    try:
      return int(name[name.find('-') + 1:name.find('.')])
    except ValueError:
      return -1
  # ---- Pass 2: fill in missing resolutions and missing radar files.
  for name, levels in names.iteritems():
    for i, res in enumerate(resolutions):
      # Skip groups that provided no full-quality texpage at all.
      if not True in levels[:-2]: continue
      if levels[i]:
        # Tiles exist at this resolution; only the radar file may be
        # missing.  Build it by sampling each tile down to one pixel.
        if levels[-2]: continue
        out = open(os.path.join(exportpath, name + ".radar"), 'wb')
        args = [convert_path, None, '-sample', '1x1!', '-depth', '8', 'ppm:-']
        dirpath = os.path.join(exportpath, "%s-%i" % (name, res))
        files = os.listdir(dirpath)
        files.sort(key=filesortkey)
        print "generating radar file from files in", dirpath
        for f in files:
          if not f.endswith('.png'): continue
          args[1] = os.path.join(dirpath, f)
          p = Popen(args, stdout=PIPE, stderr=PIPE, shell=use_shell)
          dump_to_radar(ds_ppm_parser(p.stdout), out)
          if p.wait():
            print "args for convert:"
            print args
            sys.exit("error while running convert on %s: %s" % \
              (f, p.stderr.read()))
        out.close()
        levels[-2] = True
        continue
      # No tiles at this resolution: rescale from the nearest one we have.
      dirpath = makedir(exportpath, "%s-%i" % (name, res))
      index, scale_down = nearest_resolution(i, levels[:-2])
      input_res = resolutions[index]
      input_dirpath = os.path.join(exportpath, "%s-%i" % (name, input_res))
      print "resizing tiles from", input_dirpath, "to", dirpath
      for f in os.listdir(input_dirpath):
        if not f.endswith('.png'): continue
        args = [convert_path, os.path.join(input_dirpath, f)]
        # Apply the configured resampling filter, if any, for the
        # direction (up or down) we are scaling in.
        if scale_down:
          if filter_decrease:
            args.extend(['-filter', filter_decrease])
        elif filter_increase:
          args.extend(['-filter', filter_increase])
        args.extend(['-resize', "%ix%i!" % (res, res)])
        args.append(os.path.join(dirpath, f))
        p = Popen(args, stdout=PIPE, stderr=PIPE, shell=use_shell)
        if p.wait():
          print "args for convert:"
          print args
          sys.exit("error while running convert on %s: %s" % \
            (f, p.stderr.read()))
# Script entry point: run the whole conversion when executed directly.
if __name__ == '__main__':
  process ()
| gpl-2.0 |
ToonTownInfiniteRepo/ToontownInfinite | Panda3D-1.9.0/python/Lib/encodings/iso8859_4.py | 593 | 13632 | """ Python Character Mapping Codec iso8859_4 generated from 'MAPPINGS/ISO8859/8859-4.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless encoder/decoder pair for the ISO 8859-4 charset.

    Both directions delegate to the C-level charmap helpers using the
    tables generated for this module.
    """

    def encode(self, input, errors='strict'):
        """Encode *input* through the module's charmap encoding table."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode *input* through the module's charmap decoding table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental ISO 8859-4 encoder (charmap based, needs no state)."""

    def encode(self, input, final=False):
        # charmap_encode returns (encoded_bytes, consumed_length); only the
        # encoded data is returned to the caller.
        encoded, _consumed = codecs.charmap_encode(input, self.errors,
                                                   encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental ISO 8859-4 decoder (charmap based, needs no state)."""

    def decode(self, input, final=False):
        # charmap_decode returns (decoded_text, consumed_length); only the
        # decoded data is returned to the caller.
        decoded, _consumed = codecs.charmap_decode(input, self.errors,
                                                   decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer for ISO 8859-4; all work happens in Codec.encode."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader for ISO 8859-4; all work happens in Codec.decode."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record registered by the ``encodings`` package
    for the ``iso8859-4`` codec (stateless, incremental and stream APIs)."""
    return codecs.CodecInfo(
        name='iso8859-4',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0138' # 0xA2 -> LATIN SMALL LETTER KRA
u'\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
u'\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u0112' # 0xAA -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u02db' # 0xB2 -> OGONEK
u'\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
u'\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA
u'\u02c7' # 0xB7 -> CARON
u'\xb8' # 0xB8 -> CEDILLA
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON
u'\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA
u'\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE
u'\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE
u'\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
u'\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE
u'\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
fkakuma/ryu | ryu/tests/unit/packet/test_ipv6.py | 23 | 36834 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
import inspect
import six
import struct
from nose.tools import *
from ryu.lib import addrconv
from ryu.lib import ip
from ryu.lib.packet import ipv6
LOG = logging.getLogger(__name__)
class Test_ipv6(unittest.TestCase):
def setUp(self):
self.version = 6
self.traffic_class = 0
self.flow_label = 0
self.payload_length = 817
self.nxt = 6
self.hop_limit = 128
self.src = '2002:4637:d5d3::4637:d5d3'
self.dst = '2001:4860:0:2001::68'
self.ext_hdrs = []
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.v_tc_flow = (
self.version << 28 | self.traffic_class << 20 |
self.flow_label << 12)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
def setUp_with_hop_opts(self):
self.opt1_type = 5
self.opt1_len = 2
self.opt1_data = b'\x00\x00'
self.opt2_type = 1
self.opt2_len = 0
self.opt2_data = None
self.options = [
ipv6.option(self.opt1_type, self.opt1_len, self.opt1_data),
ipv6.option(self.opt2_type, self.opt2_len, self.opt2_data),
]
self.hop_opts_nxt = 6
self.hop_opts_size = 0
self.hop_opts = ipv6.hop_opts(
self.hop_opts_nxt, self.hop_opts_size, self.options)
self.ext_hdrs = [self.hop_opts]
self.payload_length += len(self.hop_opts)
self.nxt = ipv6.hop_opts.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.hop_opts.serialize()
def setUp_with_dst_opts(self):
self.opt1_type = 5
self.opt1_len = 2
self.opt1_data = b'\x00\x00'
self.opt2_type = 1
self.opt2_len = 0
self.opt2_data = None
self.options = [
ipv6.option(self.opt1_type, self.opt1_len, self.opt1_data),
ipv6.option(self.opt2_type, self.opt2_len, self.opt2_data),
]
self.dst_opts_nxt = 6
self.dst_opts_size = 0
self.dst_opts = ipv6.dst_opts(
self.dst_opts_nxt, self.dst_opts_size, self.options)
self.ext_hdrs = [self.dst_opts]
self.payload_length += len(self.dst_opts)
self.nxt = ipv6.dst_opts.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.dst_opts.serialize()
def setUp_with_routing_type3(self):
self.routing_nxt = 6
self.routing_size = 6
self.routing_type = 3
self.routing_seg = 2
self.routing_cmpi = 0
self.routing_cmpe = 0
self.routing_adrs = ["2001:db8:dead::1", "2001:db8:dead::2",
"2001:db8:dead::3"]
self.routing = ipv6.routing_type3(
self.routing_nxt, self.routing_size,
self.routing_type, self.routing_seg,
self.routing_cmpi, self.routing_cmpe,
self.routing_adrs)
self.ext_hdrs = [self.routing]
self.payload_length += len(self.routing)
self.nxt = ipv6.routing.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.routing.serialize()
def setUp_with_fragment(self):
self.fragment_nxt = 6
self.fragment_offset = 50
self.fragment_more = 1
self.fragment_id = 123
self.fragment = ipv6.fragment(
self.fragment_nxt, self.fragment_offset, self.fragment_more,
self.fragment_id)
self.ext_hdrs = [self.fragment]
self.payload_length += len(self.fragment)
self.nxt = ipv6.fragment.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.fragment.serialize()
def setUp_with_auth(self):
self.auth_nxt = 6
self.auth_size = 4
self.auth_spi = 256
self.auth_seq = 1
self.auth_data = b'\xa0\xe7\xf8\xab\xf9\x69\x1a\x8b\xf3\x9f\x7c\xae'
self.auth = ipv6.auth(
self.auth_nxt, self.auth_size, self.auth_spi, self.auth_seq,
self.auth_data)
self.ext_hdrs = [self.auth]
self.payload_length += len(self.auth)
self.nxt = ipv6.auth.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.auth.serialize()
def setUp_with_multi_headers(self):
self.opt1_type = 5
self.opt1_len = 2
self.opt1_data = b'\x00\x00'
self.opt2_type = 1
self.opt2_len = 0
self.opt2_data = None
self.options = [
ipv6.option(self.opt1_type, self.opt1_len, self.opt1_data),
ipv6.option(self.opt2_type, self.opt2_len, self.opt2_data),
]
self.hop_opts_nxt = ipv6.auth.TYPE
self.hop_opts_size = 0
self.hop_opts = ipv6.hop_opts(
self.hop_opts_nxt, self.hop_opts_size, self.options)
self.auth_nxt = 6
self.auth_size = 4
self.auth_spi = 256
self.auth_seq = 1
self.auth_data = b'\xa0\xe7\xf8\xab\xf9\x69\x1a\x8b\xf3\x9f\x7c\xae'
self.auth = ipv6.auth(
self.auth_nxt, self.auth_size, self.auth_spi, self.auth_seq,
self.auth_data)
self.ext_hdrs = [self.hop_opts, self.auth]
self.payload_length += len(self.hop_opts) + len(self.auth)
self.nxt = ipv6.hop_opts.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.hop_opts.serialize()
self.buf += self.auth.serialize()
def tearDown(self):
pass
def test_init(self):
eq_(self.version, self.ip.version)
eq_(self.traffic_class, self.ip.traffic_class)
eq_(self.flow_label, self.ip.flow_label)
eq_(self.payload_length, self.ip.payload_length)
eq_(self.nxt, self.ip.nxt)
eq_(self.hop_limit, self.ip.hop_limit)
eq_(self.src, self.ip.src)
eq_(self.dst, self.ip.dst)
eq_(str(self.ext_hdrs), str(self.ip.ext_hdrs))
def test_init_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_init()
def test_init_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_init()
def test_init_with_routing_type3(self):
self.setUp_with_routing_type3()
self.test_init()
def test_init_with_fragment(self):
self.setUp_with_fragment()
self.test_init()
def test_init_with_auth(self):
self.setUp_with_auth()
self.test_init()
def test_init_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_init()
def test_parser(self):
_res = self.ip.parser(six.binary_type(self.buf))
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.version, res.version)
eq_(self.traffic_class, res.traffic_class)
eq_(self.flow_label, res.flow_label)
eq_(self.payload_length, res.payload_length)
eq_(self.nxt, res.nxt)
eq_(self.hop_limit, res.hop_limit)
eq_(self.src, res.src)
eq_(self.dst, res.dst)
eq_(str(self.ext_hdrs), str(res.ext_hdrs))
def test_parser_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_parser()
def test_parser_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_parser()
def test_parser_with_routing_type3(self):
self.setUp_with_routing_type3()
self.test_parser()
def test_parser_with_fragment(self):
self.setUp_with_fragment()
self.test_parser()
def test_parser_with_auth(self):
self.setUp_with_auth()
self.test_parser()
def test_parser_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_parser()
def test_serialize(self):
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
res = struct.unpack_from(ipv6.ipv6._PACK_STR, six.binary_type(buf))
eq_(self.v_tc_flow, res[0])
eq_(self.payload_length, res[1])
eq_(self.nxt, res[2])
eq_(self.hop_limit, res[3])
eq_(self.src, addrconv.ipv6.bin_to_text(res[4]))
eq_(self.dst, addrconv.ipv6.bin_to_text(res[5]))
def test_serialize_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
hop_opts = ipv6.hop_opts.parser(six.binary_type(buf[ipv6.ipv6._MIN_LEN:]))
eq_(repr(self.hop_opts), repr(hop_opts))
def test_serialize_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
dst_opts = ipv6.dst_opts.parser(six.binary_type(buf[ipv6.ipv6._MIN_LEN:]))
eq_(repr(self.dst_opts), repr(dst_opts))
def test_serialize_with_routing_type3(self):
self.setUp_with_routing_type3()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
routing = ipv6.routing.parser(six.binary_type(buf[ipv6.ipv6._MIN_LEN:]))
eq_(repr(self.routing), repr(routing))
def test_serialize_with_fragment(self):
self.setUp_with_fragment()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
fragment = ipv6.fragment.parser(six.binary_type(buf[ipv6.ipv6._MIN_LEN:]))
eq_(repr(self.fragment), repr(fragment))
def test_serialize_with_auth(self):
self.setUp_with_auth()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
auth = ipv6.auth.parser(six.binary_type(buf[ipv6.ipv6._MIN_LEN:]))
eq_(repr(self.auth), repr(auth))
def test_serialize_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
offset = ipv6.ipv6._MIN_LEN
hop_opts = ipv6.hop_opts.parser(six.binary_type(buf[offset:]))
offset += len(hop_opts)
auth = ipv6.auth.parser(six.binary_type(buf[offset:]))
eq_(repr(self.hop_opts), repr(hop_opts))
eq_(repr(self.auth), repr(auth))
def test_to_string(self):
ipv6_values = {'version': self.version,
'traffic_class': self.traffic_class,
'flow_label': self.flow_label,
'payload_length': self.payload_length,
'nxt': self.nxt,
'hop_limit': self.hop_limit,
'src': repr(self.src),
'dst': repr(self.dst),
'ext_hdrs': self.ext_hdrs}
_ipv6_str = ','.join(['%s=%s' % (k, ipv6_values[k])
for k, v in inspect.getmembers(self.ip)
if k in ipv6_values])
ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
eq_(str(self.ip), ipv6_str)
eq_(repr(self.ip), ipv6_str)
def test_to_string_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_to_string()
def test_to_string_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_to_string()
def test_to_string_with_fragment(self):
self.setUp_with_fragment()
self.test_to_string()
def test_to_string_with_auth(self):
self.setUp_with_auth()
self.test_to_string()
def test_to_string_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_to_string()
def test_len(self):
eq_(len(self.ip), 40)
def test_len_with_hop_opts(self):
self.setUp_with_hop_opts()
eq_(len(self.ip), 40 + len(self.hop_opts))
def test_len_with_dst_opts(self):
self.setUp_with_dst_opts()
eq_(len(self.ip), 40 + len(self.dst_opts))
def test_len_with_routing_type3(self):
self.setUp_with_routing_type3()
eq_(len(self.ip), 40 + len(self.routing))
def test_len_with_fragment(self):
self.setUp_with_fragment()
eq_(len(self.ip), 40 + len(self.fragment))
def test_len_with_auth(self):
self.setUp_with_auth()
eq_(len(self.ip), 40 + len(self.auth))
def test_len_with_multi_headers(self):
self.setUp_with_multi_headers()
eq_(len(self.ip), 40 + len(self.hop_opts) + len(self.auth))
def test_default_args(self):
ip = ipv6.ipv6()
buf = ip.serialize(bytearray(), None)
res = struct.unpack(ipv6.ipv6._PACK_STR, six.binary_type(buf))
eq_(res[0], 6 << 28)
eq_(res[1], 0)
eq_(res[2], 6)
eq_(res[3], 255)
eq_(res[4], addrconv.ipv6.text_to_bin('10::10'))
eq_(res[5], addrconv.ipv6.text_to_bin('20::20'))
# with extension header
ip = ipv6.ipv6(
nxt=0, ext_hdrs=[
ipv6.hop_opts(58, 0, [
ipv6.option(5, 2, b'\x00\x00'),
ipv6.option(1, 0, None)])])
buf = ip.serialize(bytearray(), None)
res = struct.unpack(ipv6.ipv6._PACK_STR + '8s', six.binary_type(buf))
eq_(res[0], 6 << 28)
eq_(res[1], 8)
eq_(res[2], 0)
eq_(res[3], 255)
eq_(res[4], addrconv.ipv6.text_to_bin('10::10'))
eq_(res[5], addrconv.ipv6.text_to_bin('20::20'))
eq_(res[6], b'\x3a\x00\x05\x02\x00\x00\x01\x00')
def test_json(self):
jsondict = self.ip.to_jsondict()
ip = ipv6.ipv6.from_jsondict(jsondict['ipv6'])
eq_(str(self.ip), str(ip))
def test_json_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_json()
def test_json_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_json()
def test_json_with_routing_type3(self):
self.setUp_with_routing_type3()
self.test_json()
def test_json_with_fragment(self):
self.setUp_with_fragment()
self.test_json()
def test_json_with_auth(self):
self.setUp_with_auth()
self.test_json()
def test_json_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_json()
class Test_hop_opts(unittest.TestCase):
    """Tests for ipv6.hop_opts (IPv6 Hop-by-Hop Options extension header)."""

    def setUp(self):
        # nxt=0: next-header value used for the fixture; size=8 octets of
        # option data split over four TLV options (incl. two PadN options).
        self.nxt = 0
        self.size = 8
        self.data = [
            ipv6.option(5, 2, b'\x00\x00'),
            ipv6.option(1, 0, None),
            ipv6.option(0xc2, 4, b'\x00\x01\x00\x00'),
            ipv6.option(1, 0, None),
        ]
        self.hop = ipv6.hop_opts(self.nxt, self.size, self.data)
        self.form = '!BB'
        # Wire image: 2-byte fixed header followed by the serialized options.
        self.buf = struct.pack(self.form, self.nxt, self.size) \
            + self.data[0].serialize() \
            + self.data[1].serialize() \
            + self.data[2].serialize() \
            + self.data[3].serialize()

    def tearDown(self):
        pass

    def test_init(self):
        eq_(self.nxt, self.hop.nxt)
        eq_(self.size, self.hop.size)
        eq_(self.data, self.hop.data)

    @raises(Exception)
    def test_invalid_size(self):
        # A size inconsistent with the option payload must be rejected.
        ipv6.hop_opts(self.nxt, 1, self.data)

    def test_parser(self):
        _res = ipv6.hop_opts.parser(self.buf)
        # parser() may return (instance, rest); unwrap the instance.
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(self.nxt, res.nxt)
        eq_(self.size, res.size)
        eq_(str(self.data), str(res.data))

    def test_serialize(self):
        # Round-trip: serialize the header, then re-parse each option in turn.
        buf = self.hop.serialize()
        res = struct.unpack_from(self.form, six.binary_type(buf))
        eq_(self.nxt, res[0])
        eq_(self.size, res[1])
        offset = struct.calcsize(self.form)
        opt1 = ipv6.option.parser(six.binary_type(buf[offset:]))
        offset += len(opt1)
        opt2 = ipv6.option.parser(six.binary_type(buf[offset:]))
        offset += len(opt2)
        opt3 = ipv6.option.parser(six.binary_type(buf[offset:]))
        offset += len(opt3)
        opt4 = ipv6.option.parser(six.binary_type(buf[offset:]))
        eq_(5, opt1.type_)
        eq_(2, opt1.len_)
        eq_(b'\x00\x00', opt1.data)
        eq_(1, opt2.type_)
        eq_(0, opt2.len_)
        eq_(None, opt2.data)
        eq_(0xc2, opt3.type_)
        eq_(4, opt3.len_)
        eq_(b'\x00\x01\x00\x00', opt3.data)
        eq_(1, opt4.type_)
        eq_(0, opt4.len_)
        eq_(None, opt4.data)

    def test_len(self):
        # 2-byte fixed header + 14 bytes of serialized options = 16.
        eq_(16, len(self.hop))

    def test_default_args(self):
        hdr = ipv6.hop_opts()
        buf = hdr.serialize()
        res = struct.unpack('!BB', six.binary_type(buf[:2]))
        eq_(res[0], 6)
        eq_(res[1], 0)
        # Default body is a single 4-byte PadN option.
        opt = ipv6.option(type_=1, len_=4, data=b'\x00\x00\x00\x00')
        eq_(six.binary_type(buf[2:]), opt.serialize())
class Test_dst_opts(unittest.TestCase):
    """Tests for ipv6.dst_opts (IPv6 Destination Options extension header)."""

    def setUp(self):
        # nxt=60: next-header value used for the fixture; size=8 octets of
        # option data split over four TLV options (incl. two PadN options).
        self.nxt = 60
        self.size = 8
        self.data = [
            ipv6.option(5, 2, b'\x00\x00'),
            ipv6.option(1, 0, None),
            ipv6.option(0xc2, 4, b'\x00\x01\x00\x00'),
            ipv6.option(1, 0, None),
        ]
        self.dst = ipv6.dst_opts(self.nxt, self.size, self.data)
        self.form = '!BB'
        # Wire image: 2-byte fixed header followed by the serialized options.
        self.buf = struct.pack(self.form, self.nxt, self.size) \
            + self.data[0].serialize() \
            + self.data[1].serialize() \
            + self.data[2].serialize() \
            + self.data[3].serialize()

    def tearDown(self):
        pass

    def test_init(self):
        eq_(self.nxt, self.dst.nxt)
        eq_(self.size, self.dst.size)
        eq_(self.data, self.dst.data)

    @raises(Exception)
    def test_invalid_size(self):
        # A size inconsistent with the option payload must be rejected.
        ipv6.dst_opts(self.nxt, 1, self.data)

    def test_parser(self):
        _res = ipv6.dst_opts.parser(self.buf)
        # parser() may return (instance, rest); unwrap the instance.
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(self.nxt, res.nxt)
        eq_(self.size, res.size)
        eq_(str(self.data), str(res.data))

    def test_serialize(self):
        # Round-trip: serialize the header, then re-parse each option in turn.
        buf = self.dst.serialize()
        res = struct.unpack_from(self.form, six.binary_type(buf))
        eq_(self.nxt, res[0])
        eq_(self.size, res[1])
        offset = struct.calcsize(self.form)
        opt1 = ipv6.option.parser(six.binary_type(buf[offset:]))
        offset += len(opt1)
        opt2 = ipv6.option.parser(six.binary_type(buf[offset:]))
        offset += len(opt2)
        opt3 = ipv6.option.parser(six.binary_type(buf[offset:]))
        offset += len(opt3)
        opt4 = ipv6.option.parser(six.binary_type(buf[offset:]))
        eq_(5, opt1.type_)
        eq_(2, opt1.len_)
        eq_(b'\x00\x00', opt1.data)
        eq_(1, opt2.type_)
        eq_(0, opt2.len_)
        eq_(None, opt2.data)
        eq_(0xc2, opt3.type_)
        eq_(4, opt3.len_)
        eq_(b'\x00\x01\x00\x00', opt3.data)
        eq_(1, opt4.type_)
        eq_(0, opt4.len_)
        eq_(None, opt4.data)

    def test_len(self):
        # 2-byte fixed header + 14 bytes of serialized options = 16.
        eq_(16, len(self.dst))

    def test_default_args(self):
        hdr = ipv6.dst_opts()
        buf = hdr.serialize()
        res = struct.unpack('!BB', six.binary_type(buf[:2]))
        eq_(res[0], 6)
        eq_(res[1], 0)
        # Default body is a single 4-byte PadN option.
        opt = ipv6.option(type_=1, len_=4, data=b'\x00\x00\x00\x00')
        eq_(six.binary_type(buf[2:]), opt.serialize())
class Test_option(unittest.TestCase):
    """Tests for ipv6.option, the generic TLV extension-header option.

    Subclasses (Pad1/PadN) override setUp/test_serialize and inherit the
    remaining checks, so the fixture attributes form the test contract.
    """

    def setUp(self):
        # A type-5 option carrying two zero bytes of data.
        self.type_ = 5
        self.data = b'\x00\x00'
        self.len_ = len(self.data)
        self.opt = ipv6.option(self.type_, self.len_, self.data)
        self.form = '!BB%ds' % self.len_
        self.buf = struct.pack(self.form, self.type_, self.len_, self.data)

    def tearDown(self):
        pass

    def test_init(self):
        eq_(self.type_, self.opt.type_)
        eq_(self.len_, self.opt.len_)
        eq_(self.data, self.opt.data)

    def test_parser(self):
        _res = ipv6.option.parser(self.buf)
        # parser() may return (instance, rest); unwrap the instance.
        # isinstance() is the idiomatic type test (was: `type(_res) is tuple`).
        res = _res[0] if isinstance(_res, tuple) else _res
        eq_(self.type_, res.type_)
        eq_(self.len_, res.len_)
        eq_(self.data, res.data)

    def test_serialize(self):
        buf = self.opt.serialize()
        res = struct.unpack_from(self.form, buf)
        eq_(self.type_, res[0])
        eq_(self.len_, res[1])
        eq_(self.data, res[2])

    def test_len(self):
        # 2 bytes of TLV header plus the data payload length.
        eq_(len(self.opt), 2 + self.len_)
class Test_option_pad1(Test_option):
    # Pad1 option: a single zero byte on the wire; no length/data octets.

    def setUp(self):
        self.type_ = 0
        self.len_ = -1          # -1 marks "no length octet" (Pad1 form)
        self.data = None
        self.opt = ipv6.option(self.type_, self.len_, self.data)
        self.form = '!B'
        self.buf = struct.pack(self.form, self.type_)

    def test_serialize(self):
        buf = self.opt.serialize()
        res = struct.unpack_from(self.form, buf)
        eq_(self.type_, res[0])

    def test_default_args(self):
        # An option constructed with no arguments serializes to Pad1 (0x00).
        opt = ipv6.option()
        buf = opt.serialize()
        res = struct.unpack('!B', buf)
        eq_(res[0], 0)
class Test_option_padN(Test_option):
    # PadN option: type 1 with an explicit (here zero) length octet, no data.

    def setUp(self):
        self.type_ = 1
        self.len_ = 0
        self.data = None
        self.opt = ipv6.option(self.type_, self.len_, self.data)
        self.form = '!BB'
        self.buf = struct.pack(self.form, self.type_, self.len_)

    def test_serialize(self):
        buf = self.opt.serialize()
        res = struct.unpack_from(self.form, buf)
        eq_(self.type_, res[0])
        eq_(self.len_, res[1])
class Test_routing(unittest.TestCase):
    """Tests for the ipv6.routing dispatcher, which parses a Routing header
    and hands off to the concrete class for the routing type (type 3 here)."""

    def setUp(self):
        self.nxt = 0
        self.size = 6
        self.type_ = ipv6.routing.ROUTING_TYPE_3
        self.seg = 0
        self.cmpi = 0
        self.cmpe = 0
        self.adrs = ["2001:db8:dead::1",
                     "2001:db8:dead::2",
                     "2001:db8:dead::3"]
        # Pad needed to align the header to an 8-octet boundary.
        # NOTE(review): `% 8` binds tighter than `+`, so only the final
        # address term is reduced mod 8 before the outer `% 8`; this mirrors
        # the formula used elsewhere in this file -- confirm it matches the
        # library's own computation before changing.
        self.pad = (8 - ((len(self.adrs) - 1) * (16 - self.cmpi) +
                         (16 - self.cmpe) % 8)) % 8
        # Raw wire image: fixed 8-byte header then three 16-byte addresses.
        self.form = '!BBBBBB2x16s16s16s'
        self.buf = struct.pack(self.form, self.nxt, self.size,
                               self.type_, self.seg,
                               (self.cmpi << 4) | self.cmpe,
                               self.pad << 4,
                               addrconv.ipv6.text_to_bin(self.adrs[0]),
                               addrconv.ipv6.text_to_bin(self.adrs[1]),
                               addrconv.ipv6.text_to_bin(self.adrs[2]))

    def tearDown(self):
        pass

    def test_parser(self):
        _res = ipv6.routing.parser(self.buf)
        # parser() may return (instance, rest); unwrap the instance.
        # isinstance() is the idiomatic type test (was: `type(_res) is tuple`).
        res = _res[0] if isinstance(_res, tuple) else _res
        eq_(self.nxt, res.nxt)
        eq_(self.size, res.size)
        eq_(self.type_, res.type_)
        eq_(self.seg, res.seg)
        eq_(self.cmpi, res.cmpi)
        eq_(self.cmpe, res.cmpe)
        eq_(self.pad, res._pad)
        eq_(self.adrs[0], res.adrs[0])
        eq_(self.adrs[1], res.adrs[1])
        eq_(self.adrs[2], res.adrs[2])

    def test_not_implemented_type(self):
        # Type 2 routing headers are recognized but unimplemented:
        # the dispatcher must return None rather than raise.
        not_implemented_buf = struct.pack(
            '!BBBBBB2x', 0, 6, ipv6.routing.ROUTING_TYPE_2, 0, 0, 0)
        instance = ipv6.routing.parser(not_implemented_buf)
        # identity comparison is the idiomatic None test (was `None == instance`)
        assert instance is None

    def test_invalid_type(self):
        # An unknown routing type must also yield None.
        invalid_type = 99
        invalid_buf = struct.pack('!BBBBBB2x', 0, 6, invalid_type, 0, 0, 0)
        instance = ipv6.routing.parser(invalid_buf)
        assert instance is None
class Test_routing_type3(unittest.TestCase):
    """Tests for the RPL Source Route header (IPv6 routing type 3,
    RFC 6554): construction, parsing, serialization, and the CmprI/CmprE
    address-compression variants."""

    def setUp(self):
        self.nxt = 0
        self.size = 6
        self.type_ = 3
        self.seg = 0
        self.cmpi = 0
        self.cmpe = 0
        self.adrs = ["2001:db8:dead::1",
                     "2001:db8:dead::2",
                     "2001:db8:dead::3"]
        # calculate pad per RFC 6554 (Pad field, in octets)
        self.pad = (8 - ((len(self.adrs) - 1) * (16 - self.cmpi) +
                         (16 - self.cmpe) % 8)) % 8
        self.routing = ipv6.routing_type3(
            self.nxt, self.size, self.type_, self.seg, self.cmpi,
            self.cmpe, self.adrs)
        self.form = '!BBBBBB2x16s16s16s'
        self.buf = struct.pack(self.form, self.nxt, self.size,
                               self.type_, self.seg,
                               (self.cmpi << 4) | self.cmpe,
                               self.pad << 4,
                               addrconv.ipv6.text_to_bin(self.adrs[0]),
                               addrconv.ipv6.text_to_bin(self.adrs[1]),
                               addrconv.ipv6.text_to_bin(self.adrs[2]))

    def test_init(self):
        """The constructor must store every field as given."""
        eq_(self.nxt, self.routing.nxt)
        eq_(self.size, self.routing.size)
        eq_(self.type_, self.routing.type_)
        eq_(self.seg, self.routing.seg)
        eq_(self.cmpi, self.routing.cmpi)
        eq_(self.cmpe, self.routing.cmpe)
        eq_(self.pad, self.routing._pad)
        eq_(self.adrs[0], self.routing.adrs[0])
        eq_(self.adrs[1], self.routing.adrs[1])
        eq_(self.adrs[2], self.routing.adrs[2])

    def test_parser(self):
        """Parsing the packed buffer must reproduce every field."""
        _res = ipv6.routing.parser(self.buf)
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(self.nxt, res.nxt)
        eq_(self.size, res.size)
        eq_(self.type_, res.type_)
        eq_(self.seg, res.seg)
        eq_(self.cmpi, res.cmpi)
        eq_(self.cmpe, res.cmpe)
        eq_(self.pad, res._pad)
        eq_(self.adrs[0], res.adrs[0])
        eq_(self.adrs[1], res.adrs[1])
        eq_(self.adrs[2], res.adrs[2])

    def test_serialize(self):
        """serialize() must emit the same bytes setUp() packed by hand."""
        buf = self.routing.serialize()
        res = struct.unpack_from(self.form, six.binary_type(buf))
        eq_(self.nxt, res[0])
        eq_(self.size, res[1])
        eq_(self.type_, res[2])
        eq_(self.seg, res[3])
        eq_(self.cmpi, res[4] >> 4)
        eq_(self.cmpe, res[4] & 0xf)
        # NOTE(review): pad occupies the high nibble on the wire; this direct
        # comparison only works because pad == 0 in this fixture.
        eq_(self.pad, res[5])
        eq_(addrconv.ipv6.text_to_bin(self.adrs[0]), res[6])
        eq_(addrconv.ipv6.text_to_bin(self.adrs[1]), res[7])
        eq_(addrconv.ipv6.text_to_bin(self.adrs[2]), res[8])

    def test_parser_with_adrs_zero(self):
        """A type-3 header with an empty address list must parse cleanly."""
        nxt = 0
        size = 0
        type_ = 3
        seg = 0
        cmpi = 0
        cmpe = 0
        adrs = []
        # calculate pad
        pad = (8 - ((len(adrs) - 1) * (16 - cmpi) + (16 - cmpe) % 8)) % 8
        form = '!BBBBBB2x'
        buf = struct.pack(form, nxt, size, type_, seg,
                          (cmpi << 4) | cmpe, pad << 4)
        _res = ipv6.routing.parser(buf)
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(nxt, res.nxt)
        eq_(size, res.size)
        eq_(type_, res.type_)
        eq_(seg, res.seg)
        eq_(cmpi, res.cmpi)
        eq_(cmpe, res.cmpe)
        eq_(pad, res._pad)

    def test_serialize_with_adrs_zero(self):
        """A type-3 header with no addresses must serialize to the bare
        8-octet header."""
        nxt = 0
        size = 0
        type_ = 3
        seg = 0
        cmpi = 0
        cmpe = 0
        adrs = []
        # calculate pad
        pad = (8 - ((len(adrs) - 1) * (16 - cmpi) + (16 - cmpe) % 8)) % 8
        # NOTE(review): the 7th positional argument lands in the `adrs`
        # slot of routing_type3; this passes only because pad == 0 here
        # (falsy, like an empty list) -- confirm intent upstream.
        routing = ipv6.routing_type3(
            nxt, size, type_, seg, cmpi,
            cmpe, pad)
        buf = routing.serialize()
        form = '!BBBBBB2x'
        res = struct.unpack_from(form, six.binary_type(buf))
        eq_(nxt, res[0])
        eq_(size, res[1])
        eq_(type_, res[2])
        eq_(seg, res[3])
        eq_(cmpi, res[4] >> 4)
        eq_(cmpe, res[4] & 0xf)
        eq_(pad, res[5])

    def test_parser_with_compression(self):
        """Parsing with CmprI/CmprE elision must restore the truncated
        addresses (zero-filled prefixes)."""
        # (removed stray leading `pass` left over from a disabled version)
        nxt = 0
        size = 3
        type_ = 3
        seg = 0
        cmpi = 8
        cmpe = 12
        adrs = ["2001:0db8:dead:0123:4567:89ab:cdef:0001",
                "2001:0db8:dead:0123:4567:89ab:cdef:0002",
                "2001:0db8:dead:0123:4567:89ab:cdef:0003"]
        # calculate pad
        pad = (8 - ((len(adrs) - 1) * (16 - cmpi) + (16 - cmpe) % 8)) % 8
        form = '!BBBBBB2x%ds%ds%ds' % (16 - cmpi, 16 - cmpi, 16 - cmpe)
        slice_i = slice(cmpi, 16)
        slice_e = slice(cmpe, 16)
        buf = struct.pack(form, nxt, size, type_, seg,
                          (cmpi << 4) | cmpe, pad << 4,
                          addrconv.ipv6.text_to_bin(adrs[0])[slice_i],
                          addrconv.ipv6.text_to_bin(adrs[1])[slice_i],
                          addrconv.ipv6.text_to_bin(adrs[2])[slice_e])
        _res = ipv6.routing.parser(buf)
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(nxt, res.nxt)
        eq_(size, res.size)
        eq_(type_, res.type_)
        eq_(seg, res.seg)
        eq_(cmpi, res.cmpi)
        eq_(cmpe, res.cmpe)
        eq_(pad, res._pad)
        eq_("::4567:89ab:cdef:1", res.adrs[0])
        eq_("::4567:89ab:cdef:2", res.adrs[1])
        eq_("::205.239.0.3", res.adrs[2])

    def test_serialize_with_compression(self):
        """Serializing with CmprI/CmprE set must emit only the uncompressed
        tails of the addresses."""
        nxt = 0
        size = 3
        type_ = 3
        seg = 0
        cmpi = 8
        cmpe = 8
        adrs = ["2001:db8:dead::1",
                "2001:db8:dead::2",
                "2001:db8:dead::3"]
        # calculate pad
        pad = (8 - ((len(adrs) - 1) * (16 - cmpi) + (16 - cmpe) % 8)) % 8
        slice_i = slice(cmpi, 16)
        slice_e = slice(cmpe, 16)
        routing = ipv6.routing_type3(
            nxt, size, type_, seg, cmpi, cmpe, adrs)
        buf = routing.serialize()
        form = '!BBBBBB2x8s8s8s'
        res = struct.unpack_from(form, six.binary_type(buf))
        eq_(nxt, res[0])
        eq_(size, res[1])
        eq_(type_, res[2])
        eq_(seg, res[3])
        eq_(cmpi, res[4] >> 4)
        eq_(cmpe, res[4] & 0xf)
        eq_(pad, res[5])
        eq_(addrconv.ipv6.text_to_bin(adrs[0])[slice_i], res[6])
        eq_(addrconv.ipv6.text_to_bin(adrs[1])[slice_i], res[7])
        eq_(addrconv.ipv6.text_to_bin(adrs[2])[slice_e], res[8])

    def test_len(self):
        """Header length is (size + 1) * 8 octets."""
        eq_((6 + 1) * 8, len(self.routing))

    def test_default_args(self):
        """A default-constructed routing_type3 serializes with size 0,
        type 3 and all other fields zeroed."""
        hdr = ipv6.routing_type3()
        buf = hdr.serialize()
        LOG.info(repr(buf))
        res = struct.unpack_from(ipv6.routing_type3._PACK_STR, six.binary_type(buf))
        LOG.info(res)
        eq_(res[0], 6)
        eq_(res[1], 0)
        eq_(res[2], 3)
        eq_(res[3], 0)
        eq_(res[4], (0 << 4) | 0)
        eq_(res[5], 0)
class Test_fragment(unittest.TestCase):
    """Tests for the IPv6 Fragment extension header (ipv6.fragment)."""

    def setUp(self):
        self.nxt = 44
        self.offset = 50
        self.more = 1
        self.id_ = 123
        self.fragment = ipv6.fragment(
            self.nxt, self.offset, self.more, self.id_)
        # fragment offset occupies the upper 13 bits, the M flag the lowest
        self.off_m = (self.offset << 3 | self.more)
        self.form = '!BxHI'
        self.buf = struct.pack(self.form, self.nxt, self.off_m, self.id_)

    def test_init(self):
        """The constructor must store every field as given."""
        eq_(self.nxt, self.fragment.nxt)
        eq_(self.offset, self.fragment.offset)
        eq_(self.more, self.fragment.more)
        eq_(self.id_, self.fragment.id_)

    def test_parser(self):
        """Parsing the packed buffer must reproduce every field."""
        _res = ipv6.fragment.parser(self.buf)
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(self.nxt, res.nxt)
        eq_(self.offset, res.offset)
        eq_(self.more, res.more)
        eq_(self.id_, res.id_)

    def test_serialize(self):
        """serialize() must emit the same bytes setUp() packed by hand."""
        buf = self.fragment.serialize()
        res = struct.unpack_from(self.form, six.binary_type(buf))
        eq_(self.nxt, res[0])
        eq_(self.off_m, res[1])
        eq_(self.id_, res[2])

    def test_len(self):
        """A fragment header is always 8 octets."""
        eq_(8, len(self.fragment))

    def test_default_args(self):
        """A default-constructed fragment serializes with zeroed fields."""
        hdr = ipv6.fragment()
        buf = hdr.serialize()
        # wrap in six.binary_type for consistency with the other serialize
        # tests in this module (Python 2/3 buffer compatibility)
        res = struct.unpack_from(ipv6.fragment._PACK_STR, six.binary_type(buf))
        eq_(res[0], 6)
        eq_(res[1], 0)
        eq_(res[2], 0)
class Test_auth(unittest.TestCase):
    """Tests for the IPv6 Authentication Header (ipv6.auth)."""

    def setUp(self):
        # next header, payload-length units, SPI, sequence number and
        # the Integrity Check Value (ICV) payload bytes
        self.nxt = 0
        self.size = 4
        self.spi = 256
        self.seq = 1
        self.data = b'\x21\xd3\xa9\x5c\x5f\xfd\x4d\x18\x46\x22\xb9\xf8'
        self.auth = ipv6.auth(
            self.nxt, self.size, self.spi, self.seq, self.data)
        self.form = '!BB2xII12s'
        self.buf = struct.pack(self.form, self.nxt, self.size, self.spi,
                               self.seq, self.data)

    def test_init(self):
        # the constructor must store every field as given
        eq_(self.nxt, self.auth.nxt)
        eq_(self.size, self.auth.size)
        eq_(self.spi, self.auth.spi)
        eq_(self.seq, self.auth.seq)
        eq_(self.data, self.auth.data)

    def test_parser(self):
        # parsing the hand-packed buffer must reproduce every field
        _res = ipv6.auth.parser(self.buf)
        if type(_res) is tuple:
            res = _res[0]
        else:
            res = _res
        eq_(self.nxt, res.nxt)
        eq_(self.size, res.size)
        eq_(self.spi, res.spi)
        eq_(self.seq, res.seq)
        eq_(self.data, res.data)

    def test_serialize(self):
        # serialize() must emit the same bytes setUp() packed by hand
        buf = self.auth.serialize()
        res = struct.unpack_from(self.form, six.binary_type(buf))
        eq_(self.nxt, res[0])
        eq_(self.size, res[1])
        eq_(self.spi, res[2])
        eq_(self.seq, res[3])
        eq_(self.data, res[4])

    def test_len(self):
        # header length is (size + 2) * 4 octets
        eq_((4 + 2) * 4, len(self.auth))

    def test_len_re(self):
        # length must track a different `size` value too
        size = 5
        auth = ipv6.auth(
            0, size, 256, 1,
            b'\x21\xd3\xa9\x5c\x5f\xfd\x4d\x18\x46\x22\xb9\xf8\xf8\xf8\xf8\xf8')
        eq_((size + 2) * 4, len(auth))

    def test_default_args(self):
        # a default-constructed auth header serializes with size 2,
        # zeroed SPI/seq and a 4-byte zero ICV
        hdr = ipv6.auth()
        buf = hdr.serialize()
        LOG.info(repr(buf))
        res = struct.unpack_from(ipv6.auth._PACK_STR, six.binary_type(buf))
        LOG.info(res)
        eq_(res[0], 6)
        eq_(res[1], 2)
        eq_(res[2], 0)
        eq_(res[3], 0)
        eq_(buf[ipv6.auth._MIN_LEN:], b'\x00\x00\x00\x00')
| apache-2.0 |
wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/instances/get_serial_port_output.py | 2 | 2504 | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for reading the serial port output of an instance."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import request_helper
from googlecloudsdk.compute.lib import utils
from googlecloudsdk.core import log
class GetSerialPortOutput(base_classes.BaseCommand):
  """Read output from a virtual machine instance's serial port."""

  @staticmethod
  def Args(parser):
    """Register the zone flag, the --port flag and the instance name."""
    utils.AddZoneFlag(
        parser,
        resource_type='instance',
        operation_type='get serial port output for')

    port_arg = parser.add_argument(
        '--port',
        help=('The number of the requested serial port. '
              'Can be 1-4, default is 1.'),
        type=arg_parsers.BoundedInt(1, 4))
    port_arg.detailed_help = """\
        Instances can support up to four serial port outputs. By default, this
        command will return the output of the first serial port. Setting this
        flag will return the output of the requested serial port.
        """

    parser.add_argument(
        'name',
        help='The name of the instance.')

  @property
  def resource_type(self):
    """Resource type consumed by the BaseCommand helpers."""
    return 'instances'

  def Run(self, args):
    """Issue the GetSerialPortOutput request and return its text."""
    ref = self.CreateZonalReference(args.name, args.zone)
    message = self.messages.ComputeInstancesGetSerialPortOutputRequest(
        instance=ref.Name(),
        project=self.project,
        port=args.port,
        zone=ref.zone)
    request = (self.compute.instances, 'GetSerialPortOutput', message)

    errors = []
    responses = list(request_helper.MakeRequests(
        requests=[request],
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=None))
    if errors:
      utils.RaiseToolException(
          errors,
          error_message='Could not fetch serial port output:')
    return responses[0].contents

  def Display(self, _, response):
    """Write the raw serial console text to stdout."""
    log.out.write(response)
GetSerialPortOutput.detailed_help = {
'brief': "Read output from a virtual machine instance's serial port",
'DESCRIPTION': """\
{command} is used to get the output from a Google Compute
Engine virtual machine's serial port. The serial port output
from the virtual machine will be printed to standard out. This
information can be useful for diagnostic purposes.
""",
}
| apache-2.0 |
eliasdorneles/scrapy | tests/test_downloadermiddleware_useragent.py | 53 | 2206 | from unittest import TestCase
from scrapy.spiders import Spider
from scrapy.http import Request
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from scrapy.utils.test import get_crawler
class UserAgentMiddlewareTest(TestCase):
    """Tests for UserAgentMiddleware precedence: an explicit request
    header wins over the spider's `user_agent` attribute, which wins
    over the USER_AGENT setting."""

    def get_spider_and_mw(self, default_useragent):
        # Build a crawler configured with the given USER_AGENT setting and
        # return its spider together with a middleware instance.
        crawler = get_crawler(Spider, {'USER_AGENT': default_useragent})
        spider = crawler._create_spider('foo')
        return spider, UserAgentMiddleware.from_crawler(crawler)

    def test_default_agent(self):
        # with no overrides, the USER_AGENT setting is applied
        spider, mw = self.get_spider_and_mw('default_useragent')
        req = Request('http://scrapytest.org/')
        assert mw.process_request(req, spider) is None
        self.assertEquals(req.headers['User-Agent'], 'default_useragent')

    def test_remove_agent(self):
        # setting USER_AGENT (spider.user_agent) to None should remove
        # the User-Agent header entirely
        spider, mw = self.get_spider_and_mw('default_useragent')
        spider.user_agent = None
        mw.spider_opened(spider)
        req = Request('http://scrapytest.org/')
        assert mw.process_request(req, spider) is None
        assert req.headers.get('User-Agent') is None

    def test_spider_agent(self):
        # the spider attribute overrides the setting
        spider, mw = self.get_spider_and_mw('default_useragent')
        spider.user_agent = 'spider_useragent'
        mw.spider_opened(spider)
        req = Request('http://scrapytest.org/')
        assert mw.process_request(req, spider) is None
        self.assertEquals(req.headers['User-Agent'], 'spider_useragent')

    def test_header_agent(self):
        # an explicit request header overrides both spider and setting
        spider, mw = self.get_spider_and_mw('default_useragent')
        spider.user_agent = 'spider_useragent'
        mw.spider_opened(spider)
        req = Request('http://scrapytest.org/', headers={'User-Agent': 'header_useragent'})
        assert mw.process_request(req, spider) is None
        self.assertEquals(req.headers['User-Agent'], 'header_useragent')

    def test_no_agent(self):
        # with neither a setting nor a spider attribute, no header is set
        spider, mw = self.get_spider_and_mw(None)
        spider.user_agent = None
        mw.spider_opened(spider)
        req = Request('http://scrapytest.org/')
        assert mw.process_request(req, spider) is None
        assert 'User-Agent' not in req.headers
| bsd-3-clause |
sinkpoint/dipy | scratch/very_scratch/simulation_comparisons_modified.py | 20 | 13117 | import nibabel
import os
import numpy as np
import dipy as dp
import dipy.core.generalized_q_sampling as dgqs
import dipy.io.pickles as pkl
import scipy as sp
from matplotlib.mlab import find
import dipy.core.sphere_plots as splots
import dipy.core.sphere_stats as sphats
import dipy.core.geometry as geometry
import get_vertices as gv
#old SimData files
'''
results_SNR030_1fibre
results_SNR030_1fibre+iso
results_SNR030_2fibres_15deg
results_SNR030_2fibres_30deg
results_SNR030_2fibres_60deg
results_SNR030_2fibres_90deg
results_SNR030_2fibres+iso_15deg
results_SNR030_2fibres+iso_30deg
results_SNR030_2fibres+iso_60deg
results_SNR030_2fibres+iso_90deg
results_SNR030_isotropic
'''
#fname='/home/ian/Data/SimData/results_SNR030_1fibre'
''' file has one row for every voxel, every voxel is repeating 1000
times with the same noise level , then we have 100 different
directions. 1000 * 100 is the number of all rows.
The 100 conditions are given by 10 polar angles (in degrees) 0, 20, 40, 60, 80,
80, 60, 40, 20 and 0, and each of these with longitude angle 0, 40, 80,
120, 160, 200, 240, 280, 320, 360.
'''
#new complete SimVoxels files
simdata = ['fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
simdir = '/home/ian/Data/SimVoxels/'
def gq_tn_calc_save():
    # Fit a Generalized Q-Sampling model and a tensor model to every
    # simulation file listed in `simdata`, pickling the fitted objects
    # under simdir/gq/ and simdir/tn/ respectively.
    # NOTE: Python 2 code (print statements); b-values in the Marta table
    # are scaled by 1000 before fitting.
    for simfile in simdata:
        dataname = simfile
        print dataname
        sim_data=np.loadtxt(simdir+dataname)
        marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
        b_vals_dirs=np.loadtxt(marta_table_fname)
        bvals=b_vals_dirs[:,0]*1000
        gradients=b_vals_dirs[:,1:]
        gq = dp.GeneralizedQSampling(sim_data,bvals,gradients)
        gqfile = simdir+'gq/'+dataname+'.pkl'
        pkl.save_pickle(gqfile,gq)
        '''
        gq.IN               gq.__doc__          gq.glob_norm_param
        gq.QA               gq.__init__         gq.odf
        gq.__class__        gq.__module__       gq.q2odf_params
        '''
        tn = dp.Tensor(sim_data,bvals,gradients)
        tnfile = simdir+'tn/'+dataname+'.pkl'
        pkl.save_pickle(tnfile,tn)
        '''
        tn.ADC               tn.__init__          tn._getevals
        tn.B                 tn.__module__        tn._getevecs
        tn.D                 tn.__new__           tn._getndim
        tn.FA                tn.__reduce__        tn._getshape
        tn.IN                tn.__reduce_ex__     tn._setevals
        tn.MD                tn.__repr__          tn._setevecs
        tn.__class__         tn.__setattr__       tn.adc
        tn.__delattr__       tn.__sizeof__        tn.evals
        tn.__dict__          tn.__str__           tn.evecs
        tn.__doc__           tn.__subclasshook__  tn.fa
        tn.__format__        tn.__weakref__       tn.md
        tn.__getattribute__  tn._evals            tn.ndim
        tn.__getitem__       tn._evecs            tn.shape
        tn.__hash__          tn._getD
        '''
''' file has one row for every voxel, every voxel is repeating 1000
times with the same noise level , then we have 100 different
directions. 100 * 1000 is the number of all rows.
At the moment this module is hardwired to the use of the EDS362
spherical mesh. I am assumung (needs testing) that directions 181 to 361
are the antipodal partners of directions 0 to 180. So when counting the
number of different vertices that occur as maximal directions we wll map
the indices modulo 181.
'''
def analyze_maxima(indices, max_dirs, subsets):
    '''Compute the eigenstats for each replicated batch of the
    simulation data.

    For every direction in `subsets` the batch of maximal directions is
    analysed: the number of distinct vertex indices (folded antipodally,
    modulo 181) is counted and concatenated with the eigenstats centre
    and its dispersion into one result row per direction.
    '''
    rows = []
    for d in subsets:
        batch = max_dirs[d, :, :]
        folded = np.remainder(indices[d, :], 181)
        index_variety = np.array([len(set(folded))])
        centre, b1 = sphats.eigenstats(batch)
        # shift azimuth from (-180, 180) into (0, 360)
        centre[1] += 360 * (centre[1] < 0)
        rows.append(np.concatenate((centre, b1, index_variety)))
    return rows
#dt_first_directions = tn.evecs[:,:,0].reshape((100,1000,3))
# these are the principal directions for the full set of simulations
#gq_tn_calc_save()
eds=np.load(os.path.join(os.path.dirname(dp.__file__),'core','matrices','evenly_distributed_sphere_362.npz'))
odf_vertices=eds['vertices']
def run_comparisons(sample_data=35):
    # Compare tensor (DT) and Generalized Q-Sampling (GQ) first-direction
    # maxima for one simulation file (index `sample_data` into `simdata`),
    # writing the side-by-side eigenstats table to the Out/ directory.
    # Requires the pickles produced beforehand by gq_tn_calc_save().
    # NOTE: Python 2 code (print statements / print >> redirection).
    for simfile in [simdata[sample_data]]:
        dataname = simfile
        print dataname
        sim_data=np.loadtxt(simdir+dataname)
        gqfile = simdir+'gq/'+dataname+'.pkl'
        gq = pkl.load_pickle(gqfile)
        tnfile = simdir+'tn/'+dataname+'.pkl'
        tn = pkl.load_pickle(tnfile)
        # principal directions from the tensor fit, per voxel replicate
        dt_first_directions_in=odf_vertices[tn.IN]
        dt_indices = tn.IN.reshape((100,1000))
        dt_results = analyze_maxima(dt_indices, dt_first_directions_in.reshape((100,1000,3)),range(10,90))
        # first maxima from the GQ fit
        gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
        gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
        #print gq_first_directions_in.shape
        gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(10,90))
        #for gqi see example dicoms_2_tracks gq.IN[:,0]
        np.set_printoptions(precision=3, suppress=True, linewidth=200, threshold=5000)
        out = open('/home/ian/Data/SimVoxels/Out/'+'***_'+dataname,'w')
        #print np.vstack(dt_results).shape, np.vstack(gq_results).shape
        # one row per direction: DT eigenstats columns then GQ columns
        results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
        #print results.shape
        #results = np.vstack(dt_results)
        print >> out, results[:,:]
        out.close()
        #up = dt_batch[:,2]>= 0
        #splots.plot_sphere(dt_batch[up], 'batch '+str(direction))
        #splots.plot_lambert(dt_batch[up],'batch '+str(direction), centre)
        #spread = gq.q2odf_params e,v = np.linalg.eigh(np.dot(spread,spread.transpose())) effective_dimension = len(find(np.cumsum(e) > 0.05*np.sum(e))) #95%
        #rotated = np.dot(dt_batch,evecs)
        #rot_evals, rot_evecs = np.linalg.eig(np.dot(rotated.T,rotated)/rotated.shape[0])
        #eval_order = np.argsort(rot_evals)
        #rotated = rotated[:,eval_order]
        #up = rotated[:,2]>= 0
        #splot.plot_sphere(rotated[up],'first1000')
        #splot.plot_lambert(rotated[up],'batch '+str(direction))
def run_gq_sims(sample_data=[35,23,46,39,40,10,37,27,21,20]):
results = []
out = open('/home/ian/Data/SimVoxels/Out/'+'npa+fa','w')
for j in range(len(sample_data)):
sample = sample_data[j]
simfile = simdata[sample]
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
for j in np.vstack((np.arange(100)*1000,np.arange(100)*1000+1)).T.ravel():
# 0,1,1000,1001,2000,2001,...
s = sim_data[j,:]
gqs = dp.GeneralizedQSampling(s.reshape((1,102)),bvals,gradients,Lambda=3.5)
tn = dp.Tensor(s.reshape((1,102)),bvals,gradients,fit_method='LS')
t0, t1, t2, npa = gqs.npa(s, width = 5)
print >> out, dataname, j, npa, tn.fa()[0]
'''
for (i,o) in enumerate(gqs.odf(s)):
print i,o
for (i,o) in enumerate(gqs.odf_vertices):
print i,o
'''
#o = gqs.odf(s)
#v = gqs.odf_vertices
#pole = v[t0[0]]
#eqv = dgqs.equatorial_zone_vertices(v, pole, 5)
#print 'Number of equatorial vertices: ', len(eqv)
#print np.max(o[eqv]),np.min(o[eqv])
#cos_e_pole = [np.dot(pole.T, v[i]) for i in eqv]
#print np.min(cos1), np.max(cos1)
#print 'equatorial max in equatorial vertices:', t1[0] in eqv
#x = np.cross(v[t0[0]],v[t1[0]])
#x = x/np.sqrt(np.sum(x**2))
#print x
#ptchv = dgqs.patch_vertices(v, x, 5)
#print len(ptchv)
#eqp = eqv[np.argmin([np.abs(np.dot(v[t1[0]].T,v[p])) for p in eqv])]
#print (eqp, o[eqp])
#print t2[0] in ptchv, t2[0] in eqv
#print np.dot(pole.T, v[t1[0]]), np.dot(pole.T, v[t2[0]])
#print ptchv[np.argmin([o[v] for v in ptchv])]
#gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
#gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
#gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
#np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000)
#out = open('/home/ian/Data/SimVoxels/Out/'+'+++_'+dataname,'w')
#results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#results = np.vstack(dt_results)
#print >> out, results[:,:]
out.close()
run_comparisons()
#run_gq_sims()
| bsd-3-clause |
yuanzhao/gpdb | gpMgmt/bin/gppylib/test/regress/test_regress_pygresql.py | 12 | 4162 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
""" Unittesting for pygres module
"""
import logging
import unittest
from pygresql import pg
from pygresql import pgdb
from gppylib import gplog
from gppylib.db.dbconn import *
from gppylib.db.test import skipIfDatabaseDown
logger=gplog.get_default_logger()
gplog.enable_verbose_logging()
#TODO: incomplete list.
catalog_names = ['pg_aggregate','pg_amop','pg_attrdef','pg_auth_members',
'pg_autovacuum','pg_class','pg_conversion','pg_database','pg_description',
'pg_group','pg_indexes','pg_language','pg_listener',
'pg_max_external_files','pg_opclass','pg_partition']
datatypes = {'oid':True , 'int2':True, 'regproc':True, 'text':True, 'bool':True,
'int4':True, 'float4':True, 'name':True, 'char':True, 'xid':True,
'_aclitem':True, '_text':True, '_oid':True, 'int8':True,
'int2vector':True,'oidvector':True}
@skipIfDatabaseDown()
class pygresqlTestCase(unittest.TestCase):
    """Regression tests exercising PyGreSQL's classic (pg) and DB-API 2.0
    (pgdb) interfaces against a live Greenplum/Postgres instance.
    Skipped entirely when the database is unreachable."""

    def test_connect(self):
        # classic interface: pg.DB + query()
        dburl = DbURL()
        logger.info("YO")
        db = pg.DB(dbname=dburl.pgdb)
        q = db.query("SELECT 1")
        logger.info(q.getresult())

    def test_DBI_connect(self):
        # DB-API interface: create/drop a scratch table in one transaction
        logger.info("test_dbi_connect")
        dburl = DbURL()
        db = pgdb.connect(dburl.pguser,host=dburl.pghost,database=dburl.pgdb)
        curs = db.cursor()
        curs.execute("COMMIT")
        curs.execute("BEGIN")
        curs.execute("DROP TABLE IF EXISTS jge ")
        curs.execute("CREATE TABLE jge ( a int )")
        curs.execute("DROP TABLE jge")
        db.commit()

    def test_utilitymode(self):
        # the connection DSN (see setupConnection) requests utility mode;
        # just log the reported gp_role
        db = self.setupConnection("test_utilitymode")
        curs=db.cursor()
        curs.execute("show gp_role")
        logger.info(curs.fetchall())
        curs.close()
        db.close()

    def test_pgcatalog_selects(self):
        # a one-row SELECT from each catalog table must succeed and return
        # only known column datatypes / stringifiable values
        db = self.setupConnection("test_pgcatalog_selects")
        curs=db.cursor()
        for table in catalog_names:
            sql = "SELECT * FROM %s LIMIT 1" % table
            curs.execute(sql)
            rows=curs.fetchall()
            self.verifyResults(rows,curs.description)
        curs.close()
        db.close()

    def test_nulls(self):
        # SQL NULLs must come back as Python None
        db = self.setupConnection("test_nulls")
        curs=db.cursor()
        curs.execute("BEGIN")
        curs.execute("CREATE TABLE test ( a int, b text )")
        curs.execute("INSERT INTO test VALUES (null,null)")
        curs.execute("SELECT * FROM test")
        row = curs.fetchone()
        self.assertTrue(row[0] is None)
        self.assertTrue(row[1] is None)

    def test_createdb(self):
        # CREATE/DROP DATABASE must run outside a transaction block,
        # hence the explicit COMMIT first
        db = None
        try:
            db = self.setupConnection("test_createdb")
            curs=db.cursor()
            curs.execute("COMMIT")
            curs.execute("CREATE DATABASE test")
            curs.execute("DROP DATABASE test")
        finally:
            if db is not None:
                db.close()

    def test_vacuum(self):
        # VACUUM also cannot run inside a transaction block
        db = self.setupConnection("test_vacuumdb")
        curs = db.cursor()
        curs.execute("COMMIT")
        curs.execute("VACUUM FULL pg_catalog.pg_class")
        db.close()

    #------------------------------- non-test helper --------------------------------
    def setupConnection(self,name):
        # open a DB-API connection; the trailing "::" in the DSN leaves
        # user/password fields empty
        logger.info(name)
        dburl = DbURL()
        dsn=str(dburl) + "::"
        db = pgdb.connect(dsn=dsn)
        return db

    def verifyResults(self,rows,description):
        # every column datatype must be in the known `datatypes` map and
        # every cell must survive str() conversion
        for col in description:
            colname = col[0]
            datatype = col[1]
            self.assertTrue(datatypes[datatype])
        for row in rows:
            for col in row:
                # forces str() conversion; raises if a value cannot be
                # stringified
                foo = "" + str(col)
#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
achang97/YouTunes | lib/python2.7/site-packages/youtube_dl/extractor/goshgay.py | 64 | 1542 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
)
from ..utils import (
parse_duration,
)
class GoshgayIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?goshgay\.com/video(?P<id>\d+?)($|/)'
    _TEST = {
        'url': 'http://www.goshgay.com/video299069/diesel_sfw_xxx_video',
        'md5': '4b6db9a0a333142eb9f15913142b0ed1',
        'info_dict': {
            'id': '299069',
            'ext': 'flv',
            'title': 'DIESEL SFW XXX Video',
            'thumbnail': r're:^http://.*\.jpg$',
            'duration': 80,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<h2>(.*?)<', webpage, 'title')
        raw_duration = self._html_search_regex(
            r'<span class="duration">\s*-?\s*(.*?)</span>',
            webpage, 'duration', fatal=False)

        # the stream URL and thumbnail both live in the player's
        # flashvars query string
        flashvars = compat_parse_qs(self._html_search_regex(
            r'<embed.+?id="flash-player-embed".+?flashvars="([^"]+)"',
            webpage, 'flashvars'))

        return {
            'id': video_id,
            'url': flashvars['flv_url'][0],
            'title': title,
            'thumbnail': flashvars.get('url_bigthumb', [None])[0],
            'duration': parse_duration(raw_duration),
            'age_limit': 18,
        }
| mit |
EdDev/vdsm | tests/virttests/vmstats_test.py | 1 | 18952 | #
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import copy
import uuid
import six
from vdsm.virt import vmstats
from testlib import VdsmTestCase as TestCaseBase
from testlib import permutations, expandPermutations
_FAKE_BULK_STATS = {
'f3243a90-2e9e-4061-b7b3-a6c585e14857': (
{
'state.state': 1,
'state.reason': 1,
'cpu.time': 13755069120,
'cpu.user': 3370000000,
'cpu.system': 6320000000,
'balloon.current': 4194304,
'balloon.maximum': 4194304,
'vcpu.current': 2,
'vcpu.maximum': 16,
'vcpu.0.state': 1,
'vcpu.0.time': 10910000000,
'vcpu.1.state': 1,
'vcpu.1.time': 0,
'net.count': 1,
'net.0.name': 'vnet0',
'net.0.rx.bytes': 0,
'net.0.rx.pkts': 0,
'net.0.rx.errs': 0,
'net.0.rx.drop': 0,
'net.0.tx.bytes': 0,
'net.0.tx.pkts': 0,
'net.0.tx.errs': 0,
'net.0.tx.drop': 0,
'block.count': 2,
'block.0.name': 'hdc',
'block.0.rd.reqs': 0,
'block.0.rd.bytes': 0,
'block.0.rd.times': 0,
'block.0.wr.reqs': 0,
'block.0.wr.bytes': 0,
'block.0.wr.times': 0,
'block.0.fl.reqs': 0,
'block.0.fl.times': 0,
'block.0.allocation': 0,
'block.1.name': 'vda',
'block.1.path': (
'/rhev'
'/data-center'
'/00000001-0001-0001-0001-0000000001e8'
'/bbed5784-b0ee-4a0a-aff2-801da0bcf39e'
'/images'
'/cbe82d1f-a0ba-4af2-af2f-788d15eef043'
'/7ba49d31-4fa7-49df-8df4-37a22de79f62'
),
'block.1.rd.reqs': 1,
'block.1.rd.bytes': 512,
'block.1.rd.times': 58991,
'block.1.wr.reqs': 0,
'block.1.wr.bytes': 0,
'block.1.wr.times': 0,
'block.1.fl.reqs': 0,
'block.1.fl.times': 0,
'block.1.allocation': 0,
'block.1.capacity': 42949672960,
},
{
'state.state': 1,
'state.reason': 1,
'cpu.time': 13755069120,
'cpu.user': 3370000000,
'cpu.system': 6320000000,
'balloon.current': 4194304,
'balloon.maximum': 4194304,
'vcpu.current': 2,
'vcpu.maximum': 16,
'vcpu.0.state': 1,
'vcpu.0.time': 10910000000,
'vcpu.1.state': 1,
'vcpu.1.time': 0,
'net.count': 2,
'net.0.name': 'vnet1',
'net.0.rx.bytes': 0,
'net.0.rx.pkts': 0,
'net.0.rx.errs': 0,
'net.0.rx.drop': 0,
'net.0.tx.bytes': 0,
'net.0.tx.pkts': 0,
'net.0.tx.errs': 0,
'net.0.tx.drop': 0,
'net.1.name': 'vnet0',
'net.1.rx.bytes': 1024,
'net.1.rx.pkts': 128,
'net.1.rx.errs': 0,
'net.1.rx.drop': 0,
'net.1.tx.bytes': 2048,
'net.1.tx.pkts': 256,
'net.1.tx.errs': 0,
'net.1.tx.drop': 0,
'block.count': 3,
'block.0.name': 'hdd',
'block.0.rd.reqs': 0,
'block.0.rd.bytes': 0,
'block.0.rd.times': 0,
'block.0.wr.reqs': 0,
'block.0.wr.bytes': 0,
'block.0.wr.times': 0,
'block.0.fl.reqs': 0,
'block.0.fl.times': 0,
'block.0.allocation': 0,
'block.1.name': 'vda',
'block.1.path': (
'/rhev'
'/data-center'
'/00000001-0001-0001-0001-0000000001e8'
'/bbed5784-b0ee-4a0a-aff2-801da0bcf39e'
'/images'
'/cbe82d1f-a0ba-4af2-af2f-788d15eef043'
'/7ba49d31-4fa7-49df-8df4-37a22de79f62'
),
'block.1.rd.reqs': 1,
'block.1.rd.bytes': 512,
'block.1.rd.times': 58991,
'block.1.wr.reqs': 0,
'block.1.wr.bytes': 0,
'block.1.wr.times': 0,
'block.1.fl.reqs': 0,
'block.1.fl.times': 0,
'block.1.allocation': 0,
'block.1.capacity': 42949672960,
'block.2.name': 'hdc',
'block.2.rd.reqs': 0,
'block.2.rd.bytes': 0,
'block.2.rd.times': 0,
'block.2.wr.reqs': 0,
'block.2.wr.bytes': 0,
'block.2.wr.times': 0,
'block.2.fl.reqs': 0,
'block.2.fl.times': 0,
'block.2.allocation': 0,
},
),
}
# on SR-IOV we seen unexpected net.count == 2 but data only for one nic.
_FAKE_BULK_STATS_SRIOV = {
'f3243a90-2e9e-4061-b7b3-a6c585e14857': (
{
'state.state': 1,
'state.reason': 1,
'cpu.time': 13755069120,
'cpu.user': 3370000000,
'cpu.system': 6320000000,
'balloon.current': 4194304,
'balloon.maximum': 4194304,
'vcpu.current': 2,
'vcpu.maximum': 16,
'vcpu.0.state': 1,
'vcpu.0.time': 10910000000,
'vcpu.1.state': 1,
'vcpu.1.time': 0,
'net.count': 2,
'net.1.name': 'vnet1',
'net.1.rx.bytes': 0,
'net.1.rx.pkts': 0,
'net.1.rx.errs': 0,
'net.1.rx.drop': 0,
'net.1.tx.bytes': 0,
'net.1.tx.pkts': 0,
'net.1.tx.errs': 0,
'net.1.tx.drop': 0,
'block.count': 2,
'block.0.name': 'hdc',
'block.0.rd.reqs': 0,
'block.0.rd.bytes': 0,
'block.0.rd.times': 0,
'block.0.wr.reqs': 0,
'block.0.wr.bytes': 0,
'block.0.wr.times': 0,
'block.0.fl.reqs': 0,
'block.0.fl.times': 0,
'block.0.allocation': 0,
'block.1.name': 'vda',
'block.1.path': (
'/rhev'
'/data-center'
'/00000001-0001-0001-0001-0000000001e8'
'/bbed5784-b0ee-4a0a-aff2-801da0bcf39e'
'/images'
'/cbe82d1f-a0ba-4af2-af2f-788d15eef043'
'/7ba49d31-4fa7-49df-8df4-37a22de79f62'
),
'block.1.rd.reqs': 1,
'block.1.rd.bytes': 512,
'block.1.rd.times': 58991,
'block.1.wr.reqs': 0,
'block.1.wr.bytes': 0,
'block.1.wr.times': 0,
'block.1.fl.reqs': 0,
'block.1.fl.times': 0,
'block.1.allocation': 0,
'block.1.capacity': 42949672960,
},
)
}
class VmStatsTestCase(TestCaseBase):
    """Shared fixture plus stat-dict assertion helpers for the
    bulk-stats tests."""

    def setUp(self):
        # Pick a single VM's sample series -- any one will do.
        vm_samples = next(six.itervalues(_FAKE_BULK_STATS))
        self.samples = vm_samples
        self.bulk_stats = vm_samples[0]
        self.interval = 10  # seconds

    def assertNameIsAt(self, stats, group, idx, name):
        """Assert that the `<group>.<idx>.name` entry equals `name`."""
        key = '%s.%d.name' % (group, idx)
        self.assertEqual(stats[key], name)

    def assertStatsHaveKeys(self, stats, keys):
        """Assert that every key in `keys` is present in `stats`."""
        for expected in keys:
            self.assertIn(expected, stats)

    def assertRepeatedStatsHaveKeys(self, items, stats, keys):
        """Assert each item's per-name stats dict contains all `keys`."""
        for item in items:
            self.assertStatsHaveKeys(stats[item.name], keys)
@expandPermutations
class UtilsFunctionsTests(VmStatsTestCase):
    """Tests for the private name -> index reverse-map helper of vmstats."""
    # we should not test private functions, but this one is
    # the cornerstone of bulk stats translation, so we make
    # one exception for the sake of the practicality.
    @permutations([['block', 'hdc'], ['net', 'vnet0']])
    def test_find_existing(self, group, name):
        # a known device name must map back to its index in the sample
        indexes = vmstats._find_bulk_stats_reverse_map(
            self.bulk_stats, group)
        self.assertNameIsAt(
            self.bulk_stats, group, indexes[name], name)

    @permutations([['block'], ['net']])
    def test_find_bogus(self, group):
        # an unknown device name must not appear in the reverse map
        name = 'inexistent'
        indexes = vmstats._find_bulk_stats_reverse_map(
            self.bulk_stats, group)
        self.assertNotIn(name, indexes)

    @permutations([['block', 'hdc'], ['net', 'vnet0']])
    def test_index_can_change(self, group, name):
        # the reverse map must be correct for every sample, even when
        # the same device appears at a different index between samples
        all_indexes = []
        for bulk_stats in self.samples:
            indexes = vmstats._find_bulk_stats_reverse_map(
                bulk_stats, group)
            self.assertNameIsAt(bulk_stats, group, indexes[name], name)
            all_indexes.append(indexes)
        # and indeed indexes must change
        self.assertEqual(len(all_indexes), len(self.samples))

    def test_network_missing(self):
        # seen using SR-IOV: net.count == 2 but only one nic reported;
        # the reverse map must still be non-empty
        bulk_stats = next(six.itervalues(_FAKE_BULK_STATS_SRIOV))
        indexes = vmstats._find_bulk_stats_reverse_map(
            bulk_stats[0], 'net')
        self.assertTrue(indexes)
@expandPermutations
class NetworkStatsTests(VmStatsTestCase):
    """Tests for the translation of bulk network samples into Vdsm stats."""
    # TODO: grab them from the schema
    _EXPECTED_KEYS = (
        'macAddr',
        'name',
        'speed',
        'state',
        'rxErrors',
        'rxDropped',
        'txErrors',
        'txDropped',
        'rx',
        'tx',
        'sampleTime',
    )

    def test_nic_have_all_keys(self):
        """_nic_traffic() must emit every expected stats key for one NIC."""
        nic = FakeNic(name='vnet0', model='virtio',
                      mac_addr='00:1a:4a:16:01:51')
        testvm = FakeVM(nics=(nic,))
        stats = vmstats._nic_traffic(
            testvm,
            nic.name, nic.nicModel, nic.macAddr,
            self.bulk_stats, 0,
            self.bulk_stats, 0,
            self.interval)
        self.assertStatsHaveKeys(stats, self._EXPECTED_KEYS)

    def test_networks_have_all_keys(self):
        """networks() must emit every expected key for each configured NIC."""
        nics = (
            FakeNic(name='vnet0', model='virtio',
                    mac_addr='00:1a:4a:16:01:51'),
        )
        vm = FakeVM(nics=nics)
        stats = {}
        vmstats.networks(vm, stats,
                         self.bulk_stats, self.bulk_stats,
                         self.interval)
        self.assertRepeatedStatsHaveKeys(nics, stats['network'],
                                         self._EXPECTED_KEYS)

    def test_networks_good_interval(self):
        """networks() reports success (truthy) for a positive interval."""
        nics = (
            FakeNic(name='vnet0', model='virtio',
                    mac_addr='00:1a:4a:16:01:51'),
        )
        vm = FakeVM(nics=nics)
        stats = {}
        self.assertTrue(
            vmstats.networks(vm, stats,
                             self.bulk_stats, self.bulk_stats,
                             1)
        )

    @permutations([[-42], [0]])
    def test_networks_bad_interval(self, interval):
        """networks() must return None for a non-positive interval."""
        # NOTE(review): the permuted `interval` is never forwarded to
        # vmstats.networks() -- the call below hardcodes 0, so both
        # permutations exercise the same case. Confirm whether passing
        # `interval` was intended.
        nics = (
            FakeNic(name='vnet0', model='virtio',
                    mac_addr='00:1a:4a:16:01:51'),
        )
        vm = FakeVM(nics=nics)
        stats = {}
        self.assertTrue(
            vmstats.networks(vm, stats,
                             self.bulk_stats, self.bulk_stats,
                             0) is None
        )

    @permutations([
        ['net.0.rx.bytes'], ['net.0.rx.pkts'],
        ['net.0.rx.errs'], ['net.0.rx.drop'], ['net.0.tx.bytes'],
        ['net.0.tx.pkts'], ['net.0.tx.errs'], ['net.0.tx.drop'],
    ])
    def test_networks_missing_key(self, key):
        """networks() must cope with a sample missing one net.* counter."""
        nics = (
            FakeNic(name='vnet0', model='virtio',
                    mac_addr='00:1a:4a:16:01:51'),
        )
        vm = FakeVM(nics=nics)
        vm.migrationPending = True
        faulty_bulk_stats = {}
        faulty_bulk_stats.update(self.bulk_stats)
        del faulty_bulk_stats[key]
        stats = {}
        self.assertTrue(
            vmstats.networks(vm, stats,
                             self.bulk_stats, faulty_bulk_stats,
                             1)
        )
class DiskStatsTests(VmStatsTestCase):
    """Tests for the translation of bulk block-device samples into stats."""
    # TODO: grab them from the schema
    # Note: these are the minimal set Vdsm exported,
    # no clear rationale for this subset.
    _EXPECTED_KEYS = (
        'truesize',
        'apparentsize',
        'readLatency',
        'writeLatency',
        'flushLatency',
        'imageID',
        # TODO: add test for 'lunGUID'
        'readRate',
        'writeRate',
        'readOps',
        'writeOps',
        'readBytes',
        'writtenBytes',
    )

    def test_disk_all_keys_present(self):
        """With a positive interval and full samples, every key is emitted."""
        interval = 10  # seconds
        drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
        testvm = FakeVM(drives=drives)
        stats = {}
        stats_before = copy.deepcopy(self.bulk_stats)
        stats_after = copy.deepcopy(self.bulk_stats)
        # force a non-zero delta so the rate keys get computed
        _ensure_delta(stats_before, stats_after,
                      'block.0.rd.reqs', 1024)
        _ensure_delta(stats_before, stats_after,
                      'block.0.rd.bytes', 128 * 1024)
        vmstats.disks(testvm, stats,
                      stats_before, stats_after,
                      interval)
        self.assertRepeatedStatsHaveKeys(drives, stats['disks'],
                                         self._EXPECTED_KEYS)

    def test_interval_zero(self):
        """A zero interval must not raise; rate keys are simply omitted."""
        interval = 0  # seconds
        # with zero interval, we won't have {read,write}Rate
        expected_keys = tuple(k for k in self._EXPECTED_KEYS
                              if k not in ('readRate', 'writeRate'))
        drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
        testvm = FakeVM(drives=drives)
        stats = {}
        self.assertNotRaises(vmstats.disks,
                             testvm, stats,
                             self.bulk_stats, self.bulk_stats,
                             interval)
        self.assertRepeatedStatsHaveKeys(drives,
                                         stats['disks'],
                                         expected_keys)

    def test_disk_missing_rate(self):
        """Samples lacking the byte counters must not make disks() raise."""
        partial_stats = self._drop_stats(
            ('block.0.rd.bytes', 'block.1.rd.bytes',
             'block.0.wr.bytes', 'block.1.wr.bytes'))
        interval = 10  # seconds
        drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
        testvm = FakeVM(drives=drives)
        stats = {}
        self.assertNotRaises(vmstats.disks,
                             testvm, stats,
                             partial_stats, partial_stats,
                             interval)

    def test_disk_missing_latency(self):
        """Samples lacking timing counters must not make disks() raise."""
        partial_stats = self._drop_stats(
            ('block.0.rd.times', 'block.1.rd.times',
             'block.0.wr.reqs', 'block.1.wr.reqs'))
        interval = 10  # seconds
        drives = (FakeDrive(name='hdc', size=700 * 1024 * 1024),)
        testvm = FakeVM(drives=drives)
        stats = {}
        self.assertNotRaises(vmstats.disks,
                             testvm, stats,
                             partial_stats, partial_stats,
                             interval)

    def _drop_stats(self, keys):
        """Return a deep copy of the canned bulk stats without `keys`."""
        partial_stats = copy.deepcopy(self.bulk_stats)
        for key in keys:
            del partial_stats[key]
        return partial_stats
# canned cpu.user/cpu.system bulk sample pair used by CpuStatsTests below
FIRST_CPU_SAMPLE = {'cpu.user': 4740000000, 'cpu.system': 6490000000}
LAST_CPU_SAMPLE = {'cpu.user': 4760000000, 'cpu.system': 6500000000}
@expandPermutations
class CpuStatsTests(VmStatsTestCase):
    """
    Tests for vmstats.cpu(), which translates bulk CPU samples into the
    'cpuUser'/'cpuSys'/'cpuUsage' stats keys.
    """
    # all data stolen from Vdsm and/or virsh -r domstats
    INTERVAL = 15.  # seconds.

    # [first, last]
    # intentionally use only one sample, the other empty
    @permutations([[{}, {}],
                   [{}, FIRST_CPU_SAMPLE],
                   [FIRST_CPU_SAMPLE, {}]])
    def test_empty_samples(self, first, last):
        """With empty samples, cpu() reports zeroes and returns None."""
        # NOTE(review): the permuted `first`/`last` arguments are never
        # forwarded to vmstats.cpu(), so all three permutations exercise
        # the same ({}, {}) case -- confirm whether
        # cpu(stats, first, last, ...) was the intent here.
        stats = {}
        res = vmstats.cpu(stats, {}, {}, self.INTERVAL)
        self.assertEqual(stats,
                         {'cpuUser': 0.0, 'cpuSys': 0.0})
        self.assertEqual(res, None)

    def test_only_cpu_user_system(self):
        """Samples carrying only cpu.user/cpu.system yield usage, no rates."""
        stats = {}
        res = vmstats.cpu(stats, FIRST_CPU_SAMPLE, LAST_CPU_SAMPLE,
                          self.INTERVAL)
        self.assertEqual(stats, {
            'cpuUser': 0.0,
            'cpuSys': 0.2,
            'cpuUsage': '11260000000',
        })
        self.assertEqual(res, None)

    def test_update_all_keys(self):
        """Samples carrying cpu.time as well produce all keys and a result."""
        stats = {}
        first_sample = {'cpu.time': 24345584838}
        first_sample.update(FIRST_CPU_SAMPLE)
        last_sample = {'cpu.time': 24478198023}
        last_sample.update(LAST_CPU_SAMPLE)
        res = vmstats.cpu(stats, first_sample, last_sample,
                          self.INTERVAL)
        self.assertEqual(stats, {
            'cpuUser': 0.6840879,
            'cpuSys': 0.2,
            'cpuUsage': '11260000000',
        })
        # assertNotEqual: assertNotEquals is a deprecated unittest alias
        self.assertNotEqual(res, None)
# helpers
def _ensure_delta(stats_before, stats_after, key, delta):
"""
Set stats_before[key] and stats_after[key] so that
stats_after[key] - stats_before[key] == abs(delta).
"""
stats_before[key] = 0
stats_after[key] = abs(delta)
class FakeNic(object):
    """Minimal stand-in for a VM NIC device, exposing only the attributes
    the stats code reads (name, nicModel, macAddr)."""

    def __init__(self, name, model, mac_addr):
        # mirror the attribute names used by the production NIC device
        self.name, self.nicModel, self.macAddr = name, model, mac_addr
class FakeDrive(object):
    """Minimal stand-in for a VM drive: sizes, random identifiers, and
    `in`-support so it passes isVdsmImage-style checks."""

    # the keys whose presence makes a drive look like a Vdsm image
    _VDSM_IMAGE_KEYS = ('imageID', 'domainID', 'poolID', 'volumeID')

    def __init__(self, name, size):
        self.name = name
        self.apparentsize = size
        self.truesize = size
        # every identifier gets a fresh random UUID string
        for ident in ('GUID',) + self._VDSM_IMAGE_KEYS:
            setattr(self, ident, str(uuid.uuid4()))

    def __contains__(self, item):
        # isVdsmImage support
        return item in self._VDSM_IMAGE_KEYS
class FakeVM(object):
    """Minimal stand-in for a Vm object: identity, device lists, and the
    monitorable flag derived from the migration state."""

    def __init__(self, nics=None, drives=None):
        self.id = str(uuid.uuid4())
        self.nics = [] if nics is None else nics
        self.drives = [] if drives is None else drives
        self.migrationPending = False

    @property
    def monitorable(self):
        # a VM with a pending migration must not be polled
        return not self.migrationPending

    def getNicDevices(self):
        """Return the configured NIC devices."""
        return self.nics

    def getDiskDevices(self):
        """Return the configured drive devices."""
        return self.drives
| gpl-2.0 |
rsvip/Django | django/conf/locale/hr/formats.py | 504 | 2106 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. E Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1  # Monday (0 is Sunday)

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d',  # '2006-10-25'
    '%d.%m.%Y.', '%d.%m.%y.',  # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.',  # '25. 10. 2006.', '25. 10. 06.'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    '%Y-%m-%d',  # '2006-10-25'
    '%d.%m.%Y. %H:%M:%S',  # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M:%S.%f',  # '25.10.2006. 14:30:59.000200'
    '%d.%m.%Y. %H:%M',  # '25.10.2006. 14:30'
    '%d.%m.%Y.',  # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S',  # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M:%S.%f',  # '25.10.06. 14:30:59.000200'
    '%d.%m.%y. %H:%M',  # '25.10.06. 14:30'
    '%d.%m.%y.',  # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S',  # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M:%S.%f',  # '25. 10. 2006. 14:30:59.000200'
    '%d. %m. %Y. %H:%M',  # '25. 10. 2006. 14:30'
    '%d. %m. %Y.',  # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S',  # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M:%S.%f',  # '25. 10. 06. 14:30:59.000200'
    '%d. %m. %y. %H:%M',  # '25. 10. 06. 14:30'
    '%d. %m. %y.',  # '25. 10. 06.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3  # digits grouped in threes, e.g. 1.234.567
| bsd-3-clause |
pankajk/MasterThesis | Code/Graph_Kernels/SKG/KroneckerGenerator.py | 1 | 2725 | import numpy as np
import networkx as nx
import math
import random
def convert(something):
    """Turn an adjacency structure (e.g. a numpy matrix) into a networkx
    graph via the generic networkx conversion entry point."""
    return nx.to_networkx_graph(something)
def deleteSelfLoops(graph, nNodes):
    """
    Zero the diagonal entries (self loops) of the first nNodes rows/cols
    of the adjacency matrix, in place, and return it.

    Used to take away self loops in the final graph for stat purposes.
    """
    # a single pass over the diagonal replaces the original O(n^2)
    # full-matrix scan, which only ever touched i == j entries
    for i in range(int(nNodes)):
        graph[i, i] = 0
    return graph
def generateStochasticKron(initMat, k, deleteSelfLoopsForStats=False,
                           directed=False, customEdges=False, edges=0):
    """
    Generate a stochastic Kronecker graph from an initiator matrix.

    initMat -- initiator object exposing getNumNodes(), getMtxSum()
               and getValue(i, j)
    k -- number of Kronecker iterations; the result has
         getNumNodes() ** k nodes
    deleteSelfLoopsForStats -- zero the diagonal of the final graph
    directed -- if False, every sampled edge is mirrored
    customEdges / edges -- override the predicted edge count
    Returns a networkx graph (via convert()).
    """
    initN = initMat.getNumNodes()
    # integer power instead of math.pow(): math.pow returns a float, which
    # breaks np.zeros() shapes and array indexing under Python 3
    nNodes = initN ** k
    mtxDim = initMat.getNumNodes()
    mtxSum = initMat.getMtxSum()

    if customEdges:
        nEdges = edges
        if nEdges > (nNodes * nNodes):
            raise ValueError("More edges than possible with number of Nodes")
    else:
        nEdges = math.pow(mtxSum, k)  # expected number of edges

    collisions = 0
    print("Edges: ")
    print(nEdges)
    print("Nodes: ")
    print(nNodes)

    # cumulative probability vector over the non-zero initiator cells:
    # entries are (cumulative probability, row, col)
    probToRCPosV = []
    cumProb = 0.0
    for i in range(mtxDim):
        for j in range(mtxDim):
            prob = initMat.getValue(i, j)
            if prob > 0.0:
                cumProb += prob
                probToRCPosV.append((cumProb / mtxSum, i, j))

    # add Nodes
    finalGraph = np.zeros((nNodes, nNodes))

    # add Edges: sample by recursive descent -- at each of the k levels
    # pick an initiator cell by its probability and narrow the row/col range
    e = 0
    while e < nEdges:
        rng = nNodes
        row = 0
        col = 0
        for _ in range(k):
            prob = random.uniform(0, 1)
            n = 0
            while prob > probToRCPosV[n][0]:
                n += 1
            mrow = probToRCPosV[n][1]
            mcol = probToRCPosV[n][2]
            # floor division keeps rng (and thus row/col) integral;
            # Python 3 `/` would produce float array indexes
            rng //= mtxDim
            row += mrow * rng
            col += mcol * rng
        if finalGraph[row, col] == 0:  # if there is no edge yet
            finalGraph[row, col] = 1
            e += 1
            if not directed:  # symmetry if not directed
                if row != col:
                    finalGraph[col, row] = 1
                    e += 1
        else:
            collisions += 1

    print("Collisions: ")
    print(collisions)

    # delete self loops if needed for stats
    if deleteSelfLoopsForStats:
        finalGraph = deleteSelfLoops(finalGraph, nNodes)

    finalGraph = convert(finalGraph)
    return finalGraph
Livit/Livit.Learn.EdX | common/test/acceptance/tests/discussion/test_discussion.py | 7 | 56480 | """
Tests for discussion pages
"""
import datetime
from uuid import uuid4
from flaky import flaky
from nose.plugins.attrib import attr
from pytz import UTC
from .helpers import BaseDiscussionTestCase
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from ...pages.lms.learner_profile import LearnerProfilePage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
MultipleThreadFixture)
from .helpers import BaseDiscussionMixin
# Long lorem-ipsum thread body embedding LaTeX fragments; used by
# test_mathjax_rendering to verify MathJax content in a post is rendered.
THREAD_CONTENT_WITH_LATEX = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n----------\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. (b).\n\n
**(a)** $H_1(e^{j\\omega}) = \\sum_{n=-\\infty}^{\\infty}h_1[n]e^{-j\\omega n} =
\\sum_{n=-\\infty} ^{\\infty}h[n]e^{-j\\omega n}+\\delta_2e^{-j\\omega n_0}$
$= H(e^{j\\omega})+\\delta_2e^{-j\\omega n_0}=A_e (e^{j\\omega}) e^{-j\\omega n_0}
+\\delta_2e^{-j\\omega n_0}=e^{-j\\omega n_0} (A_e(e^{j\\omega})+\\delta_2)
$H_3(e^{j\\omega})=A_e(e^{j\\omega})+\\delta_2$. Dummy $A_e(e^{j\\omega})$ dummy post $.
$A_e(e^{j\\omega}) \\ge -\\delta_2$, it follows that $H_3(e^{j\\omega})$ is real and
$H_3(e^{j\\omega})\\ge 0$.\n\n**(b)** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.\n\n
**Case 1:** If $re^{j\\theta}$ is a Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n**Case 3:** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem $H_3(e^{j\\omega}) = P(cos\\omega)(cos\\omega - cos\\theta)^k$,
Lorem Lorem Lorem Lorem Lorem Lorem $P(cos\\omega)$ has no
$(cos\\omega - cos\\theta)$ factor.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
$P(cos\\theta) \\neq 0$. Since $P(cos\\omega)$ this is a dummy data post $\\omega$,
dummy $\\delta > 0$ such that for all $\\omega$ dummy $|\\omega - \\theta|
< \\delta$, $P(cos\\omega)$ Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
"""
class DiscussionResponsePaginationTestMixin(BaseDiscussionMixin):
    """
    A mixin containing tests for response pagination for use by both inline
    discussion and the discussion tab
    """
    def assert_response_display_correct(self, response_total, displayed_responses):
        """
        Assert that various aspects of the display of responses are all correct:
        * Text indicating total number of responses
        * Presence of "Add a response" button
        * Number of responses actually displayed
        * Presence and text of indicator of how many responses are shown
        * Presence and text of button to load more responses
        """
        self.assertEqual(
            self.thread_page.get_response_total_text(),
            str(response_total) + " responses"
        )
        # the "Add a response" button is shown whenever there are responses
        self.assertEqual(self.thread_page.has_add_response_button(), response_total != 0)
        self.assertEqual(self.thread_page.get_num_displayed_responses(), displayed_responses)
        self.assertEqual(
            self.thread_page.get_shown_responses_text(),
            (
                None if response_total == 0 else
                "Showing all responses" if response_total == displayed_responses else
                "Showing first {} responses".format(displayed_responses)
            )
        )
        self.assertEqual(
            self.thread_page.get_load_responses_button_text(),
            (
                None if response_total == displayed_responses else
                "Load all responses" if response_total - displayed_responses < 100 else
                "Load next 100 responses"
            )
        )

    def test_pagination_no_responses(self):
        """A thread with no responses shows none and no pagination controls."""
        self.setup_thread(0)
        self.assert_response_display_correct(0, 0)

    def test_pagination_few_responses(self):
        """A small thread shows all of its responses on one page."""
        self.setup_thread(5)
        self.assert_response_display_correct(5, 5)

    def test_pagination_two_response_pages(self):
        """50 responses paginate into an initial 25 plus one more load."""
        self.setup_thread(50)
        self.assert_response_display_correct(50, 25)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(50, 50)

    def test_pagination_exactly_two_response_pages(self):
        """125 responses: one more load fetches everything remaining."""
        self.setup_thread(125)
        self.assert_response_display_correct(125, 25)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(125, 125)

    def test_pagination_three_response_pages(self):
        """150 responses require two additional loads to show everything."""
        self.setup_thread(150)
        self.assert_response_display_correct(150, 25)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(150, 125)
        self.thread_page.load_more_responses()
        self.assert_response_display_correct(150, 150)

    def test_add_response_button(self):
        """An open thread exposes a clickable 'Add a response' button."""
        self.setup_thread(5)
        self.assertTrue(self.thread_page.has_add_response_button())
        self.thread_page.click_add_response_button()

    def test_add_response_button_closed_thread(self):
        """A closed thread hides the 'Add a response' button."""
        self.setup_thread(5, closed=True)
        self.assertFalse(self.thread_page.has_add_response_button())
@attr('shard_2')
class DiscussionHomePageTest(UniqueCourseTest):
    """
    Tests for the discussion home page.
    """
    SEARCHED_USERNAME = "gizmo"

    def setUp(self):
        """Install the course, auto-login, and open the discussion home page."""
        super(DiscussionHomePageTest, self).setUp()
        CourseFixture(**self.course_info).install()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.page = DiscussionTabHomePage(self.browser, self.course_id)
        self.page.visit()

    def test_new_post_button(self):
        """
        Scenario: I can create new posts from the Discussion home page.
        Given that I am on the Discussion home page
        When I click on the 'New Post' button
        Then I should be shown the new post form
        """
        self.assertIsNotNone(self.page.new_post_button)
        self.page.click_new_post_button()
        self.assertIsNotNone(self.page.new_post_form)

    @attr('a11y')
    def test_page_accessibility(self):
        """Run the a11y audit on the page, ignoring known open issues."""
        self.page.a11y_audit.config.set_rules({
            "ignore": [
                'section',  # TODO: AC-491
                'color-contrast',  # TNL-4635
                'link-href',  # TNL-4636
                'icon-aria-hidden',  # TNL-4637
            ]
        })
        self.page.a11y_audit.check_for_accessibility_errors()
@attr('shard_2')
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase, DiscussionResponsePaginationTestMixin):
    """
    Tests for the discussion page displaying a single thread
    """
    def setUp(self):
        """Auto-login a test user enrolled in the course."""
        super(DiscussionTabSingleThreadTest, self).setUp()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

    def setup_thread_page(self, thread_id):
        """Open the single-thread page for `thread_id`."""
        self.thread_page = self.create_single_thread_page(thread_id)  # pylint: disable=attribute-defined-outside-init
        self.thread_page.visit()

    def test_mathjax_rendering(self):
        """A thread body containing LaTeX is rendered through MathJax."""
        thread_id = "test_thread_{}".format(uuid4().hex)
        thread_fixture = SingleThreadViewFixture(
            Thread(
                id=thread_id,
                body=THREAD_CONTENT_WITH_LATEX,
                commentable_id=self.discussion_id,
                thread_type="discussion"
            )
        )
        thread_fixture.push()
        self.setup_thread_page(thread_id)
        self.assertTrue(self.thread_page.is_discussion_body_visible())
        self.thread_page.verify_mathjax_preview_available()
        self.thread_page.verify_mathjax_rendered()

    def test_markdown_reference_link(self):
        """
        Check markdown editor renders reference link correctly
        and colon(:) in reference link is not converted to %3a
        """
        sample_link = "http://example.com/colon:test"
        thread_content = """[enter link description here][1]\n[1]: http://example.com/colon:test"""
        thread_id = "test_thread_{}".format(uuid4().hex)
        thread_fixture = SingleThreadViewFixture(
            Thread(
                id=thread_id,
                body=thread_content,
                commentable_id=self.discussion_id,
                thread_type="discussion"
            )
        )
        thread_fixture.push()
        self.setup_thread_page(thread_id)
        self.assertEqual(self.thread_page.get_link_href(), sample_link)

    def test_marked_answer_comments(self):
        """On a question thread, comments under an endorsed answer stay
        collapsed until 'show comments' is clicked."""
        thread_id = "test_thread_{}".format(uuid4().hex)
        response_id = "test_response_{}".format(uuid4().hex)
        comment_id = "test_comment_{}".format(uuid4().hex)
        thread_fixture = SingleThreadViewFixture(
            Thread(id=thread_id, commentable_id=self.discussion_id, thread_type="question")
        )
        thread_fixture.addResponse(
            Response(id=response_id, endorsed=True),
            [Comment(id=comment_id)]
        )
        thread_fixture.push()
        self.setup_thread_page(thread_id)
        # initially collapsed: no comment, no add-comment form
        self.assertFalse(self.thread_page.is_comment_visible(comment_id))
        self.assertFalse(self.thread_page.is_add_comment_visible(response_id))
        self.assertTrue(self.thread_page.is_show_comments_visible(response_id))
        # expanding reveals both and hides the toggle
        self.thread_page.show_comments(response_id)
        self.assertTrue(self.thread_page.is_comment_visible(comment_id))
        self.assertTrue(self.thread_page.is_add_comment_visible(response_id))
        self.assertFalse(self.thread_page.is_show_comments_visible(response_id))
@attr('shard_2')
class DiscussionTabMultipleThreadTest(BaseDiscussionTestCase):
    """
    Tests for the discussion page with multiple threads
    """
    def setUp(self):
        """Auto-login, push two threads, and open the first one."""
        super(DiscussionTabMultipleThreadTest, self).setUp()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.thread_count = 2
        self.thread_ids = []
        self.setup_multiple_threads(thread_count=self.thread_count)
        self.thread_page_1 = DiscussionTabSingleThreadPage(
            self.browser,
            self.course_id,
            self.discussion_id,
            self.thread_ids[0]
        )
        self.thread_page_2 = DiscussionTabSingleThreadPage(
            self.browser,
            self.course_id,
            self.discussion_id,
            self.thread_ids[1]
        )
        self.thread_page_1.visit()

    def setup_multiple_threads(self, thread_count):
        """Push `thread_count` long-bodied threads and record their ids."""
        threads = []
        for i in range(thread_count):
            thread_id = "test_thread_{}_{}".format(i, uuid4().hex)
            thread_body = "Dummy Long text body." * 50
            threads.append(
                Thread(id=thread_id, commentable_id=self.discussion_id, body=thread_body),
            )
            self.thread_ids.append(thread_id)
        view = MultipleThreadFixture(threads)
        view.push()

    def test_page_scroll_on_thread_change_view(self):
        """
        Check switching between threads changes the page focus
        """
        # verify threads are rendered on the page
        self.assertTrue(
            self.thread_page_1.check_threads_rendered_successfully(thread_count=self.thread_count)
        )
        # From the thread_page_1 open & verify next thread
        self.thread_page_1.click_and_open_thread(thread_id=self.thread_ids[1])
        self.assertTrue(self.thread_page_2.is_browser_on_page())
        # Verify that the focus is changed
        self.thread_page_2.check_focus_is_set(selector=".discussion-article")

    @attr('a11y')
    def test_page_accessibility(self):
        """Run the a11y audit on both thread pages, minus known issues."""
        self.thread_page_1.a11y_audit.config.set_rules({
            "ignore": [
                'section',  # TODO: AC-491
                'aria-valid-attr-value',  # TNL-4638
                'color-contrast',  # TNL-4639
                'link-href',  # TNL-4640
                'icon-aria-hidden',  # TNL-4641
            ]
        })
        self.thread_page_1.a11y_audit.check_for_accessibility_errors()
        self.thread_page_2.a11y_audit.config.set_rules({
            "ignore": [
                'section',  # TODO: AC-491
                'aria-valid-attr-value',  # TNL-4638
                'color-contrast',  # TNL-4639
                'link-href',  # TNL-4640
                'icon-aria-hidden',  # TNL-4641
            ]
        })
        self.thread_page_2.a11y_audit.check_for_accessibility_errors()
@attr('shard_2')
class DiscussionOpenClosedThreadTest(BaseDiscussionTestCase):
    """
    Tests for checking the display of attributes on open and closed threads
    """

    def setUp(self):
        super(DiscussionOpenClosedThreadTest, self).setUp()
        self.thread_id = "test_thread_{}".format(uuid4().hex)

    def setup_user(self, roles=None):
        """Create and log in a test user carrying the given discussion roles."""
        # `None` default instead of a mutable `[]` (shared-mutable-default
        # pitfall); behavior is unchanged since the list is only joined.
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self, **thread_kwargs):
        """Push a single-thread fixture (with one response) to the forum."""
        thread_kwargs.update({'commentable_id': self.discussion_id})
        view = SingleThreadViewFixture(
            Thread(id=self.thread_id, **thread_kwargs)
        )
        view.addResponse(Response(id="response1"))
        view.push()

    def setup_openclosed_thread_page(self, closed=False):
        """Create the thread (closed if requested), visit it as a moderator,
        toggle its closed state, and return the thread page."""
        self.setup_user(roles=['Moderator'])
        if closed:
            self.setup_view(closed=True)
        else:
            self.setup_view()
        page = self.create_single_thread_page(self.thread_id)
        page.visit()
        page.close_open_thread()
        return page

    def test_originally_open_thread_vote_display(self):
        # after the toggle, the originally-open thread shows display-only votes
        page = self.setup_openclosed_thread_page()
        self.assertFalse(page.is_element_visible('.thread-main-wrapper .action-vote'))
        self.assertTrue(page.is_element_visible('.thread-main-wrapper .display-vote'))
        self.assertFalse(page.is_element_visible('.response_response1 .action-vote'))
        self.assertTrue(page.is_element_visible('.response_response1 .display-vote'))

    def test_originally_closed_thread_vote_display(self):
        # after the toggle, the originally-closed thread shows actionable votes
        page = self.setup_openclosed_thread_page(True)
        self.assertTrue(page.is_element_visible('.thread-main-wrapper .action-vote'))
        self.assertFalse(page.is_element_visible('.thread-main-wrapper .display-vote'))
        self.assertTrue(page.is_element_visible('.response_response1 .action-vote'))
        self.assertFalse(page.is_element_visible('.response_response1 .display-vote'))

    @attr('a11y')
    def test_page_accessibility(self):
        """Run the a11y audit on both the originally-open and the
        originally-closed variants of the page."""
        page = self.setup_openclosed_thread_page()
        page.a11y_audit.config.set_rules({
            'ignore': [
                'section',  # TODO: AC-491
                'aria-valid-attr-value',  # TNL-4643
                'color-contrast',  # TNL-4644
                'link-href',  # TNL-4640
                'icon-aria-hidden',  # TNL-4645
            ]
        })
        page.a11y_audit.check_for_accessibility_errors()
        page = self.setup_openclosed_thread_page(True)
        page.a11y_audit.config.set_rules({
            'ignore': [
                'section',  # TODO: AC-491
                'aria-valid-attr-value',  # TNL-4643
                'color-contrast',  # TNL-4644
                'link-href',  # TNL-4640
                'icon-aria-hidden',  # TNL-4645
            ]
        })
        page.a11y_audit.check_for_accessibility_errors()
@attr('shard_2')
class DiscussionCommentDeletionTest(BaseDiscussionTestCase):
    """
    Tests for deleting comments displayed beneath responses in the single thread view.
    """
    def setup_user(self, roles=None):
        """Create and log in a test user carrying the given discussion roles."""
        # `None` default instead of a mutable `[]` (shared-mutable-default
        # pitfall); behavior is unchanged since the list is only joined.
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self):
        """Push a thread with one response and two comments: one authored
        by the current user, one by another author."""
        view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread", commentable_id=self.discussion_id))
        view.addResponse(
            Response(id="response1"), [
                Comment(id="comment_other_author"),
                Comment(id="comment_self_author", user_id=self.user_id, thread_id="comment_deletion_test_thread")
            ]
        )
        view.push()

    def test_comment_deletion_as_student(self):
        # a student can delete only their own comment
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_deletion_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_deletable("comment_self_author"))
        self.assertTrue(page.is_comment_visible("comment_other_author"))
        self.assertFalse(page.is_comment_deletable("comment_other_author"))
        page.delete_comment("comment_self_author")

    def test_comment_deletion_as_moderator(self):
        # a moderator can delete anyone's comments
        self.setup_user(roles=['Moderator'])
        self.setup_view()
        page = self.create_single_thread_page("comment_deletion_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_deletable("comment_self_author"))
        self.assertTrue(page.is_comment_deletable("comment_other_author"))
        page.delete_comment("comment_self_author")
        page.delete_comment("comment_other_author")
@attr('shard_2')
class DiscussionResponseEditTest(BaseDiscussionTestCase):
"""
Tests for editing responses displayed beneath thread in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
    def setup_view(self):
        """Push a thread with two responses: one by another author and one
        by the current user."""
        view = SingleThreadViewFixture(Thread(id="response_edit_test_thread", commentable_id=self.discussion_id))
        view.addResponse(
            Response(id="response_other_author", user_id="other", thread_id="response_edit_test_thread"),
        )
        view.addResponse(
            Response(id="response_self_author", user_id=self.user_id, thread_id="response_edit_test_thread"),
        )
        view.push()
    def edit_response(self, page, response_id):
        """Edit the given response through the UI and submit a new body."""
        self.assertTrue(page.is_response_editable(response_id))
        page.start_response_edit(response_id)
        new_response = "edited body"
        page.set_response_editor_value(response_id, new_response)
        page.submit_response_edit(response_id, new_response)
    def test_edit_response_add_link(self):
        """
        Scenario: User submits valid input to the 'add link' form
        Given I am editing a response on a discussion page
        When I click the 'add link' icon in the editor toolbar
        And enter a valid url to the URL input field
        And enter a valid string in the Description input field
        And click the 'OK' button
        Then the edited response should contain the new link
        """
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("response_edit_test_thread")
        page.visit()
        response_id = "response_self_author"
        url = "http://example.com"
        description = "example"
        page.start_response_edit(response_id)
        page.set_response_editor_value(response_id, "")
        page.add_content_via_editor_button(
            "link", response_id, url, description)
        page.submit_response_edit(response_id, description)
        # the markdown link must render to a plain anchor element
        expected_response_html = (
            '<p><a href="{}">{}</a></p>'.format(url, description)
        )
        actual_response_html = page.q(
            css=".response_{} .response-body".format(response_id)
        ).html[0]
        self.assertEqual(expected_response_html, actual_response_html)
def test_edit_response_add_image(self):
"""
Scenario: User submits valid input to the 'add image' form
Given I am editing a response on a discussion page
When I click the 'add image' icon in the editor toolbar
And enter a valid url to the URL input field
And enter a valid string in the Description input field
And click the 'OK' button
Then the edited response should contain the new image
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
response_id = "response_self_author"
url = "http://www.example.com/something.png"
description = "image from example.com"
page.start_response_edit(response_id)
page.set_response_editor_value(response_id, "")
page.add_content_via_editor_button(
"image", response_id, url, description)
page.submit_response_edit(response_id, '')
expected_response_html = (
'<p><img src="{}" alt="{}" title=""></p>'.format(url, description)
)
actual_response_html = page.q(
css=".response_{} .response-body".format(response_id)
).html[0]
self.assertEqual(expected_response_html, actual_response_html)
def test_edit_response_add_image_error_msg(self):
"""
Scenario: User submits invalid input to the 'add image' form
Given I am editing a response on a discussion page
When I click the 'add image' icon in the editor toolbar
And enter an invalid url to the URL input field
And enter an empty string in the Description input field
And click the 'OK' button
Then I should be shown 2 error messages
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
page.start_response_edit("response_self_author")
page.add_content_via_editor_button(
"image", "response_self_author", '', '')
page.verify_link_editor_error_messages_shown()
def test_edit_response_add_decorative_image(self):
"""
Scenario: User submits invalid input to the 'add image' form
Given I am editing a response on a discussion page
When I click the 'add image' icon in the editor toolbar
And enter a valid url to the URL input field
And enter an empty string in the Description input field
And I check the 'image is decorative' checkbox
And click the 'OK' button
Then the edited response should contain the new image
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
response_id = "response_self_author"
url = "http://www.example.com/something.png"
description = ""
page.start_response_edit(response_id)
page.set_response_editor_value(response_id, "Some content")
page.add_content_via_editor_button(
"image", response_id, url, description, is_decorative=True)
page.submit_response_edit(response_id, "Some content")
expected_response_html = (
'<p>Some content<img src="{}" alt="{}" title=""></p>'.format(
url, description)
)
actual_response_html = page.q(
css=".response_{} .response-body".format(response_id)
).html[0]
self.assertEqual(expected_response_html, actual_response_html)
def test_edit_response_add_link_error_msg(self):
"""
Scenario: User submits invalid input to the 'add link' form
Given I am editing a response on a discussion page
When I click the 'add link' icon in the editor toolbar
And enter an invalid url to the URL input field
And enter an empty string in the Description input field
And click the 'OK' button
Then I should be shown 2 error messages
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
page.start_response_edit("response_self_author")
page.add_content_via_editor_button(
"link", "response_self_author", '', '')
page.verify_link_editor_error_messages_shown()
def test_edit_response_as_student(self):
"""
Scenario: Students should be able to edit the response they created not responses of other users
Given that I am on discussion page with student logged in
When I try to edit the response created by student
Then the response should be edited and rendered successfully
And responses from other users should be shown over there
And the student should be able to edit the response of other people
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.assertTrue(page.is_response_visible("response_other_author"))
self.assertFalse(page.is_response_editable("response_other_author"))
self.edit_response(page, "response_self_author")
def test_edit_response_as_moderator(self):
"""
Scenario: Moderator should be able to edit the response they created and responses of other users
Given that I am on discussion page with moderator logged in
When I try to edit the response created by moderator
Then the response should be edited and rendered successfully
And I try to edit the response created by other users
Then the response should be edited and rendered successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
def test_vote_report_endorse_after_edit(self):
"""
Scenario: Moderator should be able to vote, report or endorse after editing the response.
Given that I am on discussion page with moderator logged in
When I try to edit the response created by moderator
Then the response should be edited and rendered successfully
And I try to edit the response created by other users
Then the response should be edited and rendered successfully
And I try to vote the response created by moderator
Then the response should not be able to be voted
And I try to vote the response created by other users
Then the response should be voted successfully
And I try to report the response created by moderator
Then the response should not be able to be reported
And I try to report the response created by other users
Then the response should be reported successfully
And I try to endorse the response created by moderator
Then the response should be endorsed successfully
And I try to endorse the response created by other users
Then the response should be endorsed successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
page.cannot_vote_response('response_self_author')
page.vote_response('response_other_author')
page.cannot_report_response('response_self_author')
page.report_response('response_other_author')
page.endorse_response('response_self_author')
page.endorse_response('response_other_author')
@attr('a11y')
def test_page_accessibility(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.a11y_audit.config.set_rules({
'ignore': [
'section', # TODO: AC-491
'aria-valid-attr-value', # TNL-4638
'color-contrast', # TNL-4644
'link-href', # TNL-4640
'icon-aria-hidden', # TNL-4645
'duplicate-id', # TNL-4647
]
})
page.visit()
page.a11y_audit.check_for_accessibility_errors()
@attr('shard_2')
class DiscussionCommentEditTest(BaseDiscussionTestCase):
    """
    Tests for editing comments displayed beneath responses in the single thread view.
    """

    def setup_user(self, roles=None):
        """
        Create and log in a test user enrolled in the course.

        `roles` is an optional list of forum role names (e.g. ["Moderator"]).
        """
        # Default to None instead of a mutable [] so a single list object is
        # not shared across calls (mutable-default-argument pitfall).
        roles_str = ','.join(roles or [])
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()

    def setup_view(self):
        """Push a thread fixture with one response carrying two comments."""
        view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread", commentable_id=self.discussion_id))
        view.addResponse(
            Response(id="response1"),
            [Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
        view.push()

    def edit_comment(self, page, comment_id):
        """Edit the given comment in place and verify the edited body renders."""
        page.start_comment_edit(comment_id)
        new_comment = "edited body"
        page.set_comment_editor_value(comment_id, new_comment)
        page.submit_comment_edit(comment_id, new_comment)

    def test_edit_comment_as_student(self):
        """Students can edit their own comments but not other users' comments."""
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        self.assertTrue(page.is_comment_visible("comment_other_author"))
        self.assertFalse(page.is_comment_editable("comment_other_author"))
        self.edit_comment(page, "comment_self_author")

    def test_edit_comment_as_moderator(self):
        """Moderators can edit both their own and other users' comments."""
        self.setup_user(roles=["Moderator"])
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        self.assertTrue(page.is_comment_editable("comment_other_author"))
        self.edit_comment(page, "comment_self_author")
        self.edit_comment(page, "comment_other_author")

    def test_cancel_comment_edit(self):
        """Cancelling an edit restores the original comment body."""
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        original_body = page.get_comment_body("comment_self_author")
        page.start_comment_edit("comment_self_author")
        page.set_comment_editor_value("comment_self_author", "edited body")
        page.cancel_comment_edit("comment_self_author", original_body)

    def test_editor_visibility(self):
        """Only one editor should be visible at a time within a single response"""
        self.setup_user(roles=["Moderator"])
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        self.assertTrue(page.is_comment_editable("comment_self_author"))
        self.assertTrue(page.is_comment_editable("comment_other_author"))
        self.assertTrue(page.is_add_comment_visible("response1"))
        original_body = page.get_comment_body("comment_self_author")
        # Opening a comment editor hides the "add comment" form.
        page.start_comment_edit("comment_self_author")
        self.assertFalse(page.is_add_comment_visible("response1"))
        self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
        page.set_comment_editor_value("comment_self_author", "edited body")
        # Opening a second comment editor closes the first one and discards
        # its unsaved changes.
        page.start_comment_edit("comment_other_author")
        self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
        self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
        self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
        # Opening the response editor closes any open comment editor.
        page.start_response_edit("response1")
        self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
        self.assertTrue(page.is_response_editor_visible("response1"))
        original_body = page.get_comment_body("comment_self_author")
        # And opening a comment editor closes the response editor.
        page.start_comment_edit("comment_self_author")
        self.assertFalse(page.is_response_editor_visible("response1"))
        self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
        page.cancel_comment_edit("comment_self_author", original_body)
        self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
        self.assertTrue(page.is_add_comment_visible("response1"))

    @attr('a11y')
    def test_page_accessibility(self):
        """Run the accessibility audit on the single thread page with known issues ignored."""
        self.setup_user()
        self.setup_view()
        page = self.create_single_thread_page("comment_edit_test_thread")
        page.visit()
        page.a11y_audit.config.set_rules({
            'ignore': [
                'section',  # TODO: AC-491
                'aria-valid-attr-value',  # TNL-4643
                'color-contrast',  # TNL-4644
                'link-href',  # TNL-4640
                'icon-aria-hidden',  # TNL-4645
            ]
        })
        page.a11y_audit.check_for_accessibility_errors()
@attr('shard_2')
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
    """
    Tests for inline discussions
    """
    def setUp(self):
        """Install a course with two inline discussion xblocks in one unit."""
        super(InlineDiscussionTest, self).setUp()
        self.thread_ids = []
        self.discussion_id = "test_discussion_{}".format(uuid4().hex)
        self.additional_discussion_id = "test_discussion_{}".format(uuid4().hex)
        self.course_fix = CourseFixture(**self.course_info).add_children(
            XBlockFixtureDesc("chapter", "Test Section").add_children(
                XBlockFixtureDesc("sequential", "Test Subsection").add_children(
                    XBlockFixtureDesc("vertical", "Test Unit").add_children(
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion",
                            metadata={"discussion_id": self.discussion_id}
                        ),
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion 1",
                            metadata={"discussion_id": self.additional_discussion_id}
                        )
                    )
                )
            )
        ).install()
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.courseware_page.visit()
        self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
        self.additional_discussion_page = InlineDiscussionPage(self.browser, self.additional_discussion_id)
    def setup_thread_page(self, thread_id):
        """Expand the inline discussion and open the given thread."""
        self.discussion_page.expand_discussion()
        self.assertEqual(self.discussion_page.get_num_displayed_threads(), 1)
        self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id)  # pylint: disable=attribute-defined-outside-init
        self.thread_page.expand()
    def setup_multiple_inline_threads(self, thread_count):
        """
        Set up multiple treads on the page by passing 'thread_count'
        """
        threads = []
        for i in range(thread_count):
            thread_id = "test_thread_{}_{}".format(i, uuid4().hex)
            threads.append(
                Thread(id=thread_id, commentable_id=self.discussion_id),
            )
            self.thread_ids.append(thread_id)
        thread_fixture = MultipleThreadFixture(threads)
        # Only the first thread gets a response with two comments attached.
        thread_fixture.add_response(
            Response(id="response1"),
            [Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)],
            threads[0]
        )
        thread_fixture.push()
    def test_page_while_expanding_inline_discussion(self):
        """
        Tests for the Inline Discussion page with multiple treads. Page should not focus 'thread-wrapper'
        after loading responses.
        """
        self.setup_multiple_inline_threads(thread_count=3)
        self.discussion_page.expand_discussion()
        thread_page = InlineDiscussionThreadPage(self.browser, self.thread_ids[0])
        thread_page.expand()

        # Check if 'thread-wrapper' is focused after expanding thread
        self.assertFalse(thread_page.check_if_selector_is_focused(selector='.thread-wrapper'))
    def test_initial_render(self):
        """The inline discussion starts collapsed."""
        self.assertFalse(self.discussion_page.is_discussion_expanded())
    def test_expand_discussion_empty(self):
        """Expanding an empty discussion shows zero threads."""
        self.discussion_page.expand_discussion()
        self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)
    def check_anonymous_to_peers(self, is_staff):
        """Verify anonymous-to-peers threads hide the author only from non-staff."""
        thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
        thread_fixture = SingleThreadViewFixture(thread)
        thread_fixture.push()
        self.setup_thread_page(thread.get("id"))
        self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)
    def test_anonymous_to_peers_threads_as_staff(self):
        AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
        self.courseware_page.visit()
        self.check_anonymous_to_peers(True)
    def test_anonymous_to_peers_threads_as_peer(self):
        self.check_anonymous_to_peers(False)
    def test_discussion_blackout_period(self):
        """During a blackout window all posting/editing controls are hidden."""
        now = datetime.datetime.now(UTC)
        # Configure a blackout window that started 14 days ago and ends in 2
        # days, so the current moment is inside it.
        self.course_fix.add_advanced_settings(
            {
                u"discussion_blackouts": {
                    "value": [
                        [
                            (now - datetime.timedelta(days=14)).isoformat(),
                            (now + datetime.timedelta(days=2)).isoformat()
                        ]
                    ]
                }
            }
        )
        self.course_fix._add_advanced_settings()
        self.browser.refresh()
        thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
        thread_fixture = SingleThreadViewFixture(thread)
        thread_fixture.addResponse(
            Response(id="response1"),
            [Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
        thread_fixture.push()
        self.setup_thread_page(thread.get("id"))
        self.assertFalse(self.discussion_page.element_exists(".new-post-btn"))
        self.assertFalse(self.thread_page.has_add_response_button())
        self.assertFalse(self.thread_page.is_response_editable("response1"))
        self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
        self.assertFalse(self.thread_page.is_comment_editable("comment1"))
        self.assertFalse(self.thread_page.is_comment_editable("comment2"))
        self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
        self.assertFalse(self.thread_page.is_comment_deletable("comment2"))
    def test_dual_discussion_xblock(self):
        """
        Scenario: Two discussion xblocks in one unit shouldn't override their actions
        Given that I'm on courseware page where there are two inline discussion
        When I click on one discussion xblock new post button
        Then it should add new post form of that xblock in DOM
        And I should be shown new post form of that xblock
        And I shouldn't be shown second discussion xblock new post form
        And I click on second discussion xblock new post button
        Then it should add new post form of second xblock in DOM
        And I should be shown second discussion new post form
        And I shouldn't be shown first discussion xblock new post form
        And I have two new post form in the DOM
        When I click back on first xblock new post button
        And I should be shown new post form of that xblock
        And I shouldn't be shown second discussion xblock new post form
        """
        self.discussion_page.wait_for_page()
        self.additional_discussion_page.wait_for_page()
        self.discussion_page.click_new_post_button()
        # Cancelling the new-post form triggers a confirmation alert that
        # must be accepted before the page can proceed.
        with self.discussion_page.handle_alert():
            self.discussion_page.click_cancel_new_post()
        self.additional_discussion_page.click_new_post_button()
        self.assertFalse(self.discussion_page._is_element_visible(".new-post-article"))
        with self.additional_discussion_page.handle_alert():
            self.additional_discussion_page.click_cancel_new_post()

        self.discussion_page.click_new_post_button()
        self.assertFalse(self.additional_discussion_page._is_element_visible(".new-post-article"))
@attr('shard_2')
class DiscussionUserProfileTest(UniqueCourseTest):
    """
    Tests for user profile page in discussion tab.
    """
    PAGE_SIZE = 20  # django_comment_client.forum.views.THREADS_PER_PAGE
    PROFILED_USERNAME = "profiled-user"

    def setUp(self):
        super(DiscussionUserProfileTest, self).setUp()
        CourseFixture(**self.course_info).install()
        # The following line creates a user enrolled in our course, whose
        # threads will be viewed, but not the one who will view the page.
        # It isn't necessary to log them in, but using the AutoAuthPage
        # saves a lot of code.
        self.profiled_user_id = AutoAuthPage(
            self.browser,
            username=self.PROFILED_USERNAME,
            course_id=self.course_id
        ).visit().get_user_id()
        # now create a second user who will view the profile.
        self.user_id = AutoAuthPage(
            self.browser,
            course_id=self.course_id
        ).visit().get_user_id()

    def check_pages(self, num_threads):
        """
        Push `num_threads` stub threads, open the profiled user's page, and
        return the page object.
        """
        # set up the stub server to return the desired amount of thread results
        threads = [Thread(id=uuid4().hex) for _ in range(num_threads)]
        UserProfileViewFixture(threads).push()
        # navigate to default view (page 1)
        page = DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.profiled_user_id,
            self.PROFILED_USERNAME
        )
        page.visit()

        current_page = 1
        # NOTE: relies on Python 2 integer division; under Python 3 this
        # would produce a float page count.
        total_pages = max(num_threads - 1, 1) / self.PAGE_SIZE + 1
        all_pages = range(1, total_pages + 1)
        # NOTE(review): this early return makes _check_page and the paging
        # loops below unreachable — presumably a deliberate disabling of the
        # pagination assertions (e.g. flakiness); confirm before re-enabling.
        return page

        def _check_page():
            # ensure the page being displayed as "current" is the expected one
            self.assertEqual(page.get_current_page(), current_page)
            # ensure the expected threads are being shown in the right order
            threads_expected = threads[(current_page - 1) * self.PAGE_SIZE:current_page * self.PAGE_SIZE]
            self.assertEqual(page.get_shown_thread_ids(), [t["id"] for t in threads_expected])
            # ensure the clickable page numbers are the expected ones
            self.assertEqual(page.get_clickable_pages(), [
                p for p in all_pages
                if p != current_page
                and p - 2 <= current_page <= p + 2
                or (current_page > 2 and p == 1)
                or (current_page < total_pages and p == total_pages)
            ])
            # ensure the previous button is shown, but only if it should be.
            # when it is shown, make sure it works.
            if current_page > 1:
                self.assertTrue(page.is_prev_button_shown(current_page - 1))
                page.click_prev_page()
                self.assertEqual(page.get_current_page(), current_page - 1)
                page.click_next_page()
                self.assertEqual(page.get_current_page(), current_page)
            else:
                self.assertFalse(page.is_prev_button_shown())
            # ensure the next button is shown, but only if it should be.
            if current_page < total_pages:
                self.assertTrue(page.is_next_button_shown(current_page + 1))
            else:
                self.assertFalse(page.is_next_button_shown())

        # click all the way up through each page
        for i in range(current_page, total_pages):
            _check_page()
            if current_page < total_pages:
                page.click_on_page(current_page + 1)
                current_page += 1

        # click all the way back down
        for i in range(current_page, 0, -1):
            _check_page()
            if current_page > 1:
                page.click_on_page(current_page - 1)
                current_page -= 1

    def test_0_threads(self):
        self.check_pages(0)

    def test_1_thread(self):
        self.check_pages(1)

    def test_20_threads(self):
        self.check_pages(20)

    def test_21_threads(self):
        self.check_pages(21)

    def test_151_threads(self):
        self.check_pages(151)

    def test_pagination_window_reposition(self):
        """After paging, the viewport should scroll back to the top of the window."""
        page = self.check_pages(50)
        page.click_next_page()
        page.wait_for_ajax()
        self.assertTrue(page.is_window_on_top())

    def test_redirects_to_learner_profile(self):
        """
        Scenario: Verify that learner-profile link is present on forum discussions page and we can navigate to it.

        Given that I am on discussion forum user's profile page.
        And I can see a username on left sidebar
        When I click on my username.
        Then I will be navigated to Learner Profile page.
        And I can my username on Learner Profile page
        """
        learner_profile_page = LearnerProfilePage(self.browser, self.PROFILED_USERNAME)

        page = self.check_pages(1)
        page.click_on_sidebar_username()

        learner_profile_page.wait_for_page()
        self.assertTrue(learner_profile_page.field_is_visible('username'))
@attr('shard_2')
class DiscussionSearchAlertTest(UniqueCourseTest):
    """
    Tests for spawning and dismissing alerts related to user search actions and their results.
    """
    SEARCHED_USERNAME = "gizmo"

    def setUp(self):
        super(DiscussionSearchAlertTest, self).setUp()
        CourseFixture(**self.course_info).install()
        # first auto auth call sets up a user that we will search for in some tests
        self.searched_user_id = AutoAuthPage(
            self.browser,
            username=self.SEARCHED_USERNAME,
            course_id=self.course_id
        ).visit().get_user_id()
        # this auto auth call creates the actual session user
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
        self.page = DiscussionTabHomePage(self.browser, self.course_id)
        self.page.visit()

    def setup_corrected_text(self, text):
        """Stub the search service to return `text` as the corrected query."""
        SearchResultFixture(SearchResult(corrected_text=text)).push()

    def check_search_alert_messages(self, expected):
        """
        Assert that the alert messages on the page match `expected`: same
        count, and each expected substring appears (case-insensitively) in
        the corresponding message.
        """
        actual = self.page.get_search_alert_messages()
        # Check lengths explicitly: the previous all(map(...)) form padded
        # the shorter list with None under Python 2, raising AttributeError
        # on a length mismatch instead of a clean assertion failure.
        self.assertEqual(len(actual), len(expected))
        for message, substring in zip(actual, expected):
            self.assertIn(substring.lower(), message.lower())

    def test_no_rewrite(self):
        """A search with no corrected text just reports that no threads matched."""
        self.setup_corrected_text(None)
        self.page.perform_search()
        self.check_search_alert_messages(["no threads"])

    def test_rewrite_dismiss(self):
        """A corrected-query alert can be dismissed."""
        self.setup_corrected_text("foo")
        self.page.perform_search()
        self.check_search_alert_messages(["foo"])
        self.page.dismiss_alert_message("foo")
        self.check_search_alert_messages([])

    def test_new_search(self):
        """Each new search replaces the previous alert messages."""
        self.setup_corrected_text("foo")
        self.page.perform_search()
        self.check_search_alert_messages(["foo"])

        self.setup_corrected_text("bar")
        self.page.perform_search()
        self.check_search_alert_messages(["bar"])

        self.setup_corrected_text(None)
        self.page.perform_search()
        self.check_search_alert_messages(["no threads"])

    def test_rewrite_and_user(self):
        """Searching for a username with a rewrite shows both alerts."""
        self.setup_corrected_text("foo")
        self.page.perform_search(self.SEARCHED_USERNAME)
        self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])

    def test_user_only(self):
        """Searching for a username alone links to that user's profile page."""
        self.setup_corrected_text(None)
        self.page.perform_search(self.SEARCHED_USERNAME)
        self.check_search_alert_messages(["no threads", self.SEARCHED_USERNAME])
        # make sure clicking the link leads to the user profile page
        UserProfileViewFixture([]).push()
        self.page.get_search_alert_links().first.click()
        DiscussionUserProfilePage(
            self.browser,
            self.course_id,
            self.searched_user_id,
            self.SEARCHED_USERNAME
        ).wait_for_page()

    @attr('a11y')
    def test_page_accessibility(self):
        """Run the accessibility audit on the discussion home page with known issues ignored."""
        self.page.a11y_audit.config.set_rules({
            'ignore': [
                'section',  # TODO: AC-491
                'color-contrast',  # TNL-4639
                'link-href',  # TNL-4640
                'icon-aria-hidden',  # TNL-4641
            ]
        })
        self.page.a11y_audit.check_for_accessibility_errors()
@attr('shard_2')
class DiscussionSortPreferenceTest(UniqueCourseTest):
    """
    Tests for the discussion page displaying a single thread.
    """

    def setUp(self):
        super(DiscussionSortPreferenceTest, self).setUp()

        # Create a course to register for.
        CourseFixture(**self.course_info).install()

        AutoAuthPage(self.browser, course_id=self.course_id).visit()

        self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
        self.sort_page.visit()

    def test_default_sort_preference(self):
        """
        Test to check the default sorting preference of user.
        (Default = "activity", i.e. most recent activity first.)
        """
        selected_sort = self.sort_page.get_selected_sort_preference()
        self.assertEqual(selected_sort, "activity")

    def test_change_sort_preference(self):
        """
        Test that if user sorting preference is changing properly.
        """
        selected_sort = ""
        for sort_type in ["votes", "comments", "activity"]:
            self.assertNotEqual(selected_sort, sort_type)
            self.sort_page.change_sort_preference(sort_type)
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)

    def test_last_preference_saved(self):
        """
        Test that user last preference is saved.
        """
        selected_sort = ""
        for sort_type in ["votes", "comments", "activity"]:
            self.assertNotEqual(selected_sort, sort_type)
            self.sort_page.change_sort_preference(sort_type)
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)
            # Reload to verify the preference persisted server-side.
            self.sort_page.refresh_page()
            selected_sort = self.sort_page.get_selected_sort_preference()
            self.assertEqual(selected_sort, sort_type)
| agpl-3.0 |
aequitas/home-assistant | homeassistant/components/emulated_hue/hue_api.py | 2 | 20285 | """Support for a Hue API to control Home Assistant."""
import logging
from aiohttp import web
from homeassistant import core
from homeassistant.components import (
climate, cover, fan, light, media_player, scene, script)
from homeassistant.components.climate.const import (
SERVICE_SET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION, ATTR_POSITION, SERVICE_SET_COVER_POSITION,
SUPPORT_SET_POSITION)
from homeassistant.components.fan import (
ATTR_SPEED, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SPEED_OFF,
SUPPORT_SET_SPEED)
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.const import KEY_REAL_IP
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR)
from homeassistant.components.media_player.const import (
ATTR_MEDIA_VOLUME_LEVEL, SUPPORT_VOLUME_SET)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_SUPPORTED_FEATURES, ATTR_TEMPERATURE,
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, SERVICE_CLOSE_COVER, SERVICE_OPEN_COVER,
SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_VOLUME_SET, STATE_OFF, STATE_ON)
from homeassistant.util.network import is_local
_LOGGER = logging.getLogger(__name__)

# Attribute names used in the Hue API "state" JSON object.
HUE_API_STATE_ON = 'on'
HUE_API_STATE_BRI = 'bri'
HUE_API_STATE_HUE = 'hue'
HUE_API_STATE_SAT = 'sat'

# Maximum values the Hue API uses for hue, saturation and brightness.
HUE_API_STATE_HUE_MAX = 65535.0
HUE_API_STATE_SAT_MAX = 254.0
HUE_API_STATE_BRI_MAX = 255.0

# Keys used internally for the parsed request body (aliases of the Hue
# attribute names above).
STATE_BRIGHTNESS = HUE_API_STATE_BRI
STATE_HUE = HUE_API_STATE_HUE
STATE_SATURATION = HUE_API_STATE_SAT
class HueUsernameView(HomeAssistantView):
    """Handle requests to create a username for the emulated hue bridge."""

    url = '/api'
    name = 'emulated_hue:api:create_username'
    extra_urls = ['/api/']
    requires_auth = False

    async def post(self, request):
        """Answer a pairing request with a fixed username for local callers."""
        try:
            body = await request.json()
        except ValueError:
            return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)

        if 'devicetype' not in body:
            return self.json_message(
                'devicetype not specified', HTTP_BAD_REQUEST)

        if not is_local(request[KEY_REAL_IP]):
            return self.json_message(
                'only local IPs allowed', HTTP_BAD_REQUEST)

        success = {'success': {'username': '12345678901234567890'}}
        return self.json([success])
class HueAllGroupsStateView(HomeAssistantView):
    """Group handler."""

    url = '/api/{username}/groups'
    name = 'emulated_hue:all_groups:state'
    requires_auth = False

    def __init__(self, config):
        """Initialize the instance of the view."""
        self.config = config

    @core.callback
    def get(self, request, username):
        """Process a request to make the Brilliant Lightpad work."""
        if is_local(request[KEY_REAL_IP]):
            # The Lightpad only needs an empty groups object.
            return self.json({})
        return self.json_message('only local IPs allowed', HTTP_BAD_REQUEST)
class HueGroupView(HomeAssistantView):
    """Group handler to get Logitech Pop working."""

    url = '/api/{username}/groups/0/action'
    name = 'emulated_hue:groups:state'
    requires_auth = False

    def __init__(self, config):
        """Initialize the instance of the view."""
        self.config = config

    @core.callback
    def put(self, request, username):
        """Process a request to make the Logitech Pop working."""
        if not is_local(request[KEY_REAL_IP]):
            return self.json_message('only local IPs allowed',
                                     HTTP_BAD_REQUEST)

        # The Pop expects a Hue-style error payload for scene actions.
        error = {
            'address': '/groups/0/action/scene',
            'type': 7,
            'description': 'invalid value, dummy for parameter, scene',
        }
        return self.json([{'error': error}])
class HueAllLightsStateView(HomeAssistantView):
    """Handle requests for getting and setting info about entities."""

    url = '/api/{username}/lights'
    name = 'emulated_hue:lights:state'
    requires_auth = False

    def __init__(self, config):
        """Initialize the instance of the view."""
        self.config = config

    @core.callback
    def get(self, request, username):
        """Process a request to get the list of available lights."""
        if not is_local(request[KEY_REAL_IP]):
            return self.json_message('only local IPs allowed',
                                     HTTP_BAD_REQUEST)

        hass = request.app['hass']
        response = {}

        # Map every exposed entity to its Hue light number and JSON form.
        for entity in hass.states.async_all():
            if not self.config.is_entity_exposed(entity):
                continue
            number = self.config.entity_id_to_number(entity.entity_id)
            response[number] = entity_to_json(
                self.config, entity, get_entity_state(self.config, entity))

        return self.json(response)
class HueOneLightStateView(HomeAssistantView):
    """Handle requests for getting and setting info about entities."""

    url = '/api/{username}/lights/{entity_id}'
    name = 'emulated_hue:light:state'
    requires_auth = False

    def __init__(self, config):
        """Initialize the instance of the view."""
        self.config = config

    @core.callback
    def get(self, request, username, entity_id):
        """Process a request to get the state of an individual light."""
        if not is_local(request[KEY_REAL_IP]):
            return self.json_message('only local IPs allowed',
                                     HTTP_BAD_REQUEST)

        hass = request.app['hass']
        hass_entity_id = self.config.number_to_entity_id(entity_id)
        entity = hass.states.get(hass_entity_id)

        if entity is None:
            _LOGGER.error('Entity not found: %s', hass_entity_id)
            return web.Response(text="Entity not found", status=404)

        if not self.config.is_entity_exposed(entity):
            _LOGGER.error('Entity not exposed: %s', hass_entity_id)
            return web.Response(text="Entity not exposed", status=404)

        return self.json(entity_to_json(
            self.config, entity, get_entity_state(self.config, entity)))
class HueOneLightChangeView(HomeAssistantView):
    """Handle requests for getting and setting info about entities."""
    # NOTE(review): this route `name` duplicates HueOneLightStateView.name —
    # presumably harmless given a distinct url, but confirm registration
    # tolerates duplicate view names.
    url = '/api/{username}/lights/{entity_number}/state'
    name = 'emulated_hue:light:state'
    requires_auth = False
    def __init__(self, config):
        """Initialize the instance of the view."""
        self.config = config
    async def put(self, request, username, entity_number):
        """Process a request to set the state of an individual light."""
        # Only clients on the local network may control entities.
        if not is_local(request[KEY_REAL_IP]):
            return self.json_message('only local IPs allowed',
                                     HTTP_BAD_REQUEST)
        config = self.config
        hass = request.app['hass']
        # Hue clients address lights by number; translate to an entity_id.
        entity_id = config.number_to_entity_id(entity_number)
        if entity_id is None:
            _LOGGER.error('Unknown entity number: %s', entity_number)
            return self.json_message('Entity not found', HTTP_NOT_FOUND)
        entity = hass.states.get(entity_id)
        if entity is None:
            _LOGGER.error('Entity not found: %s', entity_id)
            return self.json_message('Entity not found', HTTP_NOT_FOUND)
        if not config.is_entity_exposed(entity):
            _LOGGER.error('Entity not exposed: %s', entity_id)
            return web.Response(text="Entity not exposed", status=404)
        try:
            request_json = await request.json()
        except ValueError:
            _LOGGER.error('Received invalid json')
            return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
        # Parse the request into requested "on" status and brightness
        parsed = parse_hue_api_put_light_body(request_json, entity)
        if parsed is None:
            _LOGGER.error('Unable to parse data: %s', request_json)
            return web.Response(text="Bad request", status=400)
        # Choose general HA domain
        domain = core.DOMAIN
        # Entity needs separate call to turn on
        turn_on_needed = False
        # Convert the resulting "on" status into the service we need to call
        service = SERVICE_TURN_ON if parsed[STATE_ON] else SERVICE_TURN_OFF
        # Construct what we need to send to the service
        data = {ATTR_ENTITY_ID: entity_id}
        # Make sure the entity actually supports brightness
        entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if entity.domain == light.DOMAIN:
            # Brightness/color only matter when the light is being turned on.
            if parsed[STATE_ON]:
                if entity_features & SUPPORT_BRIGHTNESS:
                    if parsed[STATE_BRIGHTNESS] is not None:
                        data[ATTR_BRIGHTNESS] = parsed[STATE_BRIGHTNESS]
                if entity_features & SUPPORT_COLOR:
                    if parsed[STATE_HUE] is not None:
                        if parsed[STATE_SATURATION]:
                            sat = parsed[STATE_SATURATION]
                        else:
                            sat = 0
                        hue = parsed[STATE_HUE]
                        # Convert hs values to hass hs values
                        # (Hue sat 0-254 -> 0-100, Hue hue 0-65535 -> 0-360).
                        sat = int((sat / HUE_API_STATE_SAT_MAX) * 100)
                        hue = int((hue / HUE_API_STATE_HUE_MAX) * 360)
                        data[ATTR_HS_COLOR] = (hue, sat)
        # If the requested entity is a script add some variables
        elif entity.domain == script.DOMAIN:
            data['variables'] = {
                'requested_state': STATE_ON if parsed[STATE_ON] else STATE_OFF
            }
            if parsed[STATE_BRIGHTNESS] is not None:
                data['variables']['requested_level'] = parsed[STATE_BRIGHTNESS]
        # If the requested entity is a climate, set the temperature
        elif entity.domain == climate.DOMAIN:
            # We don't support turning climate devices on or off,
            # only setting the temperature
            service = None
            if entity_features & SUPPORT_TARGET_TEMPERATURE:
                if parsed[STATE_BRIGHTNESS] is not None:
                    domain = entity.domain
                    service = SERVICE_SET_TEMPERATURE
                    # Brightness was already converted to a 0-100 value by
                    # parse_hue_api_put_light_body for this domain.
                    data[ATTR_TEMPERATURE] = parsed[STATE_BRIGHTNESS]
        # If the requested entity is a media player, convert to volume
        elif entity.domain == media_player.DOMAIN:
            if entity_features & SUPPORT_VOLUME_SET:
                if parsed[STATE_BRIGHTNESS] is not None:
                    # Setting volume alone does not power the player on, so
                    # an explicit turn_on call is issued first (below).
                    turn_on_needed = True
                    domain = entity.domain
                    service = SERVICE_VOLUME_SET
                    # Convert 0-100 to 0.0-1.0
                    data[ATTR_MEDIA_VOLUME_LEVEL] = \
                        parsed[STATE_BRIGHTNESS] / 100.0
        # If the requested entity is a cover, convert to open_cover/close_cover
        elif entity.domain == cover.DOMAIN:
            domain = entity.domain
            if service == SERVICE_TURN_ON:
                service = SERVICE_OPEN_COVER
            else:
                service = SERVICE_CLOSE_COVER
            # A position request overrides plain open/close when supported.
            if entity_features & SUPPORT_SET_POSITION:
                if parsed[STATE_BRIGHTNESS] is not None:
                    domain = entity.domain
                    service = SERVICE_SET_COVER_POSITION
                    data[ATTR_POSITION] = parsed[STATE_BRIGHTNESS]
        # If the requested entity is a fan, convert to speed
        elif entity.domain == fan.DOMAIN:
            if entity_features & SUPPORT_SET_SPEED:
                if parsed[STATE_BRIGHTNESS] is not None:
                    domain = entity.domain
                    # Convert 0-100 to a fan speed (thirds of the range)
                    brightness = parsed[STATE_BRIGHTNESS]
                    if brightness == 0:
                        data[ATTR_SPEED] = SPEED_OFF
                    elif 0 < brightness <= 33.3:
                        data[ATTR_SPEED] = SPEED_LOW
                    elif 33.3 < brightness <= 66.6:
                        data[ATTR_SPEED] = SPEED_MEDIUM
                    elif 66.6 < brightness <= 100:
                        data[ATTR_SPEED] = SPEED_HIGH
        if entity.domain in config.off_maps_to_on_domains:
            # Map the off command to on
            service = SERVICE_TURN_ON
            # Caching is required because things like scripts and scenes won't
            # report as "off" to Alexa if an "off" command is received, because
            # they'll map to "on". Thus, instead of reporting its actual
            # status, we report what Alexa will want to see, which is the same
            # as the actual requested command.
            config.cached_states[entity_id] = parsed
        # Separate call to turn on needed
        if turn_on_needed:
            hass.async_create_task(hass.services.async_call(
                core.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: entity_id},
                blocking=True))
        if service is not None:
            hass.async_create_task(hass.services.async_call(
                domain, service, data, blocking=True))
        # Respond with one Hue "success" entry per attribute that was set.
        json_response = \
            [create_hue_success_response(
                entity_id, HUE_API_STATE_ON, parsed[STATE_ON])]
        if parsed[STATE_BRIGHTNESS] is not None:
            json_response.append(create_hue_success_response(
                entity_id, HUE_API_STATE_BRI, parsed[STATE_BRIGHTNESS]))
        if parsed[STATE_HUE] is not None:
            json_response.append(create_hue_success_response(
                entity_id, HUE_API_STATE_HUE, parsed[STATE_HUE]))
        if parsed[STATE_SATURATION] is not None:
            json_response.append(create_hue_success_response(
                entity_id, HUE_API_STATE_SAT, parsed[STATE_SATURATION]))
        return self.json(json_response)
def parse_hue_api_put_light_body(request_json, entity):
    """Extract on/bri/hue/sat values from a Hue API PUT body.

    Returns a dict keyed by the STATE_* constants, or None when the body
    is malformed (wrong "on" type, or non-integer hue/sat/bri values).
    """
    data = {
        STATE_BRIGHTNESS: None,
        STATE_HUE: None,
        STATE_ON: False,
        STATE_SATURATION: None,
    }
    # Feature bitmask is needed below to decide whether brightness applies.
    entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
    if HUE_API_STATE_ON in request_json:
        requested_on = request_json[HUE_API_STATE_ON]
        if not isinstance(requested_on, bool):
            return None
        # An explicit on/off request carries no brightness of its own.
        data[STATE_ON] = requested_on
        data[STATE_BRIGHTNESS] = None
    if HUE_API_STATE_HUE in request_json:
        try:
            hue_value = int(request_json[HUE_API_STATE_HUE])
        except ValueError:
            return None
        # Clamp hue to the Hue API range 0..65535.
        data[STATE_HUE] = min(max(hue_value, 0), HUE_API_STATE_HUE_MAX)
    if HUE_API_STATE_SAT in request_json:
        try:
            sat_value = int(request_json[HUE_API_STATE_SAT])
        except ValueError:
            return None
        # Clamp saturation to the Hue API range 0..254.
        data[STATE_SATURATION] = min(max(sat_value, 0),
                                     HUE_API_STATE_SAT_MAX)
    if HUE_API_STATE_BRI in request_json:
        try:
            bri_value = int(request_json[HUE_API_STATE_BRI])
        except ValueError:
            return None
        # Clamp brightness to the Hue API range 0..255.
        data[STATE_BRIGHTNESS] = min(max(bri_value, 0),
                                     HUE_API_STATE_BRI_MAX)
        if entity.domain == light.DOMAIN:
            # For lights a nonzero brightness implies "on".
            data[STATE_ON] = data[STATE_BRIGHTNESS] > 0
            if not entity_features & SUPPORT_BRIGHTNESS:
                data[STATE_BRIGHTNESS] = None
        elif entity.domain == scene.DOMAIN:
            # Scenes are activate-only; brightness is meaningless.
            data[STATE_BRIGHTNESS] = None
            data[STATE_ON] = True
        elif entity.domain in [
                script.DOMAIN, media_player.DOMAIN,
                fan.DOMAIN, cover.DOMAIN, climate.DOMAIN]:
            # These domains take a 0-100 level instead of 0-255.
            data[STATE_BRIGHTNESS] = round(
                (data[STATE_BRIGHTNESS] / HUE_API_STATE_BRI_MAX) * 100)
            data[STATE_ON] = True
    return data
def get_entity_state(config, entity):
    """Retrieve and convert state/brightness/color values for an entity.

    Prefers a cached Hue state recorded by the PUT handler; otherwise
    derives one from the entity's current HA state and attributes.
    """
    cached = config.cached_states.get(entity.entity_id, None)
    if cached is not None:
        # Serve (and fix up, in place) the state the client expects to see.
        data = cached
    else:
        data = {
            STATE_BRIGHTNESS: None,
            STATE_HUE: None,
            STATE_ON: False,
            STATE_SATURATION: None
        }
        data[STATE_ON] = entity.state != STATE_OFF
        if not data[STATE_ON]:
            data[STATE_BRIGHTNESS] = 0
            data[STATE_HUE] = 0
            data[STATE_SATURATION] = 0
        else:
            data[STATE_BRIGHTNESS] = entity.attributes.get(ATTR_BRIGHTNESS, 0)
            hue_sat = entity.attributes.get(ATTR_HS_COLOR, None)
            if hue_sat is not None:
                # Scale hass hs values back to Hue hs values.
                data[STATE_HUE] = \
                    int((hue_sat[0] / 360.0) * HUE_API_STATE_HUE_MAX)
                data[STATE_SATURATION] = \
                    int((hue_sat[1] / 100.0) * HUE_API_STATE_SAT_MAX)
        # Domain-specific brightness: derive a 0-255 level from whatever
        # analogue the domain exposes. Lights already report 0-255
        # brightness above, so they need no conversion here.
        if entity.domain == climate.DOMAIN:
            # Convert target temperature (0-100) to 0-255.
            target_temp = entity.attributes.get(ATTR_TEMPERATURE, 0)
            data[STATE_BRIGHTNESS] = round(target_temp * 255 / 100)
        elif entity.domain == media_player.DOMAIN:
            # Convert volume (0.0-1.0) to 0-255.
            volume = entity.attributes.get(
                ATTR_MEDIA_VOLUME_LEVEL, 1.0 if data[STATE_ON] else 0.0)
            data[STATE_BRIGHTNESS] = \
                round(min(1.0, volume) * HUE_API_STATE_BRI_MAX)
        elif entity.domain == fan.DOMAIN:
            # Map the named fan speed onto thirds of the 0-255 range.
            speed_levels = {SPEED_LOW: 85, SPEED_MEDIUM: 170, SPEED_HIGH: 255}
            speed = entity.attributes.get(ATTR_SPEED, 0)
            data[STATE_BRIGHTNESS] = speed_levels.get(speed, 0)
        elif entity.domain == cover.DOMAIN:
            # Convert cover position (0-100) to 0-255.
            position = entity.attributes.get(ATTR_CURRENT_POSITION, 0)
            data[STATE_BRIGHTNESS] = \
                round(position / 100 * HUE_API_STATE_BRI_MAX)
    # Fill in safe defaults so callers always see concrete values.
    if data[STATE_BRIGHTNESS] is None:
        data[STATE_BRIGHTNESS] = 255 if data[STATE_ON] else 0
    if (data[STATE_HUE] is None) or (data[STATE_SATURATION] is None):
        data[STATE_HUE] = 0
        data[STATE_SATURATION] = 0
    # A fully-dimmed ("off") light reports no color.
    if data[STATE_BRIGHTNESS] == 0:
        data[STATE_HUE] = 0
        data[STATE_SATURATION] = 0
    return data
def entity_to_json(config, entity, state):
    """Convert an entity to its Hue bridge JSON representation."""
    hue_state = {
        HUE_API_STATE_ON: state[STATE_ON],
        HUE_API_STATE_BRI: state[STATE_BRIGHTNESS],
        HUE_API_STATE_HUE: state[STATE_HUE],
        HUE_API_STATE_SAT: state[STATE_SATURATION],
        # The entity exists in HA, so report it as reachable.
        'reachable': True,
    }
    return {
        'state': hue_state,
        # Everything is presented to Hue clients as a dimmable light.
        'type': 'Dimmable light',
        'name': config.get_entity_name(entity),
        'modelid': 'HASS123',
        'uniqueid': entity.entity_id,
        'swversion': '123',
    }
def create_hue_success_response(entity_id, attr, value):
    """Build the Hue API success payload for one changed attribute."""
    return {
        'success': {
            '/lights/{}/state/{}'.format(entity_id, attr): value,
        },
    }
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.