| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
prashantas/MyDataScience | GeneralPython/PyDataStructure/multiValuedDict.py | 1 | 1163 | #http://code.activestate.com/recipes/52219-associating-multiple-values-with-each-key-in-a-dic/
#https://www.oreilly.com/library/view/python-cookbook/0596001673/ch01s06.html
def repeatedValue():
# this method allows duplicate values for the same key
d = dict()
# To add a key->value pair, do this:
#d.setdefault(key, []).append(value)
d.setdefault('a',[]).append('apple')
d.setdefault('b',[]).append('ball')
d.setdefault('c',[]).append('cat')
d.setdefault('a',[]).append('aeroplane')
d.setdefault('a',[]).append('anar')
d.setdefault('b',[]).append('balloon')
#aval = d['a']
print(d)
d['a'].remove('apple')
print(d)
def nonRepeatedValue():
example = {}
example.setdefault('a', set()).add('apple')
example.setdefault('b', set()).add('ball')
example.setdefault('a', set()).add('ant')
#example.setdefault('a', set()).add('apple')
#example.setdefault('c', {})['cat']=1
#example.setdefault('a', {})['ant']=1
#example.setdefault('a', {})['apple']=1
print(example)
d = example['a']
print(d)
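# A hedged alternative sketch (not part of the original recipe): the standard
# library's collections.defaultdict removes the repeated setdefault calls.
# The keys and values below are illustrative only.
def defaultDictValue():
    from collections import defaultdict
    d = defaultdict(list)  # missing keys start out as an empty list
    d['a'].append('apple')
    d['a'].append('anar')
    d['b'].append('ball')
    print(dict(d))  # {'a': ['apple', 'anar'], 'b': ['ball']}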
if __name__ == '__main__':
#repeatedValue()
nonRepeatedValue()
| bsd-2-clause |
redhat-openstack/ironic | ironic/tests/common/test_raid.py | 2 | 11093 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from ironic.common import exception
from ironic.common import raid
from ironic.drivers import base as drivers_base
from ironic.tests import base
from ironic.tests.db import base as db_base
from ironic.tests.objects import utils as obj_utils
from ironic.tests import raid_constants
class ValidateRaidConfigurationTestCase(base.TestCase):
def setUp(self):
with open(drivers_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
self.schema = json.load(raid_schema_fobj)
super(ValidateRaidConfigurationTestCase, self).setUp()
def test_validate_configuration_okay(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_OKAY)
raid.validate_configuration(
raid_config, raid_config_schema=self.schema)
def test_validate_configuration_no_logical_disk(self):
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
{},
raid_config_schema=self.schema)
def test_validate_configuration_zero_logical_disks(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_NO_LOGICAL_DISKS)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_no_raid_level(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_NO_RAID_LEVEL)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_invalid_raid_level(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_RAID_LEVEL)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_no_size_gb(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_NO_SIZE_GB)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_max_size_gb(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_MAX_SIZE_GB)
raid.validate_configuration(raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_invalid_size_gb(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_SIZE_GB)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_invalid_is_root_volume(self):
raid_config_str = raid_constants.RAID_CONFIG_INVALID_IS_ROOT_VOL
raid_config = json.loads(raid_config_str)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_invalid_multiple_is_root_volume(self):
raid_config_str = raid_constants.RAID_CONFIG_MULTIPLE_IS_ROOT_VOL
raid_config = json.loads(raid_config_str)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_invalid_share_physical_disks(self):
raid_config_str = raid_constants.RAID_CONFIG_INVALID_SHARE_PHY_DISKS
raid_config = json.loads(raid_config_str)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_invalid_disk_type(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_DISK_TYPE)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_invalid_int_type(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_INT_TYPE)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_invalid_number_of_phy_disks(self):
raid_config_str = raid_constants.RAID_CONFIG_INVALID_NUM_PHY_DISKS
raid_config = json.loads(raid_config_str)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_invalid_physical_disks(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_INVALID_PHY_DISKS)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_additional_property(self):
raid_config = json.loads(raid_constants.RAID_CONFIG_ADDITIONAL_PROP)
self.assertRaises(exception.InvalidParameterValue,
raid.validate_configuration,
raid_config,
raid_config_schema=self.schema)
def test_validate_configuration_custom_schema(self):
raid_config = json.loads(raid_constants.CUSTOM_SCHEMA_RAID_CONFIG)
schema = json.loads(raid_constants.CUSTOM_RAID_SCHEMA)
raid.validate_configuration(raid_config,
raid_config_schema=schema)
class RaidPublicMethodsTestCase(db_base.DbTestCase):
def test_get_logical_disk_properties(self):
with open(drivers_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
schema = json.load(raid_schema_fobj)
logical_disk_properties = raid.get_logical_disk_properties(schema)
self.assertIn('raid_level', logical_disk_properties)
self.assertIn('size_gb', logical_disk_properties)
self.assertIn('volume_name', logical_disk_properties)
self.assertIn('is_root_volume', logical_disk_properties)
self.assertIn('share_physical_disks', logical_disk_properties)
self.assertIn('disk_type', logical_disk_properties)
self.assertIn('interface_type', logical_disk_properties)
self.assertIn('number_of_physical_disks', logical_disk_properties)
self.assertIn('controller', logical_disk_properties)
self.assertIn('physical_disks', logical_disk_properties)
def test_get_logical_disk_properties_custom_schema(self):
raid_schema = json.loads(raid_constants.CUSTOM_RAID_SCHEMA)
logical_disk_properties = raid.get_logical_disk_properties(
raid_config_schema=raid_schema)
self.assertIn('raid_level', logical_disk_properties)
self.assertIn('size_gb', logical_disk_properties)
self.assertIn('foo', logical_disk_properties)
def _test_update_raid_info(self, current_config,
capabilities=None):
node = obj_utils.create_test_node(self.context,
driver='fake')
if capabilities:
properties = node.properties
properties['capabilities'] = capabilities
del properties['local_gb']
node.properties = properties
target_raid_config = json.loads(raid_constants.RAID_CONFIG_OKAY)
node.target_raid_config = target_raid_config
node.save()
raid.update_raid_info(node, current_config)
properties = node.properties
current = node.raid_config
target = node.target_raid_config
self.assertIsNotNone(current['last_updated'])
self.assertIsInstance(current['logical_disks'][0], dict)
if current_config['logical_disks'][0].get('is_root_volume'):
self.assertEqual({'wwn': '600508B100'},
properties['root_device'])
self.assertEqual(100, properties['local_gb'])
self.assertIn('raid_level:1', properties['capabilities'])
if capabilities:
self.assertIn(capabilities, properties['capabilities'])
else:
self.assertNotIn('local_gb', properties)
self.assertNotIn('root_device', properties)
if capabilities:
self.assertNotIn('raid_level:1', properties['capabilities'])
# Verify node.target_raid_config is preserved.
self.assertEqual(target_raid_config, target)
def test_update_raid_info_okay(self):
current_config = json.loads(raid_constants.CURRENT_RAID_CONFIG)
self._test_update_raid_info(current_config,
capabilities='boot_mode:bios')
def test_update_raid_info_okay_no_root_volumes(self):
current_config = json.loads(raid_constants.CURRENT_RAID_CONFIG)
del current_config['logical_disks'][0]['is_root_volume']
del current_config['logical_disks'][0]['root_device_hint']
self._test_update_raid_info(current_config,
capabilities='boot_mode:bios')
def test_update_raid_info_okay_current_capabilities_empty(self):
current_config = json.loads(raid_constants.CURRENT_RAID_CONFIG)
self._test_update_raid_info(current_config,
capabilities=None)
def test_update_raid_info_multiple_root_volumes(self):
current_config = json.loads(raid_constants.RAID_CONFIG_MULTIPLE_ROOT)
self.assertRaises(exception.InvalidParameterValue,
self._test_update_raid_info,
current_config)
| apache-2.0 |
zjj/trac_hack | trac/upgrades/db24.py | 5 | 2911 | from trac.db import Table, Column, Index, DatabaseManager
def do_upgrade(env, ver, cursor):
# Change repository key from reponame to a surrogate id
cursor.execute("SELECT id FROM repository "
"UNION SELECT repos AS id FROM revision "
"UNION SELECT repos AS id FROM node_change "
"ORDER BY id")
id_name_list = [(i + 1, name) for i, (name,) in enumerate(cursor)]
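    # id_name_list now looks like, e.g., [(1, 'repo-a'), (2, 'repo-b')]:
    # surrogate ids assigned in the name order produced by the query above.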
cursor.execute("CREATE TEMPORARY TABLE repo_old "
"AS SELECT * FROM repository")
cursor.execute("DROP TABLE repository")
cursor.execute("CREATE TEMPORARY TABLE rev_old "
"AS SELECT * FROM revision")
cursor.execute("DROP TABLE revision")
cursor.execute("CREATE TEMPORARY TABLE nc_old "
"AS SELECT * FROM node_change")
cursor.execute("DROP TABLE node_change")
tables = [Table('repository', key=('id', 'name'))[
Column('id', type='int'),
Column('name'),
Column('value')],
Table('revision', key=('repos', 'rev'))[
Column('repos', type='int'),
Column('rev', key_size=20),
Column('time', type='int'),
Column('author'),
Column('message'),
Index(['repos', 'time'])],
Table('node_change', key=('repos', 'rev', 'path', 'change_type'))[
Column('repos', type='int'),
Column('rev', key_size=20),
Column('path', key_size=255),
Column('node_type', size=1),
Column('change_type', size=1, key_size=2),
Column('base_path'),
Column('base_rev'),
Index(['repos', 'rev'])]]
db_connector, _ = DatabaseManager(env)._get_connector()
for table in tables:
for stmt in db_connector.to_sql(table):
cursor.execute(stmt)
cursor.executemany("INSERT INTO repository (id,name,value) "
"VALUES (%s,'name',%s)", id_name_list)
cursor.executemany("INSERT INTO repository (id,name,value) "
"SELECT %s,name,value FROM repo_old WHERE id=%s",
id_name_list)
cursor.execute("DROP TABLE repo_old")
cursor.executemany("INSERT INTO revision (repos,rev,time,author,message) "
"SELECT %s,rev,time,author,message FROM rev_old "
"WHERE repos=%s", id_name_list)
cursor.execute("DROP TABLE rev_old")
cursor.executemany("INSERT INTO node_change (repos,rev,path,node_type,"
" change_type,base_path,base_rev) "
"SELECT %s,rev,path,node_type,change_type,base_path,"
" base_rev FROM nc_old WHERE repos=%s", id_name_list)
cursor.execute("DROP TABLE nc_old")
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/testing/compare.py | 11 | 12935 | """
Provides a collection of utilities for comparing (image) results.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import hashlib
import os
import shutil
import numpy as np
import matplotlib
from matplotlib.compat import subprocess
from matplotlib.testing.noseclasses import ImageComparisonFailure
from matplotlib import _png
from matplotlib import _get_cachedir
from matplotlib import cbook
from distutils import version
__all__ = ['compare_float', 'compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
def compare_float(expected, actual, relTol=None, absTol=None):
"""
Fail if the floating point values are not close enough, with
the given message.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
raise ValueError("You haven't specified a 'relTol' relative "
"tolerance or a 'absTol' absolute tolerance "
"function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
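# Usage sketch (values are illustrative): compare_float returns None when the
# values agree within tolerance, and a formatted message otherwise.
#   compare_float(1.0, 1.04, relTol=0.05)  # -> None (4% relative difference)
#   compare_float(1.0, 1.10, relTol=0.05)  # -> message (10% exceeds 5%)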
def get_cache_dir():
cachedir = _get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
if not os.path.exists(cache_dir):
try:
cbook.mkdirs(cache_dir)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2 ** 20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
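# Usage sketch (hypothetical path): the 1 MiB default block size keeps memory
# bounded while hashing arbitrarily large files.
#   get_file_hash('/tmp/baseline.png')  # -> 32-character md5 hex digest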
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(
cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
def _update_converter():
gs, gs_v = matplotlib.checkdep_ghostscript()
if gs_v is not None:
cmd = lambda old, new: \
[gs, '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
'-sOutputFile=' + new, old]
converter['pdf'] = make_external_conversion_command(cmd)
converter['eps'] = make_external_conversion_command(cmd)
if matplotlib.checkdep_inkscape() is not None:
cmd = lambda old, new: \
['inkscape', '-z', old, '--export-png', new]
converter['svg'] = make_external_conversion_command(cmd)
#: A dictionary that maps filename extensions to functions which
#: themselves map arguments `old` and `new` (filenames) to a list of strings.
#: The list can then be passed to Popen to convert files with that
#: extension to png format.
converter = {}
_update_converter()
def comparable_formats():
"""
Returns the list of file formats that compare_images can compare
on this system.
"""
return ['png'] + list(six.iterkeys(converter))
def convert(filename, cache):
"""
Convert the named file into a png file. Returns the name of the
created file.
If *cache* is True, the result of the conversion is cached in
`matplotlib._get_cachedir() + '/test_cache/'`. The caching is based
on a hash of the exact contents of the input file. There is no limit
on the size of the cache, so it may need to be manually cleared
periodically.
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
raise ImageComparisonFailure(
"Don't know how to convert %s files to png" % extension)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash_value = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash_value + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
#: Maps file extensions to a function which takes a filename as its
#: only argument to return a list suitable for execution with Popen.
#: The purpose of this is so that the result file (with the given
#: extension) can be verified with tools such as xmllint for svg.
verifiers = {}
# Turning this off, because it seems to cause multiprocessing issues
if matplotlib.checkdep_xmllint() and False:
verifiers['svg'] = lambda filename: [
'xmllint', '--valid', '--nowarning', '--noout', filename]
def verify(filename):
"""Verify the file through some sort of verification tool."""
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
base, extension = filename.rsplit('.', 1)
verifier = verifiers.get(extension, None)
if verifier is not None:
cmd = verifier(filename)
pipe = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if errcode != 0:
msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah = actual_image.shape
ew, eh = expected_image.shape
actual_image = actual_image[int(aw / 2 - ew / 2):int(
aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
return actual_image, expected_image
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
num_values = np.prod(expectedImage.shape)
abs_diff_image = abs(expectedImage - actualImage)
# On Numpy 1.6, we can use bincount with minlength, which is much
# faster than using histogram
expected_version = version.LooseVersion("1.6")
found_version = version.LooseVersion(np.__version__)
if found_version >= expected_version:
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
else:
histogram = np.histogram(abs_diff_image, bins=np.arange(257))[0]
sum_of_squares = np.sum(histogram * np.arange(len(histogram)) ** 2)
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
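# Equivalence sketch (not part of matplotlib): for images already converted to
# signed integers, the histogram computation above matches the direct formula.
def _rms_direct(expectedImage, actualImage):
    diff = (expectedImage - actualImage).astype(np.float64)
    return np.sqrt(np.mean(diff ** 2))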
def compare_images(expected, actual, tol, in_decorator=False):
"""
Compare two "image" files checking differences within a tolerance.
The two given filenames may point to files which are convertible to
PNG via the `.converter` dictionary. The underlying RMS is calculated
with the `.calculate_rms` function.
Parameters
----------
expected : str
The filename of the expected image.
    actual : str
The filename of the actual image.
tol : float
The tolerance (a color value difference, where 255 is the
maximal difference). The test fails if the average pixel
difference is greater than this value.
in_decorator : bool
If called from image_comparison decorator, this should be
True. (default=False)
Example
-------
img1 = "./baseline/plot.png"
img2 = "./output/plot.png"
    compare_images(img1, img2, 0.001)
"""
if not os.path.exists(actual):
msg = "Output image %s does not exist." % actual
raise Exception(msg)
if os.stat(actual).st_size == 0:
msg = "Output image file %s is empty." % actual
raise Exception(msg)
verify(actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expectedImage = _png.read_png_int(expected)
actualImage = _png.read_png_int(actual)
expectedImage = expectedImage[:, :, :3]
actualImage = actualImage[:, :, :3]
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
# convert to signed integers, so that the images can be subtracted without
# overflow
expectedImage = expectedImage.astype(np.int16)
actualImage = actualImage.astype(np.int16)
rms = calculate_rms(expectedImage, actualImage)
diff_image = make_test_filename(actual, 'failed-diff')
if rms <= tol:
if os.path.exists(diff_image):
os.unlink(diff_image)
return None
save_diff_image(expected, actual, diff_image)
results = dict(rms=rms, expected=str(expected),
actual=str(actual), diff=str(diff_image), tol=tol)
if not in_decorator:
# Then the results should be a string suitable for stdout.
template = ['Error: Image files did not match.',
'RMS Value: {rms}',
'Expected: \n {expected}',
'Actual: \n {actual}',
'Difference:\n {diff}',
'Tolerance: \n {tol}', ]
results = '\n '.join([line.format(**results) for line in template])
return results
def save_diff_image(expected, actual, output):
expectedImage = _png.read_png(expected)
actualImage = _png.read_png(actual)
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
expectedImage = np.array(expectedImage).astype(np.float)
actualImage = np.array(actualImage).astype(np.float)
assert expectedImage.ndim == actualImage.ndim
assert expectedImage.shape == actualImage.shape
absDiffImage = abs(expectedImage - actualImage)
# expand differences in luminance domain
absDiffImage *= 255 * 10
save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np.tostring(), width, height, output)
| mit |
ishank08/scikit-learn | sklearn/datasets/tests/test_base.py | 16 | 9390 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
X_y_tuple = load_digits(return_X_y=True)
bunch = load_digits()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
assert_equal(len(res.feature_names), 10)
# test return_X_y option
X_y_tuple = load_diabetes(return_X_y=True)
bunch = load_diabetes()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_linnerud(return_X_y=True)
bunch = load_linnerud()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_iris(return_X_y=True)
bunch = load_iris()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_wine():
res = load_wine()
assert_equal(res.data.shape, (178, 13))
assert_equal(res.target.size, 178)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_wine(return_X_y=True)
bunch = load_wine()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_breast_cancer(return_X_y=True)
bunch = load_breast_cancer()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_boston(return_X_y=True)
bunch = load_boston()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
    # is a surprising behaviour because reading bunch.key uses
    # bunch.__dict__ (which is non-empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
| bsd-3-clause |
duducosmos/pgs4a | python-install/lib/python2.7/test/test_string.py | 94 | 9159 | import unittest, string
from test import test_support, string_tests
from UserList import UserList
class StringTest(
string_tests.CommonTest,
string_tests.MixinStrStringUserStringTest
):
type2test = str
def checkequal(self, result, object, methodname, *args):
realresult = getattr(string, methodname)(object, *args)
self.assertEqual(
result,
realresult
)
def checkraises(self, exc, object, methodname, *args):
self.assertRaises(
exc,
getattr(string, methodname),
object,
*args
)
def checkcall(self, object, methodname, *args):
getattr(string, methodname)(object, *args)
def test_join(self):
# These are the same checks as in string_test.ObjectTest.test_join
        # but the argument order is different
self.checkequal('a b c d', ['a', 'b', 'c', 'd'], 'join', ' ')
self.checkequal('abcd', ('a', 'b', 'c', 'd'), 'join', '')
self.checkequal('w x y z', string_tests.Sequence(), 'join', ' ')
self.checkequal('abc', ('abc',), 'join', 'a')
self.checkequal('z', UserList(['z']), 'join', 'a')
if test_support.have_unicode:
self.checkequal(unicode('a.b.c'), ['a', 'b', 'c'], 'join', unicode('.'))
self.checkequal(unicode('a.b.c'), [unicode('a'), 'b', 'c'], 'join', '.')
self.checkequal(unicode('a.b.c'), ['a', unicode('b'), 'c'], 'join', '.')
self.checkequal(unicode('a.b.c'), ['a', 'b', unicode('c')], 'join', '.')
self.checkraises(TypeError, ['a', unicode('b'), 3], 'join', '.')
for i in [5, 25, 125]:
self.checkequal(
((('a' * i) + '-') * i)[:-1],
['a' * i] * i, 'join', '-')
self.checkequal(
((('a' * i) + '-') * i)[:-1],
('a' * i,) * i, 'join', '-')
self.checkraises(TypeError, string_tests.BadSeq1(), 'join', ' ')
self.checkequal('a b c', string_tests.BadSeq2(), 'join', ' ')
try:
def f():
yield 4 + ""
self.fixtype(' ').join(f())
except TypeError, e:
if '+' not in str(e):
self.fail('join() ate exception message')
else:
self.fail('exception not raised')
class ModuleTest(unittest.TestCase):
def test_attrs(self):
string.whitespace
string.lowercase
string.uppercase
string.letters
string.digits
string.hexdigits
string.octdigits
string.punctuation
string.printable
def test_atoi(self):
self.assertEqual(string.atoi(" 1 "), 1)
self.assertRaises(ValueError, string.atoi, " 1x")
self.assertRaises(ValueError, string.atoi, " x1 ")
def test_atol(self):
self.assertEqual(string.atol(" 1 "), 1L)
self.assertRaises(ValueError, string.atol, " 1x ")
self.assertRaises(ValueError, string.atol, " x1 ")
def test_atof(self):
self.assertAlmostEqual(string.atof(" 1 "), 1.0)
self.assertRaises(ValueError, string.atof, " 1x ")
self.assertRaises(ValueError, string.atof, " x1 ")
def test_maketrans(self):
transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
self.assertEqual(string.maketrans('abc', 'xyz'), transtable)
self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzq')
def test_capwords(self):
self.assertEqual(string.capwords('abc def ghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('abc\tdef\nghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('abc\t def \nghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('ABC DEF GHI'), 'Abc Def Ghi')
self.assertEqual(string.capwords('ABC-DEF-GHI', '-'), 'Abc-Def-Ghi')
self.assertEqual(string.capwords('ABC-def DEF-ghi GHI'), 'Abc-def Def-ghi Ghi')
self.assertEqual(string.capwords(' aBc DeF '), 'Abc Def')
self.assertEqual(string.capwords('\taBc\tDeF\t'), 'Abc Def')
self.assertEqual(string.capwords('\taBc\tDeF\t', '\t'), '\tAbc\tDef\t')
def test_formatter(self):
fmt = string.Formatter()
self.assertEqual(fmt.format("foo"), "foo")
self.assertEqual(fmt.format("foo{0}", "bar"), "foobar")
self.assertEqual(fmt.format("foo{1}{0}-{1}", "bar", 6), "foo6bar-6")
self.assertEqual(fmt.format("-{arg!r}-", arg='test'), "-'test'-")
# override get_value ############################################
class NamespaceFormatter(string.Formatter):
def __init__(self, namespace={}):
string.Formatter.__init__(self)
self.namespace = namespace
def get_value(self, key, args, kwds):
if isinstance(key, str):
try:
# Check explicitly passed arguments first
return kwds[key]
except KeyError:
return self.namespace[key]
else:
string.Formatter.get_value(key, args, kwds)
fmt = NamespaceFormatter({'greeting':'hello'})
self.assertEqual(fmt.format("{greeting}, world!"), 'hello, world!')
# override format_field #########################################
class CallFormatter(string.Formatter):
def format_field(self, value, format_spec):
return format(value(), format_spec)
fmt = CallFormatter()
self.assertEqual(fmt.format('*{0}*', lambda : 'result'), '*result*')
# override convert_field ########################################
class XFormatter(string.Formatter):
def convert_field(self, value, conversion):
if conversion == 'x':
return None
return super(XFormatter, self).convert_field(value, conversion)
fmt = XFormatter()
self.assertEqual(fmt.format("{0!r}:{0!x}", 'foo', 'foo'), "'foo':None")
# override parse ################################################
class BarFormatter(string.Formatter):
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
def parse(self, format_string):
for field in format_string.split('|'):
if field[0] == '+':
# it's markup
field_name, _, format_spec = field[1:].partition(':')
yield '', field_name, format_spec, None
else:
yield field, None, None, None
fmt = BarFormatter()
self.assertEqual(fmt.format('*|+0:^10s|*', 'foo'), '* foo *')
# test all parameters used
class CheckAllUsedFormatter(string.Formatter):
def check_unused_args(self, used_args, args, kwargs):
# Track which arguments actually got used
unused_args = set(kwargs.keys())
unused_args.update(range(0, len(args)))
for arg in used_args:
unused_args.remove(arg)
if unused_args:
raise ValueError("unused arguments")
fmt = CheckAllUsedFormatter()
self.assertEqual(fmt.format("{0}", 10), "10")
self.assertEqual(fmt.format("{0}{i}", 10, i=100), "10100")
self.assertEqual(fmt.format("{0}{i}{1}", 10, 20, i=100), "1010020")
self.assertRaises(ValueError, fmt.format, "{0}{i}{1}", 10, 20, i=100, j=0)
self.assertRaises(ValueError, fmt.format, "{0}", 10, 20)
self.assertRaises(ValueError, fmt.format, "{0}", 10, 20, i=100)
self.assertRaises(ValueError, fmt.format, "{i}", 10, 20, i=100)
# Alternate formatting is not supported
self.assertRaises(ValueError, format, '', '#')
self.assertRaises(ValueError, format, '', '#20')
class BytesAliasTest(unittest.TestCase):
def test_builtin(self):
self.assertTrue(str is bytes)
def test_syntax(self):
self.assertEqual(b"spam", "spam")
self.assertEqual(br"egg\foo", "egg\\foo")
self.assertTrue(type(b""), str)
self.assertTrue(type(br""), str)
def test_main():
test_support.run_unittest(StringTest, ModuleTest, BytesAliasTest)
if __name__ == "__main__":
test_main()
| lgpl-2.1 |
alianmohammad/pd-gem5-latest | src/arch/x86/isa/insts/general_purpose/rotate_and_shift/shift.py | 91 | 6764 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop SAL_R_I
{
slli reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAL_M_I
{
ldst t1, seg, sib, disp
slli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAL_P_I
{
rdip t7
ldst t1, seg, riprel, disp
slli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAL_1_R
{
slli reg, reg, 1, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAL_1_M
{
ldst t1, seg, sib, disp
slli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAL_1_P
{
rdip t7
ldst t1, seg, riprel, disp
slli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAL_R_R
{
sll reg, reg, regm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAL_M_R
{
ldst t1, seg, sib, disp
sll t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAL_P_R
{
rdip t7
ldst t1, seg, riprel, disp
sll t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHLD_R_R
{
mdbi regm, 0
sld reg, reg, rcx, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHLD_M_R
{
ldst t1, seg, sib, disp
mdbi reg, 0
sld t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHLD_P_R
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
sld t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHLD_R_R_I
{
mdbi regm, 0
sldi reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHLD_M_R_I
{
ldst t1, seg, sib, disp
mdbi reg, 0
sldi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHLD_P_R_I
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
sldi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHR_R_I
{
srli reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHR_M_I
{
ldst t1, seg, sib, disp
srli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
srli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHR_1_R
{
srli reg, reg, 1, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHR_1_M
{
ldst t1, seg, sib, disp
srli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
srli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHR_R_R
{
srl reg, reg, regm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHR_M_R
{
ldst t1, seg, sib, disp
srl t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
srl t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHRD_R_R
{
mdbi regm, 0
srd reg, reg, rcx, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHRD_M_R
{
ldst t1, seg, sib, disp
mdbi reg, 0
srd t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHRD_P_R
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
srd t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHRD_R_R_I
{
mdbi regm, 0
srdi reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHRD_M_R_I
{
ldst t1, seg, sib, disp
mdbi reg, 0
srdi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHRD_P_R_I
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
srdi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAR_R_I
{
srai reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAR_M_I
{
ldst t1, seg, sib, disp
srai t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
srai t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAR_1_R
{
srai reg, reg, 1, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAR_1_M
{
ldst t1, seg, sib, disp
srai t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
srai t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAR_R_R
{
sra reg, reg, regm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAR_M_R
{
ldst t1, seg, sib, disp
sra t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
sra t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
'''
| bsd-3-clause |
Jorge-Rodriguez/ansible | lib/ansible/module_utils/netapp_module.py | 25 | 9559 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2018, Laurent Nicolas <laurentn@netapp.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''' Support class for NetApp ansible modules '''
import ansible.module_utils.netapp as netapp_utils
def cmp(a, b):
"""
    Python 3 does not have a cmp function; this provides the equivalent.
:param a: first object to check
:param b: second object to check
:return:
"""
# convert to lower case for string comparison.
if a is None:
return -1
if type(a) is str and type(b) is str:
a = a.lower()
b = b.lower()
# if list has string element, convert string to lower case.
if type(a) is list and type(b) is list:
a = [x.lower() if type(x) is str else x for x in a]
b = [x.lower() if type(x) is str else x for x in b]
a.sort()
b.sort()
return (a > b) - (a < b)
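# Behaviour sketch: strings compare case-insensitively and lists compare after
# lower-casing and sorting, mirroring Python 2's negative/zero/positive result.
#   cmp('ABC', 'abc')            # -> 0
#   cmp(['b', 'A'], ['a', 'B'])  # -> 0
#   cmp(1, 2)                    # -> -1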
class NetAppModule(object):
'''
Common class for NetApp modules
set of support functions to derive actions based
on the current state of the system, and a desired state
'''
def __init__(self):
self.log = list()
self.changed = False
        self.parameters = {'name': 'not initialized'}
self.zapi_string_keys = dict()
self.zapi_bool_keys = dict()
self.zapi_list_keys = dict()
self.zapi_int_keys = dict()
self.zapi_required = dict()
def set_parameters(self, ansible_params):
self.parameters = dict()
for param in ansible_params:
if ansible_params[param] is not None:
self.parameters[param] = ansible_params[param]
return self.parameters
def get_value_for_bool(self, from_zapi, value):
"""
Convert boolean values to string or vice-versa
If from_zapi = True, value is converted from string (as it appears in ZAPI) to boolean
If from_zapi = False, value is converted from boolean to string
For get() method, from_zapi = True
For modify(), create(), from_zapi = False
:param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
:param value: value of the boolean attribute
:return: string or boolean
"""
if value is None:
return None
if from_zapi:
return True if value == 'true' else False
else:
return 'true' if value else 'false'
def get_value_for_int(self, from_zapi, value):
"""
Convert integer values to string or vice-versa
If from_zapi = True, value is converted from string (as it appears in ZAPI) to integer
If from_zapi = False, value is converted from integer to string
For get() method, from_zapi = True
For modify(), create(), from_zapi = False
:param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
:param value: value of the integer attribute
:return: string or integer
"""
if value is None:
return None
if from_zapi:
return int(value)
else:
return str(value)
def get_value_for_list(self, from_zapi, zapi_parent, zapi_child=None, data=None):
"""
Convert a python list() to NaElement or vice-versa
If from_zapi = True, value is converted from NaElement (parent-children structure) to list()
If from_zapi = False, value is converted from list() to NaElement
:param zapi_parent: ZAPI parent key or the ZAPI parent NaElement
:param zapi_child: ZAPI child key
:param data: list() to be converted to NaElement parent-children object
:param from_zapi: convert the value from ZAPI or to ZAPI acceptable type
:return: list() or NaElement
"""
if from_zapi:
if zapi_parent is None:
return []
else:
return [zapi_child.get_content() for zapi_child in zapi_parent.get_children()]
else:
zapi_parent = netapp_utils.zapi.NaElement(zapi_parent)
for item in data:
zapi_parent.add_new_child(zapi_child, item)
return zapi_parent
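    # Usage sketch (hypothetical ZAPI keys):
    #   el = self.get_value_for_list(False, 'protocols', 'protocol', ['nfs', 'cifs'])
    #   self.get_value_for_list(True, el)  # -> ['nfs', 'cifs']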
def get_cd_action(self, current, desired):
        ''' takes a desired state and a current state, and returns an action:
create, delete, None
eg:
is_present = 'absent'
some_object = self.get_object(source)
if some_object is not None:
is_present = 'present'
            action = self.get_cd_action(current=is_present, desired=self.parameters)
'''
if 'state' in desired:
desired_state = desired['state']
else:
desired_state = 'present'
if current is None and desired_state == 'absent':
return None
if current is not None and desired_state == 'present':
return None
# change in state
self.changed = True
if current is not None:
return 'delete'
return 'create'
@staticmethod
def check_keys(current, desired):
''' TODO: raise an error if keys do not match
with the exception of:
new_name, state in desired
'''
pass
def get_modified_attributes(self, current, desired, get_list_diff=False):
        ''' takes two dicts of attributes and returns a dict of attributes that are
not in the current state
It is expected that all attributes of interest are listed in current and
desired.
:param: current: current attributes in ONTAP
:param: desired: attributes from playbook
:param: get_list_diff: specifies whether to have a diff of desired list w.r.t current list for an attribute
:return: dict of attributes to be modified
:rtype: dict
NOTE: depending on the attribute, the caller may need to do a modify or a
different operation (eg move volume if the modified attribute is an
aggregate name)
'''
# if the object does not exist, we can't modify it
modified = dict()
if current is None:
return modified
# error out if keys do not match
self.check_keys(current, desired)
# collect changed attributes
for key, value in current.items():
if key in desired and desired[key] is not None:
if type(value) is list:
value.sort()
desired[key].sort()
if cmp(value, desired[key]) != 0:
if not get_list_diff:
modified[key] = desired[key]
else:
modified[key] = [item for item in desired[key] if item not in value]
if modified:
self.changed = True
return modified
def is_rename_action(self, source, target):
''' takes a source and target object, and returns True
if a rename is required
eg:
source = self.get_object(source_name)
target = self.get_object(target_name)
action = is_rename_action(source, target)
:return: None for error, True for rename action, False otherwise
'''
if source is None and target is None:
# error, do nothing
# cannot rename an non existent resource
# alternatively we could create B
return None
if source is not None and target is not None:
# error, do nothing
# idempotency (or) new_name_is_already_in_use
# alternatively we could delete B and rename A to B
return False
if source is None and target is not None:
# do nothing, maybe the rename was already done
return False
# source is not None and target is None:
# rename is in order
self.changed = True
return True
| gpl-3.0 |
timsnyder/bokeh | bokeh/models/callbacks.py | 2 | 5701 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Client-side interactivity.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from types import FunctionType
# External imports
# Bokeh imports
from ..core.has_props import abstract
from ..core.properties import Dict, String, Bool, AnyRef
from ..model import Model
from ..util.dependencies import import_required
from ..util.compiler import nodejs_compile, CompilationError
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Callback',
'OpenURL',
'CustomJS',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class Callback(Model):
''' Base class for interactive callback.
'''
class OpenURL(Callback):
''' Open a URL in a new or current tab or window.
'''
url = String("http://", help="""
The URL to direct the web browser to. This can be a template string,
which will be formatted with data from the data source.
""")
same_tab = Bool(False, help="""
Open URL in a new (`False`, default) or current (`True`) tab or window.
For `same_tab=False`, whether tab or window will be opened is browser
dependent.
""")
class CustomJS(Callback):
''' Execute a JavaScript function.
.. warning::
The explicit purpose of this Bokeh Model is to embed *raw JavaScript
code* for a browser to execute. If any part of the code is derived
from untrusted user inputs, then you must take appropriate care to
sanitize the user input prior to passing to Bokeh.
'''
@classmethod
def from_py_func(cls, func):
""" Create a ``CustomJS`` instance from a Python function. The
function is translated to JavaScript using PScript.
"""
from bokeh.util.deprecation import deprecated
deprecated("'from_py_func' is deprecated and will be removed in an eventual 2.0 release. "
"Use CustomJS directly instead.")
if not isinstance(func, FunctionType):
raise ValueError('CustomJS.from_py_func needs function object.')
pscript = import_required('pscript',
'To use Python functions for CustomJS, you need PScript ' +
'("conda install -c conda-forge pscript" or "pip install pscript")')
# Collect default values
default_values = func.__defaults__ # Python 2.6+
default_names = func.__code__.co_varnames[:len(default_values)]
args = dict(zip(default_names, default_values))
args.pop('window', None) # Clear window, so we use the global window object
# Get JS code, we could rip out the function def, or just
# call the function. We do the latter.
code = pscript.py2js(func, 'cb') + 'cb(%s);\n' % ', '.join(default_names)
return cls(code=code, args=args)
@classmethod
def from_coffeescript(cls, code, args={}):
''' Create a ``CustomJS`` instance from CoffeeScript code.
'''
compiled = nodejs_compile(code, lang="coffeescript", file="???")
if "error" in compiled:
raise CompilationError(compiled.error)
else:
return cls(code=compiled.code, args=args)
args = Dict(String, AnyRef, help="""
A mapping of names to Python objects. In particular those can be bokeh's models.
These objects are made available to the callback's code snippet as the values of
named parameters to the callback.
""")
code = String(default="", help="""
A snippet of JavaScript code to execute in the browser. The
    code is made into the body of a function, and all of the named objects in
``args`` are available as parameters that the code can use. Additionally,
a ``cb_obj`` parameter contains the object that triggered the callback
and an optional ``cb_data`` parameter that contains any tool-specific data
(i.e. mouse coordinates and hovered glyph indices for the ``HoverTool``).
.. note:: Use ``CustomJS.from_coffeescript()`` for CoffeeScript source code.
""")
use_strict = Bool(default=False, help="""
Enables or disables automatic insertion of ``"use strict";`` into ``code``.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
lucidfrontier45/scikit-learn | sklearn/utils/tests/test_extmath.py | 2 | 8819 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# License: BSD
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_true(np.all(mode == mode_result))
assert_true(np.all(score.ravel() == w[:, :5].sum(1)))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
"""Check that extmath.randomized_svd is consistent with linalg.svd"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_randomized_svd_low_rank_with_noise():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
"""Check that extmath.randomized_svd can handle noisy matrices"""
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
"""Check that transposing the design matrix has limit impact"""
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in xrange(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
"""Check if cartesian product delivers the right results"""
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
| bsd-3-clause |
user-none/calibre | src/calibre/devices/idevice/parse_xml.py | 14 | 11023 | #!/usr/bin/env python2
from __future__ import (unicode_literals, division, absolute_import,
print_function)
"""
https://github.com/ishikawa/python-plist-parser/blob/master/plist_parser.py
`Property Lists`_ are a data representation used in Apple's Mac OS X as
a convenient way to store standard object types, such as string, number,
boolean, and container object.
This file contains a class ``XmlPropertyListParser`` for parsing
a property list file and getting back a Python native data structure.
:copyright: 2008 by Takanori Ishikawa <takanori.ishikawa@gmail.com>
:license: MIT (See LICENSE file for more details)
.. _Property Lists: http://developer.apple.com/documentation/Cocoa/Conceptual/PropertyLists/
"""
class PropertyListParseError(Exception):
"""Raised when parsing a property list is failed."""
pass
class XmlPropertyListParser(object):
"""
The ``XmlPropertyListParser`` class provides methods that
convert `Property Lists`_ objects from xml format.
Property list objects include ``string``, ``unicode``,
``list``, ``dict``, ``datetime``, and ``int`` or ``float``.
:copyright: 2008 by Takanori Ishikawa <takanori.ishikawa@gmail.com>
:license: MIT License
.. _Property List: http://developer.apple.com/documentation/Cocoa/Conceptual/PropertyLists/
"""
def _assert(self, test, message):
if not test:
raise PropertyListParseError(message)
# ------------------------------------------------
# SAX2: ContentHandler
# ------------------------------------------------
def setDocumentLocator(self, locator):
pass
def startPrefixMapping(self, prefix, uri):
pass
def endPrefixMapping(self, prefix):
pass
def startElementNS(self, name, qname, attrs):
pass
def endElementNS(self, name, qname):
pass
def ignorableWhitespace(self, whitespace):
pass
def processingInstruction(self, target, data):
pass
def skippedEntity(self, name):
pass
def startDocument(self):
self.__stack = []
self.__plist = self.__key = self.__characters = None
# For reducing runtime type checking,
# the parser caches top level object type.
self.__in_dict = False
def endDocument(self):
self._assert(self.__plist is not None, "A top level element must be <plist>.")
self._assert(
            len(self.__stack) == 0,
"multiple objects at top level.")
def startElement(self, name, attributes):
if name in XmlPropertyListParser.START_CALLBACKS:
XmlPropertyListParser.START_CALLBACKS[name](self, name, attributes)
if name in XmlPropertyListParser.PARSE_CALLBACKS:
self.__characters = []
def endElement(self, name):
if name in XmlPropertyListParser.END_CALLBACKS:
XmlPropertyListParser.END_CALLBACKS[name](self, name)
if name in XmlPropertyListParser.PARSE_CALLBACKS:
# Creates character string from buffered characters.
content = ''.join(self.__characters)
# For compatibility with ``xml.etree`` and ``plistlib``,
# convert text string to ascii, if possible
try:
content = content.encode('ascii')
except (UnicodeError, AttributeError):
pass
XmlPropertyListParser.PARSE_CALLBACKS[name](self, name, content)
self.__characters = None
def characters(self, content):
if self.__characters is not None:
self.__characters.append(content)
# ------------------------------------------------
# XmlPropertyListParser private
# ------------------------------------------------
def _push_value(self, value):
if not self.__stack:
self._assert(self.__plist is None, "Multiple objects at top level")
self.__plist = value
else:
top = self.__stack[-1]
#assert isinstance(top, (dict, list))
if self.__in_dict:
k = self.__key
if k is None:
raise PropertyListParseError("Missing key for dictionary.")
top[k] = value
self.__key = None
else:
top.append(value)
def _push_stack(self, value):
self.__stack.append(value)
self.__in_dict = isinstance(value, dict)
def _pop_stack(self):
self.__stack.pop()
self.__in_dict = self.__stack and isinstance(self.__stack[-1], dict)
def _start_plist(self, name, attrs):
self._assert(not self.__stack and self.__plist is None, "<plist> more than once.")
self._assert(attrs.get('version', '1.0') == '1.0',
"version 1.0 is only supported, but was '%s'." % attrs.get('version'))
def _start_array(self, name, attrs):
v = list()
self._push_value(v)
self._push_stack(v)
def _start_dict(self, name, attrs):
v = dict()
self._push_value(v)
self._push_stack(v)
def _end_array(self, name):
self._pop_stack()
def _end_dict(self, name):
if self.__key is not None:
print("XmlPropertyListParser() WARNING: Missing value for key '%s'" % self.__key)
#raise PropertyListParseError("Missing value for key '%s'" % self.__key)
self._pop_stack()
def _start_true(self, name, attrs):
self._push_value(True)
def _start_false(self, name, attrs):
self._push_value(False)
def _parse_key(self, name, content):
if not self.__in_dict:
print("XmlPropertyListParser() WARNING: ignoring <key>%s</key> (<key> elements must be contained in <dict> element)" % content)
#raise PropertyListParseError("<key> element '%s' must be in <dict> element." % content)
else:
self.__key = content
def _parse_string(self, name, content):
self._push_value(content)
def _parse_data(self, name, content):
import base64
self._push_value(base64.b64decode(content))
# http://www.apple.com/DTDs/PropertyList-1.0.dtd says:
#
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'.
# Smaller units may be omitted with a loss of precision)
import re
DATETIME_PATTERN = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z$")
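    # For example, "2008-05-30T10:15:30Z" parses to datetime(2008, 5, 30, 10, 15, 30),
    # while a truncated "2008Z" is padded below to datetime(2008, 1, 1).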
def _parse_date(self, name, content):
import datetime
units = ('year', 'month', 'day', 'hour', 'minute', 'second', )
pattern = XmlPropertyListParser.DATETIME_PATTERN
match = pattern.match(content)
if not match:
print("XmlPropertyListParser() ERROR: error parsing %s as datetime" % repr(content))
#raise PropertyListParseError("Failed to parse datetime '%s'" % content)
d = datetime.datetime.today()
else:
groups, components = match.groupdict(), []
for key in units:
value = groups[key]
if value is None:
break
components.append(int(value))
while len(components) < 3:
components.append(1)
d = datetime.datetime(*components)
self._push_value(d)
def _parse_real(self, name, content):
content = content.replace(',', '.')
try:
self._push_value(float(content))
except:
print("XmlPropertyListParser() WARNING: error converting %s to float" % repr(content))
self._push_value(0.0)
def _parse_integer(self, name, content):
self._push_value(int(content))
START_CALLBACKS = {
'plist': _start_plist,
'array': _start_array,
'dict': _start_dict,
'true': _start_true,
'false': _start_false,
}
END_CALLBACKS = {
'array': _end_array,
'dict': _end_dict,
}
PARSE_CALLBACKS = {
'key': _parse_key,
'string': _parse_string,
'data': _parse_data,
'date': _parse_date,
'real': _parse_real,
'integer': _parse_integer,
}
# ------------------------------------------------
# XmlPropertyListParser
# ------------------------------------------------
def _to_stream(self, io_or_string):
if isinstance(io_or_string, basestring):
# Creates a string stream for in-memory contents.
from cStringIO import StringIO
return StringIO(io_or_string)
elif hasattr(io_or_string, 'read') and callable(getattr(io_or_string, 'read')):
return io_or_string
else:
raise TypeError('Can\'t convert %s to file-like-object' % type(io_or_string))
def _parse_using_etree(self, xml_input):
from xml.etree.cElementTree import iterparse
parser = iterparse(self._to_stream(xml_input), events=(b'start', b'end'))
self.startDocument()
try:
for action, element in parser:
name = element.tag
if action == 'start':
if name in XmlPropertyListParser.START_CALLBACKS:
XmlPropertyListParser.START_CALLBACKS[name](self, element.tag, element.attrib)
elif action == 'end':
if name in XmlPropertyListParser.END_CALLBACKS:
XmlPropertyListParser.END_CALLBACKS[name](self, name)
if name in XmlPropertyListParser.PARSE_CALLBACKS:
XmlPropertyListParser.PARSE_CALLBACKS[name](self, name, element.text or "")
element.clear()
except SyntaxError, e:
raise PropertyListParseError(e)
self.endDocument()
return self.__plist
def _parse_using_sax_parser(self, xml_input):
from xml.sax import make_parser, xmlreader, SAXParseException
source = xmlreader.InputSource()
source.setByteStream(self._to_stream(xml_input))
reader = make_parser()
reader.setContentHandler(self)
try:
reader.parse(source)
except SAXParseException, e:
raise PropertyListParseError(e)
return self.__plist
def parse(self, xml_input):
"""
Parse the property list (`.plist`, `.xml, for example) ``xml_input``,
which can be either a string or a file-like object.
>>> parser = XmlPropertyListParser()
>>> parser.parse(r'<plist version="1.0">'
... r'<dict><key>Python</key><string>.py</string></dict>'
... r'</plist>')
{'Python': '.py'}
"""
try:
return self._parse_using_etree(xml_input)
except ImportError:
            # No xml.etree.cElementTree found.
return self._parse_using_sax_parser(xml_input)
| gpl-3.0 |
boxed/CMi | web_frontend/CMi/tvshows/api.py | 1 | 1578 | from django.template.loader import render_to_string
from django.conf.urls import patterns
from CMi.tvshows.models import Episode, Category
def tv_show_tile(title, episodes, category=None):
return (
10, render_to_string('tile.html', {
'url': '/tvshows/' + (('category/%s/' % category.pk) if category else ''),
'image': '/site-media/tv.svg',
'title': title,
'content': '%s new / %s total' % (episodes.filter(watched=False).count(), episodes.count()),
}))
def tiles():
return [tv_show_tile(title='TV Shows', episodes=Episode.objects.filter(show__category=None).exclude(filepath=''))] + [tv_show_tile(category=category, title=category.name, episodes=Episode.objects.filter(show__category=category).exclude(filepath='')) for category in Category.objects.order_by('name')]
def urls():
return patterns('CMi.tvshows.views',
(r'^tvshows/$', 'index'),
(r'^tvshows/category/(?P<category_id>\d+)/$', 'index'),
(r'^tvshows/(?P<show_id>\d+)/$', 'episode_list'),
(r'^tvshows/(?P<show_id>\d+)/(?P<episode_id>\d+)/$', 'play_episode'),
(r'^tvshows/(?P<show_id>\d+)/(?P<episode_id>\d+)/ended$', 'episode_ended'),
(r'^tvshows/(?P<show_id>\d+)/(?P<episode_id>\d+)/position/(?P<position>\d+)$', 'episode_position'),
(r'^tvshows/suggested/$', 'suggested_shows'),
(r'^tvshows/suggested/(?P<suggested_show_id>\d+)/add/(?P<option>.*)/$', 'add_suggested_show'),
(r'^tvshows/suggested/(?P<suggested_show_id>\d+)/ignore/$', 'ignore_suggested_show'),
) | mit |
khchine5/xl | lino_xl/lib/postings/mixins.py | 1 | 2368 | # -*- coding: UTF-8 -*-
# Copyright 2012-2016 Luc Saffre
#
# License: BSD (see file COPYING for details)
"""
Defines models for :mod:`lino_xl.lib.postings`.
"""
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from lino.core import actions
from lino.api import dd, rt
class CreatePostings(dd.Action):
"""
Creates a series of new Postings from this Postable.
The Postable gives the list of recipients, and there will
be one Posting for each recipient.
Author of each Posting will be the user who issued the action request,
even if that user is acting as someone else.
You cannot create a Posting in someone else's name.
"""
url_action_name = 'post'
#~ label = _('Create email')
label = _('Create posting')
help_text = _('Create classical mail postings from this')
icon_name = 'script_add'
callable_from = (actions.ShowTable,
actions.ShowDetail) # but not from ShowInsert
def run_from_ui(self, ar, **kw):
Posting = rt.models.postings.Posting
PostingStates = rt.models.postings.PostingStates
elem = ar.selected_rows[0]
recs = tuple(elem.get_postable_recipients())
def ok(ar):
for rec in recs:
p = Posting(
user=ar.user, owner=elem,
partner=rec,
date=dd.today(),
state=PostingStates.ready)
p.full_clean()
p.save()
kw.update(refresh=True)
ar.success(**kw)
msg = _("Going to create %(num)d postings for %(elem)s") % dict(
num=len(recs), elem=elem)
ar.confirm(ok, msg)
class Postable(dd.Model):
"""
Mixin for models that provide a "Post" button.
"""
class Meta:
abstract = True
if dd.is_installed('postings'):
create_postings = CreatePostings()
def get_postable_recipients(self):
return []
def get_recipients(self):
Posting = rt.models.postings.Posting
qs = Posting.objects.filter(
            owner_id=self.pk, owner_type=ContentType.objects.get_for_model(
self.__class__))
return qs.values('partner')
#~ state=PostingStates.ready)
| bsd-2-clause |
0jpq0/kbengine | kbe/res/scripts/common/Lib/distutils/tests/test_dep_util.py | 147 | 2832 | """Tests for distutils.dep_util."""
import unittest
import os
import time
from distutils.dep_util import newer, newer_pairwise, newer_group
from distutils.errors import DistutilsFileError
from distutils.tests import support
from test.support import run_unittest
class DepUtilTestCase(support.TempdirManager, unittest.TestCase):
def test_newer(self):
tmpdir = self.mkdtemp()
new_file = os.path.join(tmpdir, 'new')
old_file = os.path.abspath(__file__)
# Raise DistutilsFileError if 'new_file' does not exist.
self.assertRaises(DistutilsFileError, newer, new_file, old_file)
# Return true if 'new_file' exists and is more recently modified than
# 'old_file', or if 'new_file' exists and 'old_file' doesn't.
self.write_file(new_file)
self.assertTrue(newer(new_file, 'I_dont_exist'))
self.assertTrue(newer(new_file, old_file))
# Return false if both exist and 'old_file' is the same age or younger
# than 'new_file'.
self.assertFalse(newer(old_file, new_file))
def test_newer_pairwise(self):
tmpdir = self.mkdtemp()
sources = os.path.join(tmpdir, 'sources')
targets = os.path.join(tmpdir, 'targets')
os.mkdir(sources)
os.mkdir(targets)
one = os.path.join(sources, 'one')
two = os.path.join(sources, 'two')
three = os.path.abspath(__file__) # I am the old file
four = os.path.join(targets, 'four')
self.write_file(one)
self.write_file(two)
self.write_file(four)
self.assertEqual(newer_pairwise([one, two], [three, four]),
([one],[three]))
def test_newer_group(self):
tmpdir = self.mkdtemp()
sources = os.path.join(tmpdir, 'sources')
os.mkdir(sources)
one = os.path.join(sources, 'one')
two = os.path.join(sources, 'two')
three = os.path.join(sources, 'three')
old_file = os.path.abspath(__file__)
# return true if 'old_file' is out-of-date with respect to any file
# listed in 'sources'.
self.write_file(one)
self.write_file(two)
self.write_file(three)
self.assertTrue(newer_group([one, two, three], old_file))
self.assertFalse(newer_group([one, two, old_file], three))
# missing handling
os.remove(one)
self.assertRaises(OSError, newer_group, [one, two, old_file], three)
self.assertFalse(newer_group([one, two, old_file], three,
missing='ignore'))
self.assertTrue(newer_group([one, two, old_file], three,
missing='newer'))
def test_suite():
return unittest.makeSuite(DepUtilTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| lgpl-3.0 |
ibinti/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/pytree.py | 325 | 29039 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <guido@python.org>"
import sys
import warnings
from StringIO import StringIO
HUGE = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs = {}
def type_repr(type_num):
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful
# from .pgen2 import token // token.__dict__.items():
for name, val in python_symbols.__dict__.items():
if type(val) == int: _type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
class Base(object):
"""
Abstract base class for Node and Leaf.
This provides some default functionality and boilerplate using the
template pattern.
A node may be a subnode of at most one parent.
"""
# Default values for instance variables
type = None # int: token number (< 256) or symbol number (>= 256)
parent = None # Parent node pointer, or None
children = () # Tuple of subnodes
was_changed = False
was_checked = False
def __new__(cls, *args, **kwds):
"""Constructor that prevents Base from being instantiated."""
assert cls is not Base, "Cannot instantiate Base"
return object.__new__(cls)
def __eq__(self, other):
"""
Compare two nodes for equality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
__hash__ = None # For Py3 compatibility.
def __ne__(self, other):
"""
Compare two nodes for inequality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
def _eq(self, other):
"""
Compare two nodes for equality.
This is called by __eq__ and __ne__. It is only called if the two nodes
have the same type. This must be implemented by the concrete subclass.
Nodes should be considered equal if they have the same structure,
ignoring the prefix string and other context information.
"""
raise NotImplementedError
def clone(self):
"""
Return a cloned (deep) copy of self.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def post_order(self):
"""
Return a post-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def pre_order(self):
"""
Return a pre-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def set_prefix(self, prefix):
"""
Set the prefix for the node (see Leaf class).
DEPRECATED; use the prefix property directly.
"""
warnings.warn("set_prefix() is deprecated; use the prefix property",
DeprecationWarning, stacklevel=2)
self.prefix = prefix
def get_prefix(self):
"""
Return the prefix for the node (see Leaf class).
DEPRECATED; use the prefix property directly.
"""
warnings.warn("get_prefix() is deprecated; use the prefix property",
DeprecationWarning, stacklevel=2)
return self.prefix
def replace(self, new):
"""Replace this node with a new one in the parent."""
assert self.parent is not None, str(self)
assert new is not None
if not isinstance(new, list):
new = [new]
l_children = []
found = False
for ch in self.parent.children:
if ch is self:
assert not found, (self.parent.children, self, new)
if new is not None:
l_children.extend(new)
found = True
else:
l_children.append(ch)
assert found, (self.children, self, new)
self.parent.changed()
self.parent.children = l_children
for x in new:
x.parent = self.parent
self.parent = None
def get_lineno(self):
"""Return the line number which generated the invocant node."""
node = self
while not isinstance(node, Leaf):
if not node.children:
return
node = node.children[0]
return node.lineno
def changed(self):
if self.parent:
self.parent.changed()
self.was_changed = True
def remove(self):
"""
Remove the node from the tree. Returns the position of the node in its
parent's children before it was removed.
"""
if self.parent:
for i, node in enumerate(self.parent.children):
if node is self:
self.parent.changed()
del self.parent.children[i]
self.parent = None
return i
@property
def next_sibling(self):
"""
The node immediately following the invocant in their parent's children
list. If the invocant does not have a next sibling, it is None
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i+1]
except IndexError:
return None
@property
def prev_sibling(self):
"""
The node immediately preceding the invocant in their parent's children
list. If the invocant does not have a previous sibling, it is None.
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i-1]
def leaves(self):
for child in self.children:
for x in child.leaves():
yield x
def depth(self):
if self.parent is None:
return 0
return 1 + self.parent.depth()
def get_suffix(self):
"""
Return the string immediately following the invocant node. This is
effectively equivalent to node.next_sibling.prefix
"""
next_sib = self.next_sibling
if next_sib is None:
return u""
return next_sib.prefix
if sys.version_info < (3, 0):
def __str__(self):
return unicode(self).encode("ascii")
class Node(Base):
"""Concrete implementation for interior nodes."""
    def __init__(self, type, children,
context=None,
prefix=None,
fixers_applied=None):
"""
Initializer.
Takes a type constant (a symbol number >= 256), a sequence of
child nodes, and an optional context keyword argument.
As a side effect, the parent pointers of the children are updated.
"""
assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
assert ch.parent is None, repr(ch)
ch.parent = self
if prefix is not None:
self.prefix = prefix
if fixers_applied:
self.fixers_applied = fixers_applied[:]
else:
self.fixers_applied = None
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%s, %r)" % (self.__class__.__name__,
type_repr(self.type),
self.children)
def __unicode__(self):
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return u"".join(map(unicode, self.children))
if sys.version_info > (3, 0):
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.children) == (other.type, other.children)
def clone(self):
"""Return a cloned (deep) copy of self."""
return Node(self.type, [ch.clone() for ch in self.children],
fixers_applied=self.fixers_applied)
def post_order(self):
"""Return a post-order iterator for the tree."""
for child in self.children:
for node in child.post_order():
yield node
yield self
def pre_order(self):
"""Return a pre-order iterator for the tree."""
yield self
for child in self.children:
for node in child.pre_order():
yield node
def _prefix_getter(self):
"""
The whitespace and comments preceding this node in the input.
"""
if not self.children:
return ""
return self.children[0].prefix
def _prefix_setter(self, prefix):
if self.children:
self.children[0].prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def set_child(self, i, child):
"""
Equivalent to 'node.children[i] = child'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children[i].parent = None
self.children[i] = child
self.changed()
def insert_child(self, i, child):
"""
Equivalent to 'node.children.insert(i, child)'. This method also sets
the child's parent attribute appropriately.
"""
child.parent = self
self.children.insert(i, child)
self.changed()
def append_child(self, child):
"""
Equivalent to 'node.children.append(child)'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children.append(child)
self.changed()
class Leaf(Base):
"""Concrete implementation for leaf nodes."""
# Default values for instance variables
_prefix = "" # Whitespace and comments preceding this token in the input
lineno = 0 # Line where this token starts in the input
    column = 0 # Column where this token starts in the input
def __init__(self, type, value,
context=None,
prefix=None,
fixers_applied=[]):
"""
Initializer.
Takes a type constant (a token number < 256), a string value, and an
optional context keyword argument.
"""
assert 0 <= type < 256, type
if context is not None:
self._prefix, (self.lineno, self.column) = context
self.type = type
self.value = value
if prefix is not None:
self._prefix = prefix
self.fixers_applied = fixers_applied[:]
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%r, %r)" % (self.__class__.__name__,
self.type,
self.value)
def __unicode__(self):
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return self.prefix + unicode(self.value)
if sys.version_info > (3, 0):
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def clone(self):
"""Return a cloned (deep) copy of self."""
return Leaf(self.type, self.value,
(self.prefix, (self.lineno, self.column)),
fixers_applied=self.fixers_applied)
def leaves(self):
yield self
def post_order(self):
"""Return a post-order iterator for the tree."""
yield self
def pre_order(self):
"""Return a pre-order iterator for the tree."""
yield self
def _prefix_getter(self):
"""
The whitespace and comments preceding this token in the input.
"""
return self._prefix
def _prefix_setter(self, prefix):
self.changed()
self._prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def convert(gr, raw_node):
"""
Convert raw node information to a Node or Leaf instance.
This is passed to the parser driver which calls it whenever a reduction of a
grammar rule produces a new complete node, so that the tree is build
strictly bottom-up.
"""
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
class BasePattern(object):
"""
A pattern is a tree matching pattern.
It looks for a specific node type (token or symbol), and
optionally for a specific content.
This is an abstract base class. There are three concrete
subclasses:
- LeafPattern matches a single leaf node;
- NodePattern matches a single node (usually non-leaf);
- WildcardPattern matches a sequence of nodes of variable length.
"""
# Defaults for instance variables
type = None # Node type (token if < 256, symbol if >= 256)
content = None # Optional content matching pattern
name = None # Optional name used to store match in results dict
def __new__(cls, *args, **kwds):
"""Constructor that prevents BasePattern from being instantiated."""
assert cls is not BasePattern, "Cannot instantiate BasePattern"
return object.__new__(cls)
def __repr__(self):
args = [type_repr(self.type), self.content, self.name]
while args and args[-1] is None:
del args[-1]
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
def optimize(self):
"""
A subclass can define this as a hook for optimizations.
Returns either self or another node with the same effect.
"""
return self
def match(self, node, results=None):
"""
Does this pattern exactly match a node?
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
Default implementation for non-wildcard patterns.
"""
if self.type is not None and node.type != self.type:
return False
if self.content is not None:
r = None
if results is not None:
r = {}
if not self._submatch(node, r):
return False
if r:
results.update(r)
if results is not None and self.name:
results[self.name] = node
return True
def match_seq(self, nodes, results=None):
"""
Does this pattern exactly match a sequence of nodes?
Default implementation for non-wildcard patterns.
"""
if len(nodes) != 1:
return False
return self.match(nodes[0], results)
def generate_matches(self, nodes):
"""
Generator yielding all matches for this pattern.
Default implementation for non-wildcard patterns.
"""
r = {}
if nodes and self.match(nodes[0], r):
yield 1, r
class LeafPattern(BasePattern):
def __init__(self, type=None, content=None, name=None):
"""
Initializer. Takes optional type, content, and name.
The type, if given must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, basestring), repr(content)
self.type = type
self.content = content
self.name = name
def match(self, node, results=None):
"""Override match() to insist on a leaf node."""
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
return self.content == node.value
class NodePattern(BasePattern):
wildcards = False
def __init__(self, type=None, content=None, name=None):
"""
Initializer. Takes optional type, content, and name.
The type, if given, must be a symbol type (>= 256). If the
type is None this matches *any* single node (leaf or not),
except if content is not None, in which it only matches
non-leaf nodes that also match the content pattern.
The content, if not None, must be a sequence of Patterns that
must match the node's children exactly. If the content is
given, the type must not be None.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert type >= 256, type
if content is not None:
assert not isinstance(content, basestring), repr(content)
content = list(content)
for i, item in enumerate(content):
assert isinstance(item, BasePattern), (i, item)
if isinstance(item, WildcardPattern):
self.wildcards = True
self.type = type
self.content = content
self.name = name
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
if self.wildcards:
for c, r in generate_matches(self.content, node.children):
if c == len(node.children):
if results is not None:
results.update(r)
return True
return False
if len(self.content) != len(node.children):
return False
for subpattern, child in zip(self.content, node.children):
if not subpattern.match(child, results):
return False
return True
class WildcardPattern(BasePattern):
"""
A wildcard pattern can match zero or more nodes.
This has all the flexibility needed to implement patterns like:
.* .+ .? .{m,n}
(a b c | d e | f)
(...)* (...)+ (...)? (...){m,n}
except it always uses non-greedy matching.
"""
def __init__(self, content=None, min=0, max=HUGE, name=None):
"""
Initializer.
Args:
content: optional sequence of subsequences of patterns;
if absent, matches one node;
if present, each subsequence is an alternative [*]
min: optional minimum number of times to match, default 0
max: optional maximum number of times to match, default HUGE
name: optional name assigned to this match
[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
equivalent to (a b c | d e | f g h); if content is None,
this is equivalent to '.' in regular expression terms.
The min and max parameters work as follows:
min=0, max=maxint: .*
min=1, max=maxint: .+
min=0, max=1: .?
min=1, max=1: .
If content is not None, replace the dot with the parenthesized
list of alternatives, e.g. (a b c | d e | f g h)*
"""
assert 0 <= min <= max <= HUGE, (min, max)
if content is not None:
content = tuple(map(tuple, content)) # Protect against alterations
# Check sanity of alternatives
assert len(content), repr(content) # Can't have zero alternatives
for alt in content:
                assert len(alt), repr(alt) # Can't have empty alternatives
self.content = content
self.min = min
self.max = max
self.name = name
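    # A rough matching sketch (the token type numbers are illustrative; real
    # ones come from the pgen2 token module):
    #
    #     a, b = LeafPattern(1), LeafPattern(2)
    #     pat = WildcardPattern([[a], [b]], min=0, max=HUGE)  # (a | b)* in regex terms
    #     pat.match_seq([Leaf(1, "x"), Leaf(2, "y")])         # -> True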
def optimize(self):
"""Optimize certain stacked wildcard patterns."""
subpattern = None
if (self.content is not None and
len(self.content) == 1 and len(self.content[0]) == 1):
subpattern = self.content[0][0]
if self.min == 1 and self.max == 1:
if self.content is None:
return NodePattern(name=self.name)
if subpattern is not None and self.name == subpattern.name:
return subpattern.optimize()
if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
subpattern.min <= 1 and self.name == subpattern.name):
return WildcardPattern(subpattern.content,
self.min*subpattern.min,
self.max*subpattern.max,
subpattern.name)
return self
def match(self, node, results=None):
"""Does this pattern exactly match a node?"""
return self.match_seq([node], results)
def match_seq(self, nodes, results=None):
"""Does this pattern exactly match a sequence of nodes?"""
for c, r in self.generate_matches(nodes):
if c == len(nodes):
if results is not None:
results.update(r)
if self.name:
results[self.name] = list(nodes)
return True
return False
def generate_matches(self, nodes):
"""
Generator yielding matches for a sequence of nodes.
Args:
nodes: sequence of nodes
Yields:
(count, results) tuples where:
count: the match comprises nodes[:count];
results: dict containing named submatches.
"""
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
for count in xrange(self.min, 1 + min(len(nodes), self.max)):
r = {}
if self.name:
r[self.name] = nodes[:count]
yield count, r
elif self.name == "bare_name":
yield self._bare_name_matches(nodes)
else:
# The reason for this is that hitting the recursion limit usually
# results in some ugly messages about how RuntimeErrors are being
            # ignored. We don't do this on non-CPython implementations because
# they don't have this problem.
if hasattr(sys, "getrefcount"):
save_stderr = sys.stderr
sys.stderr = StringIO()
try:
for count, r in self._recursive_matches(nodes, 0):
if self.name:
r[self.name] = nodes[:count]
yield count, r
except RuntimeError:
# We fall back to the iterative pattern matching scheme if the recursive
# scheme hits the recursion limit.
for count, r in self._iterative_matches(nodes):
if self.name:
r[self.name] = nodes[:count]
yield count, r
finally:
if hasattr(sys, "getrefcount"):
sys.stderr = save_stderr
def _iterative_matches(self, nodes):
"""Helper to iteratively yield the matches."""
nodelen = len(nodes)
if 0 >= self.min:
yield 0, {}
results = []
# generate matches that use just one alt from self.content
for alt in self.content:
for c, r in generate_matches(alt, nodes):
yield c, r
results.append((c, r))
# for each match, iterate down the nodes
while results:
new_results = []
for c0, r0 in results:
# stop if the entire set of nodes has been matched
if c0 < nodelen and c0 <= self.max:
for alt in self.content:
for c1, r1 in generate_matches(alt, nodes[c0:]):
if c1 > 0:
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
new_results.append((c0 + c1, r))
results = new_results
def _bare_name_matches(self, nodes):
"""Special optimized matcher for bare_name."""
count = 0
r = {}
done = False
max = len(nodes)
while not done and count < max:
done = True
for leaf in self.content:
if leaf[0].match(nodes[count], r):
count += 1
done = False
break
r[self.name] = nodes[:count]
return count, r
def _recursive_matches(self, nodes, count):
"""Helper to recursively yield the matches."""
assert self.content is not None
if count >= self.min:
yield 0, {}
if count < self.max:
for alt in self.content:
for c0, r0 in generate_matches(alt, nodes):
for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
class NegatedPattern(BasePattern):
def __init__(self, content=None):
"""
Initializer.
The argument is either a pattern or None. If it is None, this
only matches an empty sequence (effectively '$' in regex
lingo). If it is not None, this matches whenever the argument
pattern doesn't have any matches.
"""
if content is not None:
assert isinstance(content, BasePattern), repr(content)
self.content = content
def match(self, node):
# We never match a node in its entirety
return False
def match_seq(self, nodes):
# We only match an empty sequence of nodes in its entirety
return len(nodes) == 0
def generate_matches(self, nodes):
if self.content is None:
# Return a match if there is an empty sequence
if len(nodes) == 0:
yield 0, {}
else:
# Return a match if the argument pattern has no matches
for c, r in self.content.generate_matches(nodes):
return
yield 0, {}
def generate_matches(patterns, nodes):
"""
Generator yielding matches for a sequence of patterns and nodes.
Args:
patterns: a sequence of patterns
nodes: a sequence of nodes
Yields:
(count, results) tuples where:
count: the entire sequence of patterns matches nodes[:count];
results: dict containing named submatches.
"""
if not patterns:
yield 0, {}
else:
p, rest = patterns[0], patterns[1:]
for c0, r0 in p.generate_matches(nodes):
if not rest:
yield c0, r0
else:
for c1, r1 in generate_matches(rest, nodes[c0:]):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
| apache-2.0 |
UkiyoESoragoto/FAndroidProject | cocos2d/plugin/tools/toolsForGame/steps.py | 255 | 2302 | import sys, string, os
from Tkinter import *
# define class step
class step:
stepFrame = None
def initStep(self, root):
return
def checkStep(self):
return None
# define class step1
class step1(step):
step_entry = None
def initStep(self, root):
self.stepFrame = Frame(root)
step_tip = Label(self.stepFrame, text="Input the android project path of your game:")
step_tip.pack(anchor='nw', padx=30)
        step_tip2 = Label(self.stepFrame, text="(Please avoid using spaces in your project path)")
step_tip2.pack(anchor='nw', padx=30)
self.step_entry = Entry(self.stepFrame)
self.step_entry.pack(anchor='nw', fill=X, padx=30)
return
def checkStep(self):
tipStr = None
projPath = self.step_entry.get()
haveDir = os.path.exists(projPath)
        isProj = os.path.exists(projPath + '/AndroidManifest.xml')
        if projPath is None or len(projPath) == 0 or not haveDir or not isProj:
tipStr = 'The project path is wrong'
return tipStr
def getPath(self):
return self.step_entry.get()
# define class step2
class step2(step):
checkBtns = []
checkValues = []
def initStep(self, root, pluginList):
self.stepFrame = Frame(root)
step_tip = Label(self.stepFrame, text="Select plugins you needed:")
step_tip.pack(anchor='nw', padx=30)
for plugin in pluginList:
var = StringVar()
self.checkValues.append(var)
btn = Checkbutton(self.stepFrame, text=plugin, variable=var, onvalue=plugin, offvalue='')
btn.pack(anchor='nw', padx=50)
self.checkBtns.append(btn)
return
def checkStep(self):
tipStr = None
num = 0
for var in self.checkValues:
if len(var.get()) != 0:
num += 1
break
if num == 0:
tipStr = 'At least select one plugin'
return tipStr
def getSelectedPlugins(self):
selectPlugins = []
for var in self.checkValues:
if len(var.get()) != 0:
plugin = var.get()
selectPlugins.append(plugin)
return selectPlugins
| mit |
espadrine/opera | chromium/src/third_party/python_26/Lib/distutils/config.py | 50 | 4292 | """distutils.pypirc
Provides the PyPIRCCommand class, the base class for the command classes
that uses .pypirc in the distutils.command package.
"""
import os
import sys
from ConfigParser import ConfigParser
from distutils.cmd import Command
DEFAULT_PYPIRC = """\
[distutils]
index-servers =
pypi
[pypi]
username:%s
password:%s
"""
class PyPIRCCommand(Command):
"""Base command that knows how to handle the .pypirc file
"""
DEFAULT_REPOSITORY = 'http://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
repository = None
realm = None
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % \
DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server')]
boolean_options = ['show-response']
def _get_rc_file(self):
"""Returns rc file path."""
return os.path.join(os.path.expanduser('~'), '.pypirc')
def _store_pypirc(self, username, password):
"""Creates a default .pypirc file."""
rc = self._get_rc_file()
f = open(rc, 'w')
try:
f.write(DEFAULT_PYPIRC % (username, password))
finally:
f.close()
try:
os.chmod(rc, 0600)
except OSError:
# should do something better here
pass
def _read_pypirc(self):
"""Reads the .pypirc file."""
rc = self._get_rc_file()
if os.path.exists(rc):
self.announce('Using PyPI login from %s' % rc)
repository = self.repository or self.DEFAULT_REPOSITORY
realm = self.realm or self.DEFAULT_REALM
config = ConfigParser()
config.read(rc)
sections = config.sections()
if 'distutils' in sections:
# let's get the list of servers
index_servers = config.get('distutils', 'index-servers')
_servers = [server.strip() for server in
index_servers.split('\n')
if server.strip() != '']
if _servers == []:
# nothing set, let's try to get the default pypi
if 'pypi' in sections:
_servers = ['pypi']
else:
# the file is not properly defined, returning
# an empty dict
return {}
for server in _servers:
current = {'server': server}
current['username'] = config.get(server, 'username')
current['password'] = config.get(server, 'password')
# optional params
for key, default in (('repository',
self.DEFAULT_REPOSITORY),
('realm', self.DEFAULT_REALM)):
if config.has_option(server, key):
current[key] = config.get(server, key)
else:
current[key] = default
if (current['server'] == repository or
current['repository'] == repository):
return current
elif 'server-login' in sections:
# old format
server = 'server-login'
if config.has_option(server, 'repository'):
repository = config.get(server, 'repository')
else:
repository = self.DEFAULT_REPOSITORY
return {'username': config.get(server, 'username'),
'password': config.get(server, 'password'),
'repository': repository,
'server': server,
'realm': self.DEFAULT_REALM}
return {}
def initialize_options(self):
"""Initialize options."""
self.repository = None
self.realm = None
self.show_response = 0
def finalize_options(self):
"""Finalizes options."""
if self.repository is None:
self.repository = self.DEFAULT_REPOSITORY
if self.realm is None:
self.realm = self.DEFAULT_REALM
| bsd-3-clause |
nburn42/tensorflow | tensorflow/contrib/kfac/python/ops/utils_lib.py | 17 | 1719 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.kfac.python.ops.utils import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
_allowed_symbols = [
"set_global_constants",
"SequenceDict",
"tensors_to_column",
"column_to_tensors",
"kronecker_product",
"layer_params_to_mat2d",
"mat2d_to_layer_params",
"posdef_inv",
"posdef_inv_matrix_inverse",
"posdef_inv_cholesky",
"posdef_inv_funcs",
"SubGraph",
"generate_random_signs",
"fwd_gradients",
"ensure_sequence",
"batch_execute",
"extract_convolution_patches",
"extract_pointwise_conv2d_patches",
"is_data_format_channel_last",
"matmul_sparse_dense",
"matmul_diag_sparse",
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| apache-2.0 |
guewen/odoo | addons/account_asset/account_asset_invoice.py | 193 | 3070 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def action_number(self, cr, uid, ids, *args):
result = super(account_invoice, self).action_number(cr, uid, ids, *args)
for inv in self.browse(cr, uid, ids):
self.pool.get('account.invoice.line').asset_create(cr, uid, inv.invoice_line)
return result
def line_get_convert(self, cr, uid, x, part, date, context=None):
res = super(account_invoice, self).line_get_convert(cr, uid, x, part, date, context=context)
res['asset_id'] = x.get('asset_id', False)
return res
class account_invoice_line(osv.osv):
_inherit = 'account.invoice.line'
_columns = {
'asset_category_id': fields.many2one('account.asset.category', 'Asset Category'),
}
def asset_create(self, cr, uid, lines, context=None):
context = context or {}
asset_obj = self.pool.get('account.asset.asset')
for line in lines:
if line.asset_category_id:
vals = {
'name': line.name,
'code': line.invoice_id.number or False,
'category_id': line.asset_category_id.id,
'purchase_value': line.price_subtotal,
'period_id': line.invoice_id.period_id.id,
'partner_id': line.invoice_id.partner_id.id,
'company_id': line.invoice_id.company_id.id,
'currency_id': line.invoice_id.currency_id.id,
'purchase_date' : line.invoice_id.date_invoice,
}
changed_vals = asset_obj.onchange_category_id(cr, uid, [], vals['category_id'], context=context)
vals.update(changed_vals['value'])
asset_id = asset_obj.create(cr, uid, vals, context=context)
if line.asset_category_id.open_asset:
asset_obj.validate(cr, uid, [asset_id], context=context)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
webgeodatavore/django | tests/template_tests/syntax_tests/test_url.py | 42 | 10811 | # coding: utf-8
from django.core.urlresolvers import NoReverseMatch, resolve
from django.template import RequestContext, TemplateSyntaxError
from django.test import RequestFactory, SimpleTestCase, override_settings
from ..utils import setup
@override_settings(ROOT_URLCONF='template_tests.urls')
class UrlTagTests(SimpleTestCase):
# Successes
@setup({'url01': '{% url "client" client.id %}'})
def test_url01(self):
output = self.engine.render_to_string('url01', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/')
@setup({'url02': '{% url "client_action" id=client.id action="update" %}'})
def test_url02(self):
output = self.engine.render_to_string('url02', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url02a': '{% url "client_action" client.id "update" %}'})
def test_url02a(self):
output = self.engine.render_to_string('url02a', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url02b': "{% url 'client_action' id=client.id action='update' %}"})
def test_url02b(self):
output = self.engine.render_to_string('url02b', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url02c': "{% url 'client_action' client.id 'update' %}"})
def test_url02c(self):
output = self.engine.render_to_string('url02c', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/update/')
@setup({'url03': '{% url "index" %}'})
def test_url03(self):
output = self.engine.render_to_string('url03')
self.assertEqual(output, '/')
@setup({'url04': '{% url "named.client" client.id %}'})
def test_url04(self):
output = self.engine.render_to_string('url04', {'client': {'id': 1}})
self.assertEqual(output, '/named-client/1/')
@setup({'url05': '{% url "метка_оператора" v %}'})
def test_url05(self):
output = self.engine.render_to_string('url05', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url06': '{% url "метка_оператора_2" tag=v %}'})
def test_url06(self):
output = self.engine.render_to_string('url06', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url08': '{% url "метка_оператора" v %}'})
def test_url08(self):
output = self.engine.render_to_string('url08', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url09': '{% url "метка_оператора_2" tag=v %}'})
def test_url09(self):
output = self.engine.render_to_string('url09', {'v': 'Ω'})
self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
@setup({'url10': '{% url "client_action" id=client.id action="two words" %}'})
def test_url10(self):
output = self.engine.render_to_string('url10', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/two%20words/')
@setup({'url11': '{% url "client_action" id=client.id action="==" %}'})
def test_url11(self):
output = self.engine.render_to_string('url11', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/==/')
@setup({'url12': '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'})
def test_url12(self):
output = self.engine.render_to_string('url12', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/!$&amp;&#39;()*+,;=~:@,/')
@setup({'url13': '{% url "client_action" id=client.id action=arg|join:"-" %}'})
def test_url13(self):
output = self.engine.render_to_string('url13', {'client': {'id': 1}, 'arg': ['a', 'b']})
self.assertEqual(output, '/client/1/a-b/')
@setup({'url14': '{% url "client_action" client.id arg|join:"-" %}'})
def test_url14(self):
output = self.engine.render_to_string('url14', {'client': {'id': 1}, 'arg': ['a', 'b']})
self.assertEqual(output, '/client/1/a-b/')
@setup({'url15': '{% url "client_action" 12 "test" %}'})
def test_url15(self):
output = self.engine.render_to_string('url15')
self.assertEqual(output, '/client/12/test/')
@setup({'url18': '{% url "client" "1,2" %}'})
def test_url18(self):
output = self.engine.render_to_string('url18')
self.assertEqual(output, '/client/1,2/')
@setup({'url19': '{% url named_url client.id %}'})
def test_url19(self):
output = self.engine.render_to_string(
'url19', {'client': {'id': 1}, 'named_url': 'client'}
)
self.assertEqual(output, '/client/1/')
@setup({'url20': '{% url url_name_in_var client.id %}'})
def test_url20(self):
output = self.engine.render_to_string('url20', {'client': {'id': 1}, 'url_name_in_var': 'named.client'})
self.assertEqual(output, '/named-client/1/')
@setup({'url21': '{% autoescape off %}'
'{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'
'{% endautoescape %}'})
def test_url21(self):
output = self.engine.render_to_string('url21', {'client': {'id': 1}})
self.assertEqual(output, '/client/1/!$&\'()*+,;=~:@,/')
# Failures
@setup({'url-fail01': '{% url %}'})
def test_url_fail01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail01')
@setup({'url-fail02': '{% url "no_such_view" %}'})
def test_url_fail02(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail02')
@setup({'url-fail03': '{% url "client" %}'})
def test_url_fail03(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail03')
@setup({'url-fail04': '{% url "view" id, %}'})
def test_url_fail04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail04')
@setup({'url-fail05': '{% url "view" id= %}'})
def test_url_fail05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail05')
@setup({'url-fail06': '{% url "view" a.id=id %}'})
def test_url_fail06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail06')
@setup({'url-fail07': '{% url "view" a.id!id %}'})
def test_url_fail07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail07')
@setup({'url-fail08': '{% url "view" id="unterminatedstring %}'})
def test_url_fail08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail08')
@setup({'url-fail09': '{% url "view" id=", %}'})
def test_url_fail09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('url-fail09')
@setup({'url-fail11': '{% url named_url %}'})
def test_url_fail11(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail11')
@setup({'url-fail12': '{% url named_url %}'})
def test_url_fail12(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail12', {'named_url': 'no_such_view'})
@setup({'url-fail13': '{% url named_url %}'})
def test_url_fail13(self):
with self.assertRaises(NoReverseMatch):
self.engine.render_to_string('url-fail13', {'named_url': 'template_tests.views.client'})
@setup({'url-fail14': '{% url named_url id, %}'})
def test_url_fail14(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail14', {'named_url': 'view'})
@setup({'url-fail15': '{% url named_url id= %}'})
def test_url_fail15(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail15', {'named_url': 'view'})
@setup({'url-fail16': '{% url named_url a.id=id %}'})
def test_url_fail16(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail16', {'named_url': 'view'})
@setup({'url-fail17': '{% url named_url a.id!id %}'})
def test_url_fail17(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail17', {'named_url': 'view'})
@setup({'url-fail18': '{% url named_url id="unterminatedstring %}'})
def test_url_fail18(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail18', {'named_url': 'view'})
@setup({'url-fail19': '{% url named_url id=", %}'})
def test_url_fail19(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('url-fail19', {'named_url': 'view'})
# {% url ... as var %}
@setup({'url-asvar01': '{% url "index" as url %}'})
def test_url_asvar01(self):
output = self.engine.render_to_string('url-asvar01')
self.assertEqual(output, '')
@setup({'url-asvar02': '{% url "index" as url %}{{ url }}'})
def test_url_asvar02(self):
output = self.engine.render_to_string('url-asvar02')
self.assertEqual(output, '/')
@setup({'url-asvar03': '{% url "no_such_view" as url %}{{ url }}'})
def test_url_asvar03(self):
output = self.engine.render_to_string('url-asvar03')
self.assertEqual(output, '')
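    # Namespaces: "app:..." names resolve against the namespace of the
    # current request, taken from request.resolver_match when available.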
@setup({'url-namespace01': '{% url "app:named.client" 42 %}'})
def test_url_namespace01(self):
request = RequestFactory().get('/')
request.resolver_match = resolve('/ns1/')
template = self.engine.get_template('url-namespace01')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns1/named-client/42/')
@setup({'url-namespace02': '{% url "app:named.client" 42 %}'})
def test_url_namespace02(self):
request = RequestFactory().get('/')
request.resolver_match = resolve('/ns2/')
template = self.engine.get_template('url-namespace02')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
@setup({'url-namespace03': '{% url "app:named.client" 42 %}'})
def test_url_namespace03(self):
request = RequestFactory().get('/')
template = self.engine.get_template('url-namespace03')
context = RequestContext(request)
output = template.render(context)
self.assertEqual(output, '/ns2/named-client/42/')
| bsd-3-clause |
eadgarchen/tensorflow | tensorflow/python/keras/_impl/keras/applications/vgg16.py | 9 | 8908 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image
Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import preprocess_input # pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
from tensorflow.python.keras._impl.keras.layers import Conv2D
from tensorflow.python.keras._impl.keras.layers import Dense
from tensorflow.python.keras._impl.keras.layers import Flatten
from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras._impl.keras.layers import Input
from tensorflow.python.keras._impl.keras.layers import MaxPooling2D
from tensorflow.python.keras._impl.keras.models import Model
from tensorflow.python.keras._impl.keras.utils import layer_utils
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def VGG16(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the VGG16 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Arguments:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
# Block 1
x = Conv2D(
64, (3, 3), activation='relu', padding='same',
name='block1_conv1')(img_input)
x = Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg16')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='64373286793e3c8b2b4e3219cbf3544b')
else:
weights_path = get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='6d6bbae143d832006294945121d1f1fc')
model.load_weights(weights_path)
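    # The downloaded weights use TensorFlow kernel and data-format
    # conventions; convert them when running on the Theano backend or with
    # channels_first image ordering.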
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape,
'channels_first')
elif weights is not None:
model.load_weights(weights)
return model
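# A minimal usage sketch (not part of the module, no pretrained weights
# downloaded): build the network and run a forward pass on one random
# 224x224 RGB image, assuming the default channels_last data format.
#
#   import numpy as np
#   model = VGG16(weights=None, include_top=True, classes=1000)
#   preds = model.predict(np.random.rand(1, 224, 224, 3).astype('float32'))
#   assert preds.shape == (1, 1000)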
| apache-2.0 |
loco-odoo/localizacion_co | openerp/addons-extra/procurement_purchase_forecast-8.0.1.0/procurement_sale_forecast/wizard/make_procurement.py | 24 | 1517 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, api
class MakeProcurement(models.TransientModel):
_inherit = 'make.procurement'
@api.multi
def make_procurement(self):
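        # Run the standard wizard first, then link the procurement it
        # created back to the forecast line the wizard was launched from.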
result = super(MakeProcurement, self).make_procurement()
forecast_line_obj = self.env['procurement.sale.forecast.line']
context = self.env.context
if context.get('active_model') == 'procurement.sale.forecast.line':
forecast_line_id = context['active_id']
procurement_id = result['res_id']
forecast_line = forecast_line_obj.browse(forecast_line_id)
forecast_line.procurement_id = procurement_id
return result
| agpl-3.0 |
loco-odoo/localizacion_co | openerp/addons/account/report/account_general_journal.py | 381 | 7669 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class journal_print(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
if context is None:
context = {}
super(journal_print, self).__init__(cr, uid, name, context=context)
self.period_ids = []
self.journal_ids = []
self.localcontext.update( {
'time': time,
'lines': self.lines,
'periods': self.periods,
'sum_debit_period': self._sum_debit_period,
'sum_credit_period': self._sum_credit_period,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'get_fiscalyear': self._get_fiscalyear,
'get_account': self._get_account,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_sortby': self._get_sortby,
'get_filter': self._get_filter,
'get_journal': self._get_journal,
'get_start_date':self._get_start_date,
'get_end_date':self._get_end_date,
'display_currency':self._display_currency,
'get_target_move': self._get_target_move,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
new_ids = ids
self.query_get_clause = ''
self.target_move = data['form'].get('target_move', 'all')
if (data['model'] == 'ir.ui.menu'):
new_ids = 'active_ids' in data['form'] and data['form']['active_ids'] or []
self.query_get_clause = 'AND '
self.query_get_clause += obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
objects = self.pool.get('account.journal.period').browse(self.cr, self.uid, new_ids)
if new_ids:
self.cr.execute('SELECT period_id, journal_id FROM account_journal_period WHERE id IN %s', (tuple(new_ids),))
res = self.cr.fetchall()
self.period_ids, self.journal_ids = zip(*res)
return super(journal_print, self).set_context(objects, data, ids, report_type=report_type)
# returns a list of period objs
def periods(self, journal_period_objs):
dic = {}
def filter_unique(o):
key = o.period_id.id
res = key in dic
if not res:
dic[key] = True
return not res
filtered_objs = filter(filter_unique, journal_period_objs)
return map(lambda x: x.period_id, filtered_objs)
def lines(self, period_id):
if not self.journal_ids:
return []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT j.code, j.name, l.amount_currency,c.symbol AS currency_code,l.currency_id, '
'SUM(l.debit) AS debit, SUM(l.credit) AS credit '
'FROM account_move_line l '
'LEFT JOIN account_move am ON (l.move_id=am.id) '
'LEFT JOIN account_journal j ON (l.journal_id=j.id) '
                        'LEFT JOIN res_currency c on (l.currency_id=c.id) '
'WHERE am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' '
'GROUP BY j.id, j.code, j.name, l.amount_currency, c.symbol, l.currency_id ',
(tuple(move_state), period_id, tuple(self.journal_ids)))
return self.cr.dictfetchall()
def _set_get_account_currency_code(self, account_id):
self.cr.execute("SELECT c.symbol AS code "\
"FROM res_currency c, account_account AS ac "\
"WHERE ac.id = %s AND ac.currency_id = c.id" % (account_id))
result = self.cr.fetchone()
if result:
self.account_currency = result[0]
else:
self.account_currency = False
def _get_account(self, data):
if data['model'] == 'account.journal.period':
return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).company_id.name
return super(journal_print, self)._get_account(data)
def _get_fiscalyear(self, data):
if data['model'] == 'account.journal.period':
return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).fiscalyear_id.name
return super(journal_print, self)._get_fiscalyear(data)
def _display_currency(self, data):
if data['model'] == 'account.journal.period':
return True
return data['form']['amount_currency']
def _sum_debit_period(self, period_id, journal_id=False):
if journal_id:
journals = [journal_id]
else:
journals = self.journal_ids
if not journals:
return 0.0
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT SUM(l.debit) FROM account_move_line l '
'LEFT JOIN account_move am ON (l.move_id=am.id) '
'WHERE am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' ' \
'AND l.state<>\'draft\'',
(tuple(move_state), period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
def _sum_credit_period(self, period_id, journal_id=None):
if journal_id:
journals = [journal_id]
else:
journals = self.journal_ids
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if not journals:
return 0.0
self.cr.execute('SELECT SUM(l.credit) FROM account_move_line l '
'LEFT JOIN account_move am ON (l.move_id=am.id) '
'WHERE am.state IN %s AND l.period_id=%s AND l.journal_id IN %s '+ self.query_get_clause + ' ' \
'AND l.state<>\'draft\'',
(tuple(move_state), period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
class report_generaljournal(osv.AbstractModel):
_name = 'report.account.report_generaljournal'
_inherit = 'report.abstract_report'
_template = 'account.report_generaljournal'
_wrapped_report_class = journal_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
caisq/tensorflow | tensorflow/python/ops/math_ops_test.py | 6 | 18203 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
exp = np.exp
log = np.log
class ReduceTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testReduceAllDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with test_util.device(use_gpu=True):
y_tf = self.evaluate(math_ops.reduce_sum(x))
self.assertEqual(y_tf, 21)
@test_util.run_in_graph_and_eager_modes
def testReduceExplicitAxes(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with test_util.device(use_gpu=True):
for axis in (0, -2, (0, 0), (0, -2)):
self.assertAllEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)),
[5, 7, 9])
for axis in (1, -1, (1, 1), (1, -1)):
self.assertAllEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)),
[6, 15])
for axis in (None, (0, 1), (-1, -2), (-2, -1, 0, 1)):
self.assertEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)), 21)
@test_util.run_in_graph_and_eager_modes
def testReduceInvalidAxis(self):
if context.executing_eagerly():
      # The shape check happens at graph construction time. In eager mode the
      # check is skipped, so a result comes back even for an invalid shape.
return
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
axis = np.array([[0], [1]])
with self.assertRaisesRegexp(ValueError, "must be at most rank 1"):
math_ops.reduce_sum(x, axis)
class LogSumExpTest(test_util.TensorFlowTestCase):
def testReduceLogSumExp(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf_np = math_ops.reduce_logsumexp(x_np).eval()
y_np = log(np.sum(exp(x_np)))
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=[0])
y_np = log(np.sum(exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = y_tf.eval()
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices2(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=0)
y_np = log(np.sum(exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = y_tf.eval()
self.assertAllClose(y_tf_np, y_np)
def testKeepDims(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with self.test_session(use_gpu=True):
y_tf_np = math_ops.reduce_logsumexp(x_np, keepdims=True).eval()
self.assertEqual(y_tf_np.ndim, x_np.ndim)
y_np = log(np.sum(exp(x_np), keepdims=True))
self.assertAllClose(y_tf_np, y_np)
def testOverflow(self):
x = [1000, 1001, 1002, 1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegexp(RuntimeWarning,
"overflow encountered in exp"):
out = log(np.sum(exp(x_np)))
if out == np.inf:
raise RuntimeWarning("overflow encountered in exp")
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
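        # Reference value via the max-subtraction identity:
        # log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x),
        # which keeps exp() from overflowing for large inputs.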
y_np = log(np.sum(exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testUnderflow(self):
x = [-1000, -1001, -1002, -1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegexp(RuntimeWarning,
"divide by zero encountered in log"):
out = log(np.sum(exp(x_np)))
if out == -np.inf:
raise RuntimeWarning("divide by zero encountered in log")
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
y_np = log(np.sum(exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testInfinity(self):
with self.test_session(use_gpu=True):
res = math_ops.reduce_logsumexp(-np.inf).eval()
self.assertEqual(-np.inf, res)
class RoundTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testRounding(self):
x = np.arange(-5.0, 5.0, .25)
for dtype in [np.float32, np.double, np.int32]:
x_np = np.array(x, dtype=dtype)
with test_util.device(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.round(x_tf)
y_tf_np = self.evaluate(y_tf)
y_np = np.round(x_np)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
class ModTest(test_util.TensorFlowTestCase):
def testFloat(self):
x = [0.5, 0.7, 0.3]
for dtype in [np.float32, np.double]:
# Test scalar and vector versions.
for denom in [x[0], [x[0]] * 3]:
x_np = np.array(x, dtype=dtype)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.fmod(x_np, denom)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
def testFixed(self):
x = [5, 10, 23]
for dtype in [np.int32, np.int64]:
# Test scalar and vector versions.
for denom in [x[0], x]:
x_np = np.array(x, dtype=dtype)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.mod(x_tf, denom)
y_tf_np = y_tf.eval()
y_np = np.mod(x_np, denom)
self.assertAllClose(y_tf_np, y_np)
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testSquaredDifference(self):
for dtype in [np.int32, np.float16]:
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
y = np.array([-3, -2, -1], dtype=dtype)
z = (x - y) * (x - y)
with test_util.device(use_gpu=True):
z_tf = self.evaluate(math_ops.squared_difference(x, y))
self.assertAllClose(z, z_tf)
class ApproximateEqualTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testApproximateEqual(self):
for dtype in [np.float32, np.double]:
x = dtype(1)
y = dtype(1.00009)
z = False
with test_util.device(use_gpu=True):
# Default tolerance is 0.00001
z_tf = self.evaluate(math_ops.approximate_equal(x, y))
self.assertAllEqual(z, z_tf)
for dtype in [np.float32, np.double]:
x = dtype(1)
y = dtype(1.000009)
z = True
with test_util.device(use_gpu=True):
# Default tolerance is 0.00001
z_tf = self.evaluate(math_ops.approximate_equal(x, y))
self.assertAllEqual(z, z_tf)
for dtype in [np.float32, np.double]:
x = np.array([[[[-1, 2.00009999], [-3, 4.01]]]], dtype=dtype)
y = np.array([[[[-1.001, 2], [-3.00009, 4]]]], dtype=dtype)
z = np.array([[[[False, True], [True, False]]]], dtype=np.bool)
with test_util.device(use_gpu=True):
z_tf = self.evaluate(math_ops.approximate_equal(x, y, tolerance=0.0001))
self.assertAllEqual(z, z_tf)
def testApproximateEqualShape(self):
for dtype in [np.float32, np.double]:
x = np.array([1, 2], dtype=dtype)
y = np.array([[1, 2]], dtype=dtype)
# The inputs 'x' and 'y' must have the same shape.
with self.assertRaisesRegexp(
ValueError, "Shapes must be equal rank, but are 1 and 2"):
math_ops.approximate_equal(x, y)
class ScalarMulTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testAcceptsRefs(self):
if context.executing_eagerly():
var = resource_variable_ops.ResourceVariable(10, name="var")
else:
var = variables.Variable(10)
result = math_ops.scalar_mul(3, var)
init = variables.global_variables_initializer()
with test_util.device(use_gpu=True):
self.evaluate(init)
self.assertEqual(30, self.evaluate(result))
@test_util.run_in_graph_and_eager_modes
def testAcceptsConstant(self):
const = constant_op.constant(10)
result = math_ops.scalar_mul(3, const)
with test_util.device(use_gpu=True):
self.assertEqual(30, self.evaluate(result))
@test_util.run_in_graph_and_eager_modes
def testAcceptsTensor(self):
tensor = array_ops.ones([10, 10])
result = math_ops.scalar_mul(3, tensor)
expected = array_ops.ones([10, 10]) * 3
with test_util.device(use_gpu=True):
self.assertAllEqual(self.evaluate(expected), self.evaluate(result))
@test_util.run_in_graph_and_eager_modes
def testAcceptsIndexedSlices(self):
values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
indices = constant_op.constant([0, 2, 5])
x = math_ops.scalar_mul(-3, ops.IndexedSlices(values, indices))
with test_util.device(use_gpu=True):
self.assertAllEqual(self.evaluate(x.values),
[[-6, -9], [-15, -21], [0, 3]])
self.assertAllEqual(self.evaluate(x.indices), [0, 2, 5])
class AccumulateNTest(test_util.TensorFlowTestCase):
def testFloat(self):
np.random.seed(12345)
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllClose(x[0] * 5, math_ops.accumulate_n([tf_x[0]] * 5).eval())
def testInt(self):
np.random.seed(54321)
x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllEqual(x[0] * 6, math_ops.accumulate_n([tf_x[0]] * 6).eval())
class AddNTest(test_util.TensorFlowTestCase):
def testPartials(self):
"""Test that previously revealed a bug in buffer forwarding for AddN."""
partials = []
for _ in range(98):
partials.append(math_ops.add_n([constant_op.constant(1)]))
partials.append(
math_ops.add_n([constant_op.constant(1),
constant_op.constant(1)]))
res = math_ops.add_n(partials) + constant_op.constant(0)
with self.test_session(use_gpu=True):
self.assertAllEqual(res.eval(), 100)
def testFloat(self):
np.random.seed(12345)
for num_inputs in range(1, 10):
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(num_inputs)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllClose(sum(x), math_ops.add_n(tf_x).eval())
self.assertAllClose(x[0] * num_inputs,
math_ops.add_n([tf_x[0]] * num_inputs).eval())
def testInt(self):
np.random.seed(54321)
for num_inputs in range(1, 10):
x = [
np.random.randint(-128, 128, (5, 4, 3, 2, 1))
for _ in range(num_inputs)
]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllEqual(sum(x), math_ops.add_n(tf_x).eval())
self.assertAllEqual(x[0] * num_inputs,
math_ops.add_n([tf_x[0]] * num_inputs).eval())
def testGrad(self):
np.random.seed(42)
for num_inputs in range(1, 10):
with self.test_session(use_gpu=True) as sess:
input_vars = [
variables.Variable(10.0 * np.random.random())
for i in range(0, num_inputs)
]
addn = math_ops.add_n(input_vars)
sess.run(variables.global_variables_initializer())
add_n_grad = gradients.gradients(addn, input_vars)
self.assertAllEqual(np.repeat(1.0, num_inputs), # d/dx (x + y + ...) = 1
[g.eval() for g in add_n_grad])
class DivAndModTest(test_util.TensorFlowTestCase):
# TODO(aselle): Test more types before exposing new division operators.
def intTestData(self):
nums = np.arange(-10, 10, 1).reshape(20, 1)
divs = np.arange(-3, 4, 2).reshape(1, 4)
return nums, divs
def floatTestData(self):
nums = np.arange(-10, 10, .25).reshape(80, 1)
divs = np.arange(-3, 0, .25).reshape(1, 12)
return nums, divs
def testFloorModInt(self):
nums, divs = self.intTestData()
with self.test_session():
# TODO(aselle): Change test to use % after switch
# tf_result = math_ops.floor_mod(nums, divs).eval()
tf_result = math_ops.floormod(nums, divs).eval()
np_result = nums % divs
self.assertAllEqual(tf_result, np_result)
def testFloorModFloat(self):
nums, divs = self.floatTestData()
with self.test_session():
tf_result = math_ops.floormod(nums, divs).eval()
np_result = nums % divs
self.assertAllEqual(tf_result, np_result)
# TODO(aselle): put this test in once % switched to floormod
# tf2_result = (array_ops.constant(nums)
# % array_ops.constant(divs)).eval()
# self.assertAllEqual(tf2_result, tf_result)
def testTruncateModInt(self):
nums, divs = self.intTestData()
with self.test_session():
tf_result = math_ops.truncatemod(nums, divs).eval()
np_result = np.fmod(nums, divs)
self.assertAllEqual(tf_result, np_result)
def testTruncateModFloat(self):
nums, divs = self.floatTestData()
with self.test_session():
tf_result = math_ops.truncatemod(nums, divs).eval()
np_result = np.fmod(nums, divs)
self.assertAllEqual(tf_result, np_result)
def testDivideInt(self):
nums, divs = self.intTestData()
with self.test_session():
tf_result = math_ops.floor_div(nums, divs).eval()
np_result = nums // divs
self.assertAllEqual(tf_result, np_result)
# TODO(aselle): Put this test in once // is switched to floordiv
# tf2_result = (array_ops.constant(nums)
# // array_ops.constant(divs)).eval()
# self.assertAllEqual(tf2_result, tf_result)
def testDivideName(self):
with self.test_session():
op = math_ops.divide(
array_ops.constant(3), array_ops.constant(4), name="my_cool_divide")
self.assertEqual(op.name, "my_cool_divide:0")
def testRealDiv(self):
nums, divs = self.floatTestData()
with self.test_session():
tf_result = math_ops.realdiv(nums, divs).eval()
np_result = np.divide(nums, divs)
self.assertAllEqual(tf_result, np_result)
def testComplexDiv(self):
foo = array_ops.constant([1. + 3.j])
with self.test_session():
_ = math_ops.divide(foo, 1.).eval()
_ = math_ops.div(foo, 2.).eval()
def testFloorDivGrad(self):
with self.test_session():
a = variables.Variable(2.)
b = variables.Variable(4.)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
c_grad = gradients.gradients(math_ops.divide(a, b), [a, b])
self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
c_grad = gradients.gradients(math_ops.div(a, b), [a, b])
self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
c_grad = gradients.gradients(math_ops.floordiv(a, b), [a, b])
self.assertAllEqual([None if x is None else x.eval()
for x in c_grad], [None, None])
def testConsistent(self):
nums, divs = self.intTestData()
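    # Identity under test: floor_div(a, b) * b + floormod(a, b) == a,
    # matching numpy's (a // b) * b + (a % b).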
with self.test_session():
tf_result = (math_ops.floor_div(nums, divs) * divs + math_ops.floormod(
nums, divs)).eval()
tf_nums = array_ops.constant(nums)
tf_divs = array_ops.constant(divs)
tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
np_result = (nums // divs) * divs + (nums % divs)
# Consistent with numpy
self.assertAllEqual(tf_result, np_result)
# Consistent with two forms of divide
self.assertAllEqual(tf_result, tf2_result)
# consistency for truncation form
tf3_result = (math_ops.truncatediv(nums, divs) * divs +
math_ops.truncatemod(nums, divs)).eval()
expanded_nums = np.reshape(
np.tile(nums, divs.shape[1]), (nums.shape[0], divs.shape[1]))
# Consistent with desire to get numerator
self.assertAllEqual(tf3_result, expanded_nums)
# Consistent with desire to get numerator
self.assertAllEqual(tf_result, expanded_nums)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
undoware/neutron-drive | google_appengine/google/appengine/_internal/django/utils/numberformat.py | 23 | 1676 | from google.appengine._internal.django.conf import settings
from google.appengine._internal.django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos, grouping=0, thousand_sep=''):
"""
Gets a number (as a number or string), and returns it as a string,
    using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator
* thousand_sep: Thousand separator symbol (for example ",")
"""
use_grouping = settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR and grouping
# Make the common case fast:
if isinstance(number, int) and not use_grouping and not decimal_pos:
return mark_safe(unicode(number))
# sign
if float(number) < 0:
sign = '-'
else:
sign = ''
str_number = unicode(number)
if str_number[0] == '-':
str_number = str_number[1:]
# decimal part
if '.' in str_number:
int_part, dec_part = str_number.split('.')
if decimal_pos:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
if decimal_pos:
dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
if dec_part: dec_part = decimal_sep + dec_part
# grouping
if use_grouping:
int_part_gd = ''
for cnt, digit in enumerate(int_part[::-1]):
if cnt and not cnt % grouping:
int_part_gd += thousand_sep
int_part_gd += digit
int_part = int_part_gd[::-1]
return sign + int_part + dec_part
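# Example (with settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR both
# enabled): format(1234567.891, '.', 2, grouping=3, thousand_sep=',')
# returns u'1,234,567.89'; with grouping disabled the thousand separator is
# simply omitted.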
| bsd-3-clause |
codenote/chromium-test | tools/sort-headers.py | 4 | 4369 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Given a filename as an argument, sort the #include/#imports in that file.
Shows a diff and prompts for confirmation before doing the deed.
Works great with tools/git/for-all-touched-files.py.
"""
import optparse
import os
import sys
import termios
import tty
def YesNo(prompt):
"""Prompts with a yes/no question, returns True if yes."""
print prompt,
sys.stdout.flush()
# http://code.activestate.com/recipes/134892/
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
ch = 'n'
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
print ch
return ch in ('Y', 'y')
def IncludeCompareKey(line):
"""Sorting comparator key used for comparing two #include lines.
Returns the filename without the #include/#import prefix.
"""
for prefix in ('#include ', '#import '):
if line.startswith(prefix):
line = line[len(prefix):]
break
# The win32 api has all sorts of implicit include order dependencies :-/
# Give a few headers special sort keys that make sure they appear before all
# other headers.
if line.startswith('<windows.h>'): # Must be before e.g. shellapi.h
return '0'
if line.startswith('<atlbase.h>'): # Must be before atlapp.h.
return '1' + line
if line.startswith('<unknwn.h>'): # Must be before e.g. intshcut.h
return '1' + line
# C++ system headers should come after C system headers.
if line.startswith('<'):
if line.find('.h>') != -1:
return '2' + line.lower()
else:
return '3' + line.lower()
return '4' + line
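# For example, the keys above order these includes as shown (the
# '#include ' prefix is stripped before comparison):
#   #include <windows.h>    -> '0'
#   #include <atlbase.h>    -> '1<atlbase.h>'
#   #include <unistd.h>     -> '2<unistd.h>'
#   #include <vector>       -> '3<vector>'
#   #include "base/foo.h"   -> '4"base/foo.h"'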
def IsInclude(line):
"""Returns True if the line is an #include/#import line."""
return line.startswith('#include ') or line.startswith('#import ')
def SortHeader(infile, outfile):
"""Sorts the headers in infile, writing the sorted file to outfile."""
for line in infile:
if IsInclude(line):
headerblock = []
while IsInclude(line):
headerblock.append(line)
line = infile.next()
for header in sorted(headerblock, key=IncludeCompareKey):
outfile.write(header)
# Intentionally fall through, to write the line that caused
# the above while loop to exit.
outfile.write(line)
def FixFileWithConfirmFunction(filename, confirm_function):
"""Creates a fixed version of the file, invokes |confirm_function|
to decide whether to use the new file, and cleans up.
|confirm_function| takes two parameters, the original filename and
the fixed-up filename, and returns True to use the fixed-up file,
false to not use it.
"""
fixfilename = filename + '.new'
infile = open(filename, 'r')
outfile = open(fixfilename, 'w')
SortHeader(infile, outfile)
infile.close()
outfile.close() # Important so the below diff gets the updated contents.
try:
if confirm_function(filename, fixfilename):
os.rename(fixfilename, filename)
finally:
try:
os.remove(fixfilename)
except OSError:
# If the file isn't there, we don't care.
pass
def DiffAndConfirm(filename, should_confirm):
"""Shows a diff of what the tool would change the file named
filename to. Shows a confirmation prompt if should_confirm is true.
Saves the resulting file if should_confirm is false or the user
answers Y to the confirmation prompt.
"""
def ConfirmFunction(filename, fixfilename):
diff = os.system('diff -u %s %s' % (filename, fixfilename))
if diff >> 8 == 0: # Check exit code.
print '%s: no change' % filename
return False
return (not should_confirm or YesNo('Use new file (y/N)?'))
FixFileWithConfirmFunction(filename, ConfirmFunction)
def main():
parser = optparse.OptionParser(usage='%prog filename1 filename2 ...')
parser.add_option('-f', '--force', action='store_false', default=True,
dest='should_confirm',
help='Turn off confirmation prompt.')
opts, filenames = parser.parse_args()
if len(filenames) < 1:
parser.print_help()
return 1
for filename in filenames:
DiffAndConfirm(filename, opts.should_confirm)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
romankagan/DDBWorkbench | python/testData/inspections/PyMethodMayBeStaticInspection/abc.py | 488 | 7145 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
# Instance of old-style class
class _C: pass
_InstanceType = type(_C())
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractproperty(property):
"""A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C:
__metaclass__ = ABCMeta
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = set(name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False))
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC."""
if not isinstance(subclass, (type, types.ClassType)):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print >> file, "%s: %r" % (name, value)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking when it's simple.
subclass = getattr(instance, '__class__', None)
if subclass is not None and subclass in cls._abc_cache:
return True
subtype = type(instance)
# Old-style instances
if subtype is _InstanceType:
subtype = subclass
if subtype is subclass or subclass is None:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subtype in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or
cls.__subclasscheck__(subtype))
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
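# A minimal usage sketch (hypothetical names): registering an unrelated
# class as a virtual subclass makes issubclass()/isinstance() succeed even
# though the class never appears in the ABC's MRO.
#
#   class Sized:
#       __metaclass__ = ABCMeta
#       @abstractmethod
#       def __len__(self):
#           return 0
#
#   class MyContainer(object):
#       def __len__(self):
#           return 42
#
#   Sized.register(MyContainer)
#   assert issubclass(MyContainer, Sized)
#   assert isinstance(MyContainer(), Sized)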
| apache-2.0 |
sdague/home-assistant | tests/components/conversation/test_util.py | 26 | 2111 | """Test the conversation utils."""
from homeassistant.components.conversation.util import create_matcher
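# In create_matcher patterns, {name} captures a named group and [word] marks
# an optional word, so one template can match several phrasings.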
def test_create_matcher():
"""Test the create matcher method."""
# Basic sentence
pattern = create_matcher("Hello world")
assert pattern.match("Hello world") is not None
# Match a part
pattern = create_matcher("Hello {name}")
match = pattern.match("hello world")
assert match is not None
assert match.groupdict()["name"] == "world"
no_match = pattern.match("Hello world, how are you?")
assert no_match is None
# Optional and matching part
pattern = create_matcher("Turn on [the] {name}")
match = pattern.match("turn on the kitchen lights")
assert match is not None
assert match.groupdict()["name"] == "kitchen lights"
match = pattern.match("turn on kitchen lights")
assert match is not None
assert match.groupdict()["name"] == "kitchen lights"
match = pattern.match("turn off kitchen lights")
assert match is None
# Two different optional parts, 1 matching part
pattern = create_matcher("Turn on [the] [a] {name}")
match = pattern.match("turn on the kitchen lights")
assert match is not None
assert match.groupdict()["name"] == "kitchen lights"
match = pattern.match("turn on kitchen lights")
assert match is not None
assert match.groupdict()["name"] == "kitchen lights"
match = pattern.match("turn on a kitchen light")
assert match is not None
assert match.groupdict()["name"] == "kitchen light"
# Strip plural
pattern = create_matcher("Turn {name}[s] on")
match = pattern.match("turn kitchen lights on")
assert match is not None
assert match.groupdict()["name"] == "kitchen light"
# Optional 2 words
pattern = create_matcher("Turn [the great] {name} on")
match = pattern.match("turn the great kitchen lights on")
assert match is not None
assert match.groupdict()["name"] == "kitchen lights"
match = pattern.match("turn kitchen lights on")
assert match is not None
assert match.groupdict()["name"] == "kitchen lights"
| apache-2.0 |
Wojtechnology/Muzit | StreetMuse/lib/python3.4/site-packages/pip/_vendor/distlib/compat.py | 203 | 40541 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
string_types = basestring,
text_type = unicode
from types import FileType as file_type
import __builtin__ as builtins
import ConfigParser as configparser
from ._backport import shutil
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
pathname2url, ContentTooShortError, splittype)
def quote(s):
if isinstance(s, unicode):
s = s.encode('utf-8')
return _quote(s)
import urllib2
from urllib2 import (Request, urlopen, URLError, HTTPError,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPSHandler, HTTPHandler, HTTPRedirectHandler,
build_opener)
import httplib
import xmlrpclib
import Queue as queue
from HTMLParser import HTMLParser
import htmlentitydefs
raw_input = raw_input
from itertools import ifilter as filter
from itertools import ifilterfalse as filterfalse
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
else:
from io import StringIO
string_types = str,
text_type = str
from io import TextIOWrapper as file_type
import builtins
import configparser
import shutil
from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
unquote, urlsplit, urlunsplit, splittype)
from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
pathname2url,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPSHandler, HTTPHandler, HTTPRedirectHandler,
build_opener)
from urllib.error import HTTPError, URLError, ContentTooShortError
import http.client as httplib
import urllib.request as urllib2
import xmlrpc.client as xmlrpclib
import queue
from html.parser import HTMLParser
import html.entities as htmlentitydefs
raw_input = input
from itertools import filterfalse
filter = filter
try:
from ssl import match_hostname, CertificateError
except ImportError:
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
parts = dn.split('.')
leftmost, remainder = parts[0], parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
        # than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
try:
from types import SimpleNamespace as Container
except ImportError:
class Container(object):
"""
A generic container for when multiple values need to be returned
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
try:
from shutil import which
except ImportError:
# Implementation from Python 3.3
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
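# Editor's note: hedged usage sketch for which() above; it works the same
# with the stdlib shutil.which or the backport. Only defined, never run.
def _demo_which():
    import os.path
    import sys as _sys
    exe = which(os.path.basename(_sys.executable),
                path=os.path.dirname(_sys.executable))
    # the running interpreter should be found in its own directory
    assert exe is None or os.path.exists(exe)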
# ZipFile is a context manager in 2.7, but not in 2.6
from zipfile import ZipFile as BaseZipFile
if hasattr(BaseZipFile, '__enter__'):
ZipFile = BaseZipFile
else:
from zipfile import ZipExtFile as BaseZipExtFile
class ZipExtFile(BaseZipExtFile):
def __init__(self, base):
self.__dict__.update(base.__dict__)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
# return None, so if an exception occurred, it will propagate
class ZipFile(BaseZipFile):
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
# return None, so if an exception occurred, it will propagate
def open(self, *args, **kwargs):
base = BaseZipFile.open(self, *args, **kwargs)
return ZipExtFile(base)
try:
from platform import python_implementation
except ImportError: # pragma: no cover
def python_implementation():
"""Return a string identifying the Python implementation."""
if 'PyPy' in sys.version:
return 'PyPy'
if os.name == 'java':
return 'Jython'
if sys.version.startswith('IronPython'):
return 'IronPython'
return 'CPython'
try:
import sysconfig
except ImportError: # pragma: no cover
from ._backport import sysconfig
try:
callable = callable
except NameError: # pragma: no cover
from collections import Callable
def callable(obj):
return isinstance(obj, Callable)
try:
fsencode = os.fsencode
fsdecode = os.fsdecode
except AttributeError: # pragma: no cover
_fsencoding = sys.getfilesystemencoding()
if _fsencoding == 'mbcs':
_fserrors = 'strict'
else:
_fserrors = 'surrogateescape'
def fsencode(filename):
if isinstance(filename, bytes):
return filename
elif isinstance(filename, text_type):
return filename.encode(_fsencoding, _fserrors)
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
def fsdecode(filename):
if isinstance(filename, text_type):
return filename
elif isinstance(filename, bytes):
return filename.decode(_fsencoding, _fserrors)
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
try:
from tokenize import detect_encoding
except ImportError: # pragma: no cover
from codecs import BOM_UTF8, lookup
import re
    cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
        be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
try:
filename = readline.__self__.name
except AttributeError:
filename = None
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
# Decode as UTF-8. Either the line is an encoding declaration,
# in which case it should be pure ASCII, or it must be UTF-8
# per default encoding.
line_string = line.decode('utf-8')
except UnicodeDecodeError:
msg = "invalid or missing encoding declaration"
if filename is not None:
msg = '{} for {!r}'.format(msg, filename)
raise SyntaxError(msg)
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
if filename is None:
msg = "unknown encoding: " + encoding
else:
msg = "unknown encoding for {!r}: {}".format(filename,
encoding)
raise SyntaxError(msg)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
if filename is None:
msg = 'encoding problem: utf-8'
else:
msg = 'encoding problem for {!r}: utf-8'.format(filename)
raise SyntaxError(msg)
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
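# Editor's note: minimal demonstration of detect_encoding() above. Both the
# stdlib tokenize version and this backport normalise a latin-1 cookie to
# 'iso-8859-1'. Defined only; call it manually.
def _demo_detect_encoding():
    import io
    src = io.BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
    enc, lines = detect_encoding(src.readline)
    assert enc == 'iso-8859-1'
    assert lines == [b"# -*- coding: latin-1 -*-\n"]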
# For converting & <-> & etc.
try:
from html import escape
except ImportError:
from cgi import escape
if sys.version_info[:2] < (3, 4):
unescape = HTMLParser().unescape
else:
from html import unescape
try:
from collections import ChainMap
except ImportError: # pragma: no cover
from collections import MutableMapping
try:
from reprlib import recursive_repr as _recursive_repr
except ImportError:
def _recursive_repr(fillvalue='...'):
'''
Decorator to make a repr function return fillvalue for a recursive
call
'''
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
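# Editor's note: hedged sketch of the ChainMap semantics implemented above
# (identical for the stdlib class): lookups search maps left to right, while
# writes only ever touch maps[0].
def _demo_chainmap():
    cm = ChainMap({'a': 1}, {'a': 2, 'b': 3})
    assert cm['a'] == 1 and cm['b'] == 3
    cm['c'] = 9                      # lands in the first mapping only
    assert cm.maps[0]['c'] == 9 and 'c' not in cm.maps[1]
    assert cm.parents['a'] == 2      # parents drops maps[0]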
try:
from imp import cache_from_source
except ImportError: # pragma: no cover
def cache_from_source(path, debug_override=None):
assert path.endswith('.py')
if debug_override is None:
debug_override = __debug__
if debug_override:
suffix = 'c'
else:
suffix = 'o'
return path + suffix
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
## {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
            raise TypeError('expected at most 1 argument, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running=None):
'od.__repr__() <==> repr(od)'
if not _repr_running: _repr_running = {}
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
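# Editor's note: minimal demonstration of the ordering guarantees above;
# the stdlib OrderedDict and this recipe backport agree on these cases.
def _demo_ordereddict():
    od = OrderedDict([('b', 2), ('a', 1)])
    od['c'] = 3
    assert list(od.keys()) == ['b', 'a', 'c']   # insertion order preserved
    assert od.popitem() == ('c', 3)             # LIFO by default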
try:
from logging.config import BaseConfigurator, valid_ident
except ImportError: # pragma: no cover
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, string_types):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
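# Editor's note: hedged sketch of the cfg:// conversion implemented above,
# using only names defined (or imported) in this module; the stdlib
# logging.config.BaseConfigurator resolves the same way.
def _demo_cfg_convert():
    cfg = BaseConfigurator({'handlers': {'h1': {'level': 'DEBUG'}}})
    assert cfg.convert('cfg://handlers.h1.level') == 'DEBUG'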
| apache-2.0 |
ajyoon/brown | tests/mocks/mock_graphic_object_interface.py | 1 | 1231 | from PyQt5 import QtWidgets
from brown.core.brush_pattern import BrushPattern
from brown.core.pen_cap_style import PenCapStyle
from brown.core.pen_join_style import PenJoinStyle
from brown.core.pen_pattern import PenPattern
from brown.interface.brush_interface import BrushInterface
from brown.interface.graphic_object_interface import GraphicObjectInterface
from brown.interface.pen_interface import PenInterface
from brown.utils.color import Color
"""A mock concrete GraphicObjectInterface subclass for testing"""
class MockGraphicObjectInterface(GraphicObjectInterface):
"""Only need to implement init for a functional mock subclass"""
def __init__(self, brown_object, pos, pen=None, brush=None):
super().__init__(brown_object)
self.qt_object = QtWidgets.QGraphicsRectItem(
0, 0, 10, 10)
self.pos = pos
if pen:
self.pen = pen
else:
self.pen = PenInterface(
None, Color('#000000'), 0,
PenPattern.SOLID, PenJoinStyle.BEVEL, PenCapStyle.SQUARE)
if brush:
self.brush = brush
else:
self.brush = BrushInterface(
None, Color('#000000'), BrushPattern.SOLID)
| gpl-3.0 |
rayleigh0407/Hackathon-Bot | MySQL-python-1.2.5/tests/test_MySQLdb_dbapi20.py | 41 | 7818 | #!/usr/bin/env python
import dbapi20
import unittest
import MySQLdb
from configdb import connection_kwargs
import warnings
warnings.simplefilter("ignore")
class test_MySQLdb(dbapi20.DatabaseAPI20Test):
driver = MySQLdb
connect_args = ()
connect_kw_args = connection_kwargs(dict(sql_mode="ANSI,STRICT_TRANS_TABLES,TRADITIONAL"))
def test_setoutputsize(self): pass
def test_setoutputsize_basic(self): pass
def test_nextset(self): pass
"""The tests on fetchone and fetchall and rowcount bogusly
test for an exception if the statement cannot return a
result set. MySQL always returns a result set; it's just that
some things return empty result sets."""
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
## self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
self.assertEqual(len(rows),len(self.samples),
'cursor.fetchall did not retrieve all rows'
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'cursor.fetchall retrieved incorrect rows'
)
rows = cur.fetchall()
self.assertEqual(
len(rows),0,
'cursor.fetchall should return an empty list if called '
'after the whole result set has been fetched'
)
self.assertTrue(cur.rowcount in (-1,len(self.samples)))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1,0))
self.assertEqual(len(rows),0,
'cursor.fetchall should return an empty list if '
'a select query returns no rows'
)
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error,cur.fetchone)
# cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
self.executeDDL1(cur)
## self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if a query retrieves '
'no rows'
)
self.assertTrue(cur.rowcount in (-1,0))
# cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
## self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchone()
self.assertEqual(len(r),1,
'cursor.fetchone should have retrieved a single row'
)
self.assertEqual(r[0],'Victoria Bitter',
'cursor.fetchone retrieved incorrect data'
)
## self.assertEqual(cur.fetchone(),None,
## 'cursor.fetchone should return None if no more rows available'
## )
self.assertTrue(cur.rowcount in (-1,1))
finally:
con.close()
# Same complaint as for fetchall and fetchone
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
## self.assertEqual(cur.rowcount,-1,
## 'cursor.rowcount should be -1 after executing no-result '
## 'statements'
## )
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
## self.assertTrue(cur.rowcount in (-1,1),
## 'cursor.rowcount should == number or rows inserted, or '
## 'set to -1 after executing an insert statement'
## )
cur.execute("select name from %sbooze" % self.table_prefix)
self.assertTrue(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows returned, or '
'set to -1 after executing a select statement'
)
self.executeDDL2(cur)
## self.assertEqual(cur.rowcount,-1,
## 'cursor.rowcount not being reset to -1 after executing '
## 'no-result statements'
## )
finally:
con.close()
def test_callproc(self):
pass # performed in test_MySQL_capabilities
def help_nextset_setUp(self,cur):
''' Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
'''
sql="""
create procedure deleteme()
begin
select count(*) from %(tp)sbooze;
select name from %(tp)sbooze;
end
""" % dict(tp=self.table_prefix)
cur.execute(sql)
def help_nextset_tearDown(self,cur):
'If cleaning up is needed after nextSetTest'
cur.execute("drop procedure deleteme")
def test_nextset(self):
from warnings import warn
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur,'nextset'):
return
try:
self.executeDDL1(cur)
sql=self._populate()
for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc('deleteme')
numberofrows=cur.fetchone()
assert numberofrows[0]== len(self.samples)
assert cur.nextset()
names=cur.fetchall()
assert len(names) == len(self.samples)
s=cur.nextset()
if s:
empty = cur.fetchall()
self.assertEquals(len(empty), 0,
"non-empty result set after other result sets")
#warn("Incompatibility: MySQL returns an empty result set for the CALL itself",
# Warning)
#assert s == None,'No more return sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
if __name__ == '__main__':
unittest.main()
| mit |
DGrady/pandas | asv_bench/benchmarks/io_sql.py | 7 | 4120 | import sqlalchemy
from .pandas_vb_common import *
import sqlite3
from sqlalchemy import create_engine
#-------------------------------------------------------------------------------
# to_sql
class WriteSQL(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
def time_fallback(self):
self.df.to_sql('test1', self.con, if_exists='replace')
def time_sqlalchemy(self):
self.df.to_sql('test1', self.engine, if_exists='replace')
#-------------------------------------------------------------------------------
# read_sql
class ReadSQL(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
self.df.to_sql('test2', self.engine, if_exists='replace')
self.df.to_sql('test2', self.con, if_exists='replace')
def time_read_query_fallback(self):
read_sql_query('SELECT * FROM test2', self.con)
def time_read_query_sqlalchemy(self):
read_sql_query('SELECT * FROM test2', self.engine)
def time_read_table_sqlalchemy(self):
read_sql_table('test2', self.engine)
#-------------------------------------------------------------------------------
# type specific write
class WriteSQLTypes(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.df = DataFrame({'float': randn(10000), 'string': (['foo'] * 10000), 'bool': ([True] * 10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), })
self.df.loc[1000:3000, 'float'] = np.nan
def time_string_fallback(self):
self.df[['string']].to_sql('test_string', self.con, if_exists='replace')
def time_string_sqlalchemy(self):
self.df[['string']].to_sql('test_string', self.engine, if_exists='replace')
def time_float_fallback(self):
self.df[['float']].to_sql('test_float', self.con, if_exists='replace')
def time_float_sqlalchemy(self):
self.df[['float']].to_sql('test_float', self.engine, if_exists='replace')
def time_datetime_sqlalchemy(self):
self.df[['datetime']].to_sql('test_datetime', self.engine, if_exists='replace')
#-------------------------------------------------------------------------------
# type specific read
class ReadSQLTypes(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.df = DataFrame({'float': randn(10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), })
self.df['datetime_string'] = self.df['datetime'].map(str)
self.df.to_sql('test_type', self.engine, if_exists='replace')
self.df[['float', 'datetime_string']].to_sql('test_type', self.con, if_exists='replace')
def time_datetime_read_and_parse_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['datetime_string'], parse_dates=['datetime_string'])
def time_datetime_read_as_native_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['datetime'])
def time_float_read_query_fallback(self):
read_sql_query('SELECT float FROM test_type', self.con)
def time_float_read_query_sqlalchemy(self):
read_sql_query('SELECT float FROM test_type', self.engine)
def time_float_read_table_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['float'])
| bsd-3-clause |
KnoxMakers/KM-Laser | extensions/km_deps/future/moves/urllib/parse.py | 13 | 1045 | from __future__ import absolute_import
from future.standard_library import suspend_hooks
from future.utils import PY3
if PY3:
from urllib.parse import *
else:
__future_module__ = True
from urlparse import (ParseResult, SplitResult, parse_qs, parse_qsl,
urldefrag, urljoin, urlparse, urlsplit,
urlunparse, urlunsplit)
# we use this method to get at the original py2 urllib before any renaming
# quote = sys.py2_modules['urllib'].quote
# quote_plus = sys.py2_modules['urllib'].quote_plus
# unquote = sys.py2_modules['urllib'].unquote
# unquote_plus = sys.py2_modules['urllib'].unquote_plus
# urlencode = sys.py2_modules['urllib'].urlencode
# splitquery = sys.py2_modules['urllib'].splitquery
with suspend_hooks():
from urllib import (quote,
quote_plus,
unquote,
unquote_plus,
urlencode,
splitquery)
| gpl-3.0 |
jbassen/edx-platform | common/djangoapps/service_status/views.py | 188 | 1296 | """
Django Views for service status app
"""
import json
import time
from django.http import HttpResponse
from dogapi import dog_stats_api
from service_status import tasks
from djcelery import celery
from celery.exceptions import TimeoutError
def index(_):
"""
An empty view
"""
return HttpResponse()
@dog_stats_api.timed('status.service.celery.status')
def celery_status(_):
"""
A view that returns Celery stats
"""
stats = celery.control.inspect().stats() or {}
return HttpResponse(json.dumps(stats, indent=4),
mimetype="application/json")
@dog_stats_api.timed('status.service.celery.ping')
def celery_ping(_):
"""
A Simple view that checks if Celery can process a simple task
"""
start = time.time()
result = tasks.delayed_ping.apply_async(('ping', 0.1))
task_id = result.id
# Wait until we get the result
try:
value = result.get(timeout=4.0)
success = True
except TimeoutError:
value = None
success = False
output = {
'success': success,
'task_id': task_id,
'value': value,
'time': time.time() - start,
}
return HttpResponse(json.dumps(output, indent=4),
mimetype="application/json")
| agpl-3.0 |
idea4bsd/idea4bsd | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_execfile.py | 324 | 1998 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for execfile.
This converts usages of the execfile function into calls to the built-in
exec() function.
"""
from .. import fixer_base
from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
ArgList, String, syms)
class FixExecfile(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
|
power< 'execfile' trailer< '(' filename=any ')' > >
"""
def transform(self, node, results):
assert results
filename = results["filename"]
globals = results.get("globals")
locals = results.get("locals")
# Copy over the prefix from the right parentheses end of the execfile
# call.
execfile_paren = node.children[-1].children[-1].clone()
# Construct open().read().
open_args = ArgList([filename.clone()], rparen=execfile_paren)
open_call = Node(syms.power, [Name(u"open"), open_args])
read = [Node(syms.trailer, [Dot(), Name(u'read')]),
Node(syms.trailer, [LParen(), RParen()])]
open_expr = [open_call] + read
# Wrap the open call in a compile call. This is so the filename will be
# preserved in the execed code.
filename_arg = filename.clone()
filename_arg.prefix = u" "
exec_str = String(u"'exec'", u" ")
compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
compile_call = Call(Name(u"compile"), compile_args, u"")
# Finally, replace the execfile call with an exec call.
args = [compile_call]
if globals is not None:
args.extend([Comma(), globals.clone()])
if locals is not None:
args.extend([Comma(), locals.clone()])
return Call(Name(u"exec"), args, prefix=node.prefix)
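# Editor's illustration (hedged, derived from the transform above): with this
# fixer enabled, lib2to3 rewrites source such as
#     execfile('settings.py', glb, loc)
# into
#     exec(compile(open('settings.py').read(), 'settings.py', 'exec'), glb, loc)
# which is why the filename is cloned into the compile() call: tracebacks from
# the executed code keep pointing at the original file.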
| apache-2.0 |
Flexget/Flexget | flexget/components/trakt/cli.py | 3 | 4480 | from flexget import options, plugin
from flexget.event import event
from flexget.manager import Session
from flexget.terminal import TerminalTable, TerminalTableError, console, table_parser
from . import db
def action_auth(options):
    if not options.account:
console(
'You must specify an account (local identifier) so we know where to save your access token!'
)
return
try:
db.get_access_token(options.account, options.pin, re_auth=True, called_from_cli=True)
console('Successfully authorized Flexget app on Trakt.tv. Enjoy!')
return
except plugin.PluginError as e:
console('Authorization failed: %s' % e)
def action_list(options):
with Session() as session:
if not options.account:
# Print all accounts
accounts = session.query(db.TraktUserAuth).all()
if not accounts:
console('No trakt authorizations stored in database.')
return
header = ['Account', 'Created', 'Expires']
table_data = [header]
for auth in accounts:
table_data.append(
[
auth.account,
auth.created.strftime('%Y-%m-%d'),
auth.expires.strftime('%Y-%m-%d'),
]
)
try:
table = TerminalTable(options.table_type, table_data)
console(table.output)
return
except TerminalTableError as e:
console('ERROR: %s' % str(e))
# Show a specific account
acc = (
session.query(db.TraktUserAuth)
.filter(db.TraktUserAuth.account == options.account)
.first()
)
if acc:
console('Authorization expires on %s' % acc.expires)
else:
console('Flexget has not been authorized to access your account.')
def action_refresh(options):
if not options.account:
console('Please specify an account')
return
try:
db.get_access_token(options.account, refresh=True)
console('Successfully refreshed your access token.')
return
except plugin.PluginError as e:
console('Authorization failed: %s' % e)
def action_delete(options):
if not options.account:
console('Please specify an account')
return
try:
db.delete_account(options.account)
console('Successfully deleted your access token.')
return
except plugin.PluginError as e:
console('Deletion failed: %s' % e)
def do_cli(manager, options):
action_map = {
'auth': action_auth,
'list': action_list,
'refresh': action_refresh,
'delete': action_delete,
}
action_map[options.action](options)
@event('options.register')
def register_parser_arguments():
acc_text = 'Local identifier which should be used in your config to refer these credentials'
# Register subcommand
parser = options.register_command(
'trakt', do_cli, help='View and manage trakt authentication.'
)
# Set up our subparsers
subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='action')
auth_parser = subparsers.add_parser(
'auth', help='Authorize Flexget to access your Trakt.tv account'
)
auth_parser.add_argument('account', metavar='<account>', help=acc_text)
auth_parser.add_argument(
'pin',
metavar='<pin>',
help='Get this by authorizing FlexGet to use your trakt account '
'at %s. WARNING: DEPRECATED.' % db.PIN_URL,
nargs='?',
)
show_parser = subparsers.add_parser(
'list',
help='List expiration date for Flexget authorization(s) (don\'t worry, '
'they will automatically refresh when expired)',
parents=[table_parser],
)
show_parser.add_argument('account', metavar='<account>', nargs='?', help=acc_text)
refresh_parser = subparsers.add_parser(
'refresh',
help='Manually refresh your access token associated with your' ' --account <name>',
)
refresh_parser.add_argument('account', metavar='<account>', help=acc_text)
delete_parser = subparsers.add_parser(
'delete', help='Delete the specified <account> name from local database'
)
delete_parser.add_argument('account', metavar='<account>', help=acc_text)
| mit |
wubr2000/zipline | tests/risk/upload_answer_key.py | 43 | 1923 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility script for maintainer use to upload current version of the answer key
spreadsheet to S3.
"""
import hashlib
import boto
from . import answer_key
BUCKET_NAME = 'zipline-test-data'
def main():
with open(answer_key.ANSWER_KEY_PATH, 'r') as f:
md5 = hashlib.md5()
while True:
buf = f.read(1024)
if not buf:
break
md5.update(buf)
local_hash = md5.hexdigest()
s3_conn = boto.connect_s3()
bucket = s3_conn.get_bucket(BUCKET_NAME)
key = boto.s3.key.Key(bucket)
key.key = "risk/{local_hash}/risk-answer-key.xlsx".format(
local_hash=local_hash)
key.set_contents_from_filename(answer_key.ANSWER_KEY_PATH)
key.set_acl('public-read')
download_link = "http://s3.amazonaws.com/{bucket_name}/{key}".format(
bucket_name=BUCKET_NAME,
key=key.key)
print("Uploaded to key: {key}".format(key=key.key))
print("Download link: {download_link}".format(download_link=download_link))
# Now update checksum file with the recently added answer key.
# checksum file update will be then need to be commited via git.
with open(answer_key.ANSWER_KEY_CHECKSUMS_PATH, 'a') as checksum_file:
checksum_file.write(local_hash)
checksum_file.write("\n")
if __name__ == "__main__":
main()
| apache-2.0 |
thonkify/thonkify | src/lib/gcloud/test_client.py | 2 | 7021 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class Test_ClientFactoryMixin(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.client import _ClientFactoryMixin
return _ClientFactoryMixin
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_virtual(self):
self.assertRaises(NotImplementedError, self._makeOne)
class TestClient(unittest2.TestCase):
def setUp(self):
KLASS = self._getTargetClass()
self.original_cnxn_class = KLASS._connection_class
KLASS._connection_class = _MockConnection
def tearDown(self):
KLASS = self._getTargetClass()
KLASS._connection_class = self.original_cnxn_class
def _getTargetClass(self):
from gcloud.client import Client
return Client
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
from gcloud._testing import _Monkey
from gcloud import client
CREDENTIALS = object()
FUNC_CALLS = []
def mock_get_credentials():
FUNC_CALLS.append('get_credentials')
return CREDENTIALS
with _Monkey(client, get_credentials=mock_get_credentials):
client_obj = self._makeOne()
self.assertTrue(isinstance(client_obj.connection, _MockConnection))
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertEqual(FUNC_CALLS, ['get_credentials'])
def test_ctor_explicit(self):
CREDENTIALS = object()
HTTP = object()
client_obj = self._makeOne(credentials=CREDENTIALS, http=HTTP)
self.assertTrue(isinstance(client_obj.connection, _MockConnection))
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertTrue(client_obj.connection.http is HTTP)
def test_from_service_account_json(self):
from gcloud._testing import _Monkey
from gcloud import client
KLASS = self._getTargetClass()
CREDENTIALS = object()
_CALLED = []
def mock_creds(arg1):
_CALLED.append((arg1,))
return CREDENTIALS
BOGUS_ARG = object()
with _Monkey(client, get_for_service_account_json=mock_creds):
client_obj = KLASS.from_service_account_json(BOGUS_ARG)
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertEqual(_CALLED, [(BOGUS_ARG,)])
def test_from_service_account_json_fail(self):
KLASS = self._getTargetClass()
CREDENTIALS = object()
self.assertRaises(TypeError, KLASS.from_service_account_json, None,
credentials=CREDENTIALS)
def test_from_service_account_p12(self):
from gcloud._testing import _Monkey
from gcloud import client
KLASS = self._getTargetClass()
CREDENTIALS = object()
_CALLED = []
def mock_creds(arg1, arg2):
_CALLED.append((arg1, arg2))
return CREDENTIALS
BOGUS_ARG1 = object()
BOGUS_ARG2 = object()
with _Monkey(client, get_for_service_account_p12=mock_creds):
client_obj = KLASS.from_service_account_p12(BOGUS_ARG1, BOGUS_ARG2)
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertEqual(_CALLED, [(BOGUS_ARG1, BOGUS_ARG2)])
def test_from_service_account_p12_fail(self):
KLASS = self._getTargetClass()
CREDENTIALS = object()
self.assertRaises(TypeError, KLASS.from_service_account_p12, None,
None, credentials=CREDENTIALS)
class TestJSONClient(unittest2.TestCase):
def setUp(self):
KLASS = self._getTargetClass()
self.original_cnxn_class = KLASS._connection_class
KLASS._connection_class = _MockConnection
def tearDown(self):
KLASS = self._getTargetClass()
KLASS._connection_class = self.original_cnxn_class
def _getTargetClass(self):
from gcloud.client import JSONClient
return JSONClient
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
from gcloud._testing import _Monkey
from gcloud import client
PROJECT = 'PROJECT'
CREDENTIALS = object()
FUNC_CALLS = []
def mock_get_proj():
FUNC_CALLS.append('_get_production_project')
return PROJECT
def mock_get_credentials():
FUNC_CALLS.append('get_credentials')
return CREDENTIALS
with _Monkey(client, get_credentials=mock_get_credentials,
_get_production_project=mock_get_proj):
client_obj = self._makeOne()
self.assertTrue(client_obj.project is PROJECT)
self.assertTrue(isinstance(client_obj.connection, _MockConnection))
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertEqual(FUNC_CALLS,
['_get_production_project', 'get_credentials'])
def test_ctor_missing_project(self):
from gcloud._testing import _Monkey
from gcloud import client
FUNC_CALLS = []
def mock_get_proj():
FUNC_CALLS.append('_get_production_project')
return None
with _Monkey(client, _get_production_project=mock_get_proj):
self.assertRaises(ValueError, self._makeOne)
self.assertEqual(FUNC_CALLS, ['_get_production_project'])
def test_ctor_w_invalid_project(self):
CREDENTIALS = object()
HTTP = object()
with self.assertRaises(ValueError):
self._makeOne(project=object(), credentials=CREDENTIALS, http=HTTP)
def test_ctor_explicit(self):
PROJECT = 'PROJECT'
CREDENTIALS = object()
HTTP = object()
client_obj = self._makeOne(project=PROJECT, credentials=CREDENTIALS,
http=HTTP)
self.assertTrue(client_obj.project is PROJECT)
self.assertTrue(isinstance(client_obj.connection, _MockConnection))
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertTrue(client_obj.connection.http is HTTP)
class _MockConnection(object):
def __init__(self, credentials=None, http=None):
self.credentials = credentials
self.http = http
| mit |
ToqueWillot/M2DAC | RDFIA/TME3-4/codesTME3-4/learning/libsvm/python/svmutil.py | 13 | 8119 | #!/usr/bin/env python
from svm import *
def svm_read_problem(data_file_name):
"""
svm_read_problem(data_file_name) -> [y, x]
Read LIBSVM-format data from data_file_name and return labels y
and data instances x.
"""
prob_y = []
prob_x = []
for line in open(data_file_name):
line = line.split(None, 1)
# In case an instance with all zero features
if len(line) == 1: line += ['']
label, features = line
xi = {}
for e in features.split():
ind, val = e.split(":")
xi[int(ind)] = float(val)
prob_y += [float(label)]
prob_x += [xi]
return (prob_y, prob_x)
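def _demo_read_problem_line():
	# Editor's sketch: mirrors the parsing above for one LIBSVM-format line,
	# "<label> <index>:<value> ..." -> (label, {index: value, ...}).
	label, features = "+1 1:0.5 3:2".split(None, 1)
	xi = dict((int(i), float(v))
	          for i, v in (e.split(':') for e in features.split()))
	assert float(label) == 1.0 and xi == {1: 0.5, 3: 2.0}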
def svm_load_model(model_file_name):
"""
svm_load_model(model_file_name) -> model
Load a LIBSVM model from model_file_name and return.
"""
model = libsvm.svm_load_model(model_file_name)
if not model:
print("can't open model file %s" % model_file_name)
return None
model = toPyModel(model)
return model
def svm_save_model(model_file_name, model):
"""
svm_save_model(model_file_name, model) -> None
Save a LIBSVM model to the file model_file_name.
"""
libsvm.svm_save_model(model_file_name, model)
def evaluations(ty, pv):
"""
evaluations(ty, pv) -> (ACC, MSE, SCC)
Calculate accuracy, mean squared error and squared correlation coefficient
using the true values (ty) and predicted values (pv).
"""
if len(ty) != len(pv):
raise ValueError("len(ty) must equal to len(pv)")
total_correct = total_error = 0
sumv = sumy = sumvv = sumyy = sumvy = 0
for v, y in zip(pv, ty):
if y == v:
total_correct += 1
total_error += (v-y)*(v-y)
sumv += v
sumy += y
sumvv += v*v
sumyy += y*y
sumvy += v*y
l = len(ty)
ACC = 100.0*total_correct/l
MSE = total_error/l
try:
SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
except:
SCC = float('nan')
return (ACC, MSE, SCC)
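def _demo_evaluations():
	# Editor's sketch: 2 of 3 labels agree, so ACC is 200/3 %; SCC degenerates
	# to NaN here because the predictions have zero variance.
	ACC, MSE, SCC = evaluations([1, -1, 1], [1, 1, 1])
	assert abs(ACC - 200.0 / 3) < 1e-9
	assert abs(MSE - 4.0 / 3) < 1e-9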
def svm_train(arg1, arg2=None, arg3=None):
"""
svm_train(y, x [, 'options']) -> model | ACC | MSE
svm_train(prob, [, 'options']) -> model | ACC | MSE
svm_train(prob, param) -> model | ACC| MSE
Train an SVM model from data (y, x) or an svm_problem prob using
'options' or an svm_parameter param.
If '-v' is specified in 'options' (i.e., cross validation)
either accuracy (ACC) or mean-squared error (MSE) is returned.
'options':
-s svm_type : set type of SVM (default 0)
0 -- C-SVC
1 -- nu-SVC
2 -- one-class SVM
3 -- epsilon-SVR
4 -- nu-SVR
-t kernel_type : set type of kernel function (default 2)
0 -- linear: u'*v
1 -- polynomial: (gamma*u'*v + coef0)^degree
2 -- radial basis function: exp(-gamma*|u-v|^2)
3 -- sigmoid: tanh(gamma*u'*v + coef0)
4 -- precomputed kernel (kernel values in training_set_file)
-d degree : set degree in kernel function (default 3)
-g gamma : set gamma in kernel function (default 1/num_features)
-r coef0 : set coef0 in kernel function (default 0)
-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
-m cachesize : set cache memory size in MB (default 100)
-e epsilon : set tolerance of termination criterion (default 0.001)
-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
-v n: n-fold cross validation mode
-q : quiet mode (no outputs)
"""
prob, param = None, None
if isinstance(arg1, (list, tuple)):
assert isinstance(arg2, (list, tuple))
y, x, options = arg1, arg2, arg3
prob = svm_problem(y, x)
param = svm_parameter(options)
elif isinstance(arg1, svm_problem):
prob = arg1
if isinstance(arg2, svm_parameter):
param = arg2
else:
param = svm_parameter(arg2)
	if prob is None or param is None:
raise TypeError("Wrong types for the arguments")
if param.kernel_type == PRECOMPUTED:
for xi in prob.x_space:
idx, val = xi[0].index, xi[0].value
if xi[0].index != 0:
raise ValueError('Wrong input format: first column must be 0:sample_serial_number')
if val <= 0 or val > prob.n:
raise ValueError('Wrong input format: sample_serial_number out of range')
if param.gamma == 0 and prob.n > 0:
param.gamma = 1.0 / prob.n
libsvm.svm_set_print_string_function(param.print_func)
err_msg = libsvm.svm_check_parameter(prob, param)
if err_msg:
raise ValueError('Error: %s' % err_msg)
if param.cross_validation:
l, nr_fold = prob.l, param.nr_fold
target = (c_double * l)()
libsvm.svm_cross_validation(prob, param, nr_fold, target)
ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
if param.svm_type in [EPSILON_SVR, NU_SVR]:
print("Cross Validation Mean squared error = %g" % MSE)
print("Cross Validation Squared correlation coefficient = %g" % SCC)
return MSE
else:
print("Cross Validation Accuracy = %g%%" % ACC)
return ACC
else:
m = libsvm.svm_train(prob, param)
m = toPyModel(m)
# If prob is destroyed, data including SVs pointed by m can remain.
m.x_space = prob.x_space
return m
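# Illustrative only -- a minimal sketch of the list-based calling form of
# svm_train(); the tiny dataset and the '-c 4' value are arbitrary, not
# recommendations.
def _example_svm_train():
    y = [1, -1]
    x = [{1: 1.0, 2: 0.0}, {1: 0.0, 2: 1.0}]
    # '-c 4' sets the C-SVC cost parameter; '-q' suppresses solver output.
    return svm_train(y, x, '-c 4 -q')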
def svm_predict(y, x, m, options=""):
"""
svm_predict(y, x, m [, "options"]) -> (p_labels, p_acc, p_vals)
Predict data (y, x) with the SVM model m.
"options":
-b probability_estimates: whether to predict probability estimates,
0 or 1 (default 0); for one-class SVM only 0 is supported.
The return tuple contains
p_labels: a list of predicted labels
p_acc: a tuple including accuracy (for classification), mean-squared
error, and squared correlation coefficient (for regression).
p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes, for decision values,
each element includes results of predicting k(k-1)/2 binary-class
SVMs. For probabilities, each element contains k values indicating
the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'model.label'
field in the model structure.
"""
predict_probability = 0
argv = options.split()
i = 0
while i < len(argv):
if argv[i] == '-b':
i += 1
predict_probability = int(argv[i])
else:
raise ValueError("Wrong options")
        i += 1
svm_type = m.get_svm_type()
is_prob_model = m.is_probability_model()
nr_class = m.get_nr_class()
pred_labels = []
pred_values = []
if predict_probability:
if not is_prob_model:
raise ValueError("Model does not support probabiliy estimates")
if svm_type in [NU_SVR, EPSILON_SVR]:
print("Prob. model for test data: target value = predicted value + z,\n"
"z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % m.get_svr_probability());
nr_class = 0
prob_estimates = (c_double * nr_class)()
for xi in x:
xi, idx = gen_svm_nodearray(xi)
label = libsvm.svm_predict_probability(m, xi, prob_estimates)
values = prob_estimates[:nr_class]
pred_labels += [label]
pred_values += [values]
else:
if is_prob_model:
print("Model supports probability estimates, but disabled in predicton.")
if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
nr_classifier = 1
else:
nr_classifier = nr_class*(nr_class-1)//2
dec_values = (c_double * nr_classifier)()
for xi in x:
xi, idx = gen_svm_nodearray(xi)
label = libsvm.svm_predict_values(m, xi, dec_values)
            if nr_class == 1:
values = [1]
else:
values = dec_values[:nr_classifier]
pred_labels += [label]
pred_values += [values]
ACC, MSE, SCC = evaluations(y, pred_labels)
l = len(y)
if svm_type in [EPSILON_SVR, NU_SVR]:
print("Mean squared error = %g (regression)" % MSE)
print("Squared correlation coefficient = %g (regression)" % SCC)
else:
print("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(l*ACC/100), l))
return pred_labels, (ACC, MSE, SCC), pred_values
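# Illustrative only -- a sketch of the usual train/predict round trip,
# assuming m is a model returned by svm_train() above.
def _example_svm_predict(m):
    y, x = [1, -1], [{1: 1.0}, {1: -1.0}]
    p_labels, p_acc, p_vals = svm_predict(y, x, m)
    # p_acc unpacks to (accuracy, mean squared error, squared correlation).
    return p_labels, p_acc, p_vals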
| gpl-2.0 |
jiangzhonghui/viewfinder | marketing/tornado/test/escape_test.py | 47 | 10024 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import tornado.escape
from tornado.escape import utf8, xhtml_escape, xhtml_unescape, url_escape, url_unescape, to_unicode, json_decode, json_encode
from tornado.util import u, unicode_type, bytes_type
from tornado.test.util import unittest
linkify_tests = [
# (input, linkify_kwargs, expected_output)
("hello http://world.com/!", {},
u('hello <a href="http://world.com/">http://world.com/</a>!')),
("hello http://world.com/with?param=true&stuff=yes", {},
u('hello <a href="http://world.com/with?param=true&stuff=yes">http://world.com/with?param=true&stuff=yes</a>')),
# an opened paren followed by many chars killed Gruber's regex
("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
u('<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')),
# as did too many dots at the end
("http://url.com/withmany.......................................", {},
u('<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................')),
("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
u('<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)')),
# some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
    # plus a few extras (such as multiple parentheses).
("http://foo.com/blah_blah", {},
u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>')),
("http://foo.com/blah_blah/", {},
u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>')),
("(Something like http://foo.com/blah_blah)", {},
u('(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)')),
("http://foo.com/blah_blah_(wikipedia)", {},
u('<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>')),
("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
u('<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>')),
("(Something like http://foo.com/blah_blah_(wikipedia))", {},
u('(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)')),
("http://foo.com/blah_blah.", {},
u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.')),
("http://foo.com/blah_blah/.", {},
u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.')),
("<http://foo.com/blah_blah>", {},
u('<<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>>')),
("<http://foo.com/blah_blah/>", {},
u('<<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>>')),
("http://foo.com/blah_blah,", {},
u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,')),
("http://www.example.com/wpstyle/?p=364.", {},
u('<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.')),
("rdar://1234",
{"permitted_protocols": ["http", "rdar"]},
u('<a href="rdar://1234">rdar://1234</a>')),
("rdar:/1234",
{"permitted_protocols": ["rdar"]},
u('<a href="rdar:/1234">rdar:/1234</a>')),
("http://userid:password@example.com:8080", {},
u('<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>')),
("http://userid@example.com", {},
u('<a href="http://userid@example.com">http://userid@example.com</a>')),
("http://userid@example.com:8080", {},
u('<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>')),
("http://userid:password@example.com", {},
u('<a href="http://userid:password@example.com">http://userid:password@example.com</a>')),
("message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
{"permitted_protocols": ["http", "message"]},
u('<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>')),
(u("http://\u27a1.ws/\u4a39"), {},
u('<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>')),
("<tag>http://example.com</tag>", {},
     u('&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;')),
("Just a www.example.com link.", {},
u('Just a <a href="http://www.example.com">www.example.com</a> link.')),
("Just a www.example.com link.",
{"require_protocol": True},
u('Just a www.example.com link.')),
("A http://reallylong.com/link/that/exceedsthelenglimit.html",
{"require_protocol": True, "shorten": True},
u('A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>')),
("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
{"shorten": True},
u('A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!')),
("A file:///passwords.txt and http://web.com link", {},
u('A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link')),
("A file:///passwords.txt and http://web.com link",
{"permitted_protocols": ["file"]},
u('A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link')),
("www.external-link.com",
{"extra_params": 'rel="nofollow" class="external"'},
u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
("www.external-link.com and www.internal-link.com/blogs extra",
{"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},
u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra')),
("www.external-link.com",
{"extra_params": lambda href: ' rel="nofollow" class="external" '},
u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
]
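# Illustrative only -- a direct-call sketch of the API exercised by the
# tuples above; the kwargs dict in each tuple is passed straight through:
#     tornado.escape.linkify("see http://example.com")
#     # -> u'see <a href="http://example.com">http://example.com</a>'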
class EscapeTestCase(unittest.TestCase):
def test_linkify(self):
for text, kwargs, html in linkify_tests:
linked = tornado.escape.linkify(text, **kwargs)
self.assertEqual(linked, html)
def test_xhtml_escape(self):
tests = [
("<foo>", "<foo>"),
(u("<foo>"), u("<foo>")),
(b"<foo>", b"<foo>"),
("<>&\"'", "<>&"'"),
("&", "&amp;"),
(u("<\u00e9>"), u("<\u00e9>")),
(b"<\xc3\xa9>", b"<\xc3\xa9>"),
]
for unescaped, escaped in tests:
self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
def test_url_escape_unicode(self):
tests = [
# byte strings are passed through as-is
(u('\u00e9').encode('utf8'), '%C3%A9'),
(u('\u00e9').encode('latin1'), '%E9'),
# unicode strings become utf8
(u('\u00e9'), '%C3%A9'),
]
for unescaped, escaped in tests:
self.assertEqual(url_escape(unescaped), escaped)
def test_url_unescape_unicode(self):
tests = [
('%C3%A9', u('\u00e9'), 'utf8'),
('%C3%A9', u('\u00c3\u00a9'), 'latin1'),
('%C3%A9', utf8(u('\u00e9')), None),
]
for escaped, unescaped, encoding in tests:
# input strings to url_unescape should only contain ascii
# characters, but make sure the function accepts both byte
# and unicode strings.
self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
def test_url_escape_quote_plus(self):
unescaped = '+ #%'
plus_escaped = '%2B+%23%25'
escaped = '%2B%20%23%25'
self.assertEqual(url_escape(unescaped), plus_escaped)
self.assertEqual(url_escape(unescaped, plus=False), escaped)
self.assertEqual(url_unescape(plus_escaped), unescaped)
self.assertEqual(url_unescape(escaped, plus=False), unescaped)
self.assertEqual(url_unescape(plus_escaped, encoding=None),
utf8(unescaped))
self.assertEqual(url_unescape(escaped, encoding=None, plus=False),
utf8(unescaped))
def test_escape_return_types(self):
# On python2 the escape methods should generally return the same
# type as their argument
self.assertEqual(type(xhtml_escape("foo")), str)
self.assertEqual(type(xhtml_escape(u("foo"))), unicode_type)
def test_json_decode(self):
# json_decode accepts both bytes and unicode, but strings it returns
# are always unicode.
self.assertEqual(json_decode(b'"foo"'), u("foo"))
self.assertEqual(json_decode(u('"foo"')), u("foo"))
# Non-ascii bytes are interpreted as utf8
self.assertEqual(json_decode(utf8(u('"\u00e9"'))), u("\u00e9"))
def test_json_encode(self):
# json deals with strings, not bytes. On python 2 byte strings will
# convert automatically if they are utf8; on python 3 byte strings
# are not allowed.
self.assertEqual(json_decode(json_encode(u("\u00e9"))), u("\u00e9"))
if bytes_type is str:
self.assertEqual(json_decode(json_encode(utf8(u("\u00e9")))), u("\u00e9"))
self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
| apache-2.0 |
mrgloom/h2o-3 | h2o-py/tests/testdir_jira/pyunit_NOPASS_pubdev_1388_offset_comparisons.py | 3 | 2998 | import sys
sys.path.insert(1, "../../")
import h2o
def offset_1388(ip, port):
print "Loading datasets..."
pros_hex = h2o.import_file(h2o.locate("smalldata/prostate/prostate.csv"))
pros_hex[1] = pros_hex[1].asfactor()
pros_hex[3] = pros_hex[3].asfactor()
pros_hex[4] = pros_hex[4].asfactor()
pros_hex[5] = pros_hex[5].asfactor()
pros_hex[8] = pros_hex[8].asfactor()
cars_hex = h2o.import_file(h2o.locate("smalldata/junit/cars.csv"))
cars_hex[0] = cars_hex[0].asfactor()
cars_hex[2] = cars_hex[2].asfactor()
print "Running Binomial Comparison..."
glm_bin_h2o = h2o.glm(x=pros_hex[2:9], y=pros_hex[1], training_frame=pros_hex, family="binomial", standardize=False,
offset_column="AGE", Lambda=[0], max_iterations=100)
print "binomial"
print "R:"
print "deviance: {0}".format(1464.9565781185)
print "null deviance: {0}".format(2014.93087862689)
print "aic: {0}".format(1494.9565781185)
print "H2O:"
print "deviance {0}".format(glm_bin_h2o.residual_deviance())
print "null deviance {0}".format(glm_bin_h2o.null_deviance())
print "aic {0}".format(glm_bin_h2o.aic())
assert abs(1464.9565781185 - glm_bin_h2o.residual_deviance()) < 0.1
assert abs(2014.93087862689 - glm_bin_h2o.null_deviance()) < 0.1
assert abs(1494.9565781185 - glm_bin_h2o.aic()) < 0.1
print "Running Regression Comparisons..."
glm_h2o = h2o.glm(x=cars_hex[2:8], y=cars_hex[1], training_frame=cars_hex, family="gaussian", standardize=False,
offset_column="year", Lambda = [0], max_iterations = 100)
print "gaussian"
print "R:"
print "deviance: {0}".format(4204.68399275449)
print "null deviance: {0}".format(16072.0955102041)
print "aic: {0}".format(2062.54330117177)
print "H2O:"
print "deviance {0}".format(glm_h2o.residual_deviance())
print "null deviance {0}".format(glm_h2o.null_deviance())
print "aic {0}".format(glm_h2o.aic())
assert abs(4204.68399275449 - glm_h2o.residual_deviance()) < 0.1
assert abs(16072.0955102041 - glm_h2o.null_deviance()) < 0.1
assert abs(2062.54330117177 - glm_h2o.aic()) < 0.1
glm_h2o = h2o.glm(x=cars_hex[2:8], y=cars_hex[1], training_frame=cars_hex, family="poisson", standardize=False,
offset_column="year", Lambda = [0], max_iterations = 100)
print "poisson"
print "R:"
print "deviance: {0}".format(54039.1725227918)
print "null deviance: {0}".format(59381.5624028358)
print "aic: {0}".format("Inf")
print "H2O:"
print "deviance {0}".format(glm_h2o.residual_deviance())
print "null deviance {0}".format(glm_h2o.null_deviance())
print "aic {0}".format(glm_h2o.aic())
assert abs(54039.1725227918 - glm_h2o.residual_deviance()) < 0.1
assert abs(59381.5624028358 - glm_h2o.null_deviance()) < 0.1
    assert glm_h2o.aic() == float('inf')  # abs(inf - aic) is never < 0.1, so compare directly
if __name__ == "__main__":
h2o.run_test(sys.argv, offset_1388)
| apache-2.0 |
aagavin/Friday-Flair-Python-reddit-api | reddit/helpers.py | 1 | 3568 | # This file is part of reddit_api.
#
# reddit_api is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# reddit_api is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with reddit_api. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
from urlparse import urljoin
from reddit.decorators import SleepAfter, require_login
from reddit.util import Memoize
def _get_section(subpath=''):
"""
Used by the Redditor class to generate each of the sections (overview,
comments, submitted).
"""
def _section(self, sort='new', time='all', *args, **kw):
if 'url_data' in kw and kw['url_data']:
url_data = kw['url_data']
else:
url_data = kw['url_data'] = {}
url_data.update({'sort': sort, 't': time})
url = urljoin(self._url, subpath) # pylint: disable-msg=W0212
return self.reddit_session.get_content(url, *args, **kw)
return _section
def _get_sorter(subpath='', **defaults):
"""
Used by the Reddit Page classes to generate each of the currently supported
sorts (hot, top, new, best).
"""
def _sorted(self, *args, **kw):
if 'url_data' in kw and kw['url_data']:
url_data = kw['url_data']
else:
url_data = kw['url_data'] = {}
for key, value in defaults.items():
url_data.setdefault(key, value)
url = urljoin(self._url, subpath) # pylint: disable-msg=W0212
return self.reddit_session.get_content(url, *args, **kw)
return _sorted
def _modify_relationship(relationship, unlink=False, is_sub=False):
"""
Modify the relationship between the current user or subreddit and a target
thing.
Used to support friending (user-to-user), as well as moderating,
    adding contributors, and banning (user-to-subreddit).
"""
# the API uses friend and unfriend to manage all of these relationships
url_key = 'unfriend' if unlink else 'friend'
@require_login
def do_relationship(thing, user):
params = {'name': str(user),
'container': thing.content_id,
'type': relationship,
'uh': thing.reddit_session.modhash,
'api_type': 'json'}
if is_sub:
params['r'] = str(thing)
url = thing.reddit_session.config[url_key]
return thing.reddit_session.request_json(url, params)
return do_relationship
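# Illustrative only -- a sketch of how the factory above is typically bound
# at class level elsewhere in the package; the class and attribute names
# here are hypothetical:
#     class Subreddit(...):
#         add_moderator = _modify_relationship('moderator', is_sub=True)
#         remove_moderator = _modify_relationship('moderator', unlink=True,
#                                                 is_sub=True)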
@Memoize
@SleepAfter
def _request(reddit_session, page_url, params=None, url_data=None, timeout=45):
if isinstance(page_url, unicode):
page_url = urllib.quote(page_url.encode('utf-8'), ':/')
if url_data:
page_url += '?' + urllib.urlencode(url_data)
encoded_params = None
if params:
params = dict([k, v.encode('utf-8')] for k, v in params.items())
encoded_params = urllib.urlencode(params)
request = urllib2.Request(page_url, data=encoded_params,
headers=reddit_session.DEFAULT_HEADERS)
# pylint: disable-msg=W0212
response = reddit_session._opener.open(request, timeout=timeout)
return response.read()
| gpl-3.0 |
JoelBender/bacpypes | tests/test_constructed_data/helpers.py | 1 | 2570 | #!/usr/bin/python
"""
Helper classes for constructed data tests.
"""
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob
from bacpypes.errors import MissingRequiredParameter
from bacpypes.primitivedata import Boolean, Integer, Tag, TagList
from bacpypes.constructeddata import Element, Sequence
# some debugging
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
class SequenceEquality:
"""
    This mixin class adds an equality function that compares the values of
    all of the elements, including optional ones. It raises an exception
    when a required element is missing, even if it is missing in both objects.
"""
def __eq__(self, other):
if _debug: SequenceEquality._debug("__eq__ %r", other)
# loop through this sequences elements
for element in self.sequenceElements:
self_value = getattr(self, element.name, None)
other_value = getattr(other, element.name, None)
if (not element.optional) and ((self_value is None) or (other_value is None)):
raise MissingRequiredParameter("%s is a missing required element of %s" % (element.name, self.__class__.__name__))
if not (self_value == other_value):
return False
# success
return True
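# Illustrative only -- a sketch of the resulting equality semantics, using
# the SimpleSequence class defined below:
#     SimpleSequence(hydrogen=True) == SimpleSequence(hydrogen=True)  # True
#     SimpleSequence() == SimpleSequence()
#     # raises MissingRequiredParameter: 'hydrogen' is required but unset
#     # on both sides.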
@bacpypes_debugging
class EmptySequence(Sequence, SequenceEquality):
def __init__(self, *args, **kwargs):
if _debug: EmptySequence._debug("__init__ %r %r", args, kwargs)
Sequence.__init__(self, *args, **kwargs)
@bacpypes_debugging
class SimpleSequence(Sequence, SequenceEquality):
sequenceElements = [
Element('hydrogen', Boolean),
]
def __init__(self, *args, **kwargs):
if _debug: SimpleSequence._debug("__init__ %r %r", args, kwargs)
Sequence.__init__(self, *args, **kwargs)
@bacpypes_debugging
class CompoundSequence1(Sequence, SequenceEquality):
sequenceElements = [
Element('hydrogen', Boolean),
Element('helium', Integer),
]
def __init__(self, *args, **kwargs):
if _debug: CompoundSequence1._debug("__init__ %r %r", args, kwargs)
Sequence.__init__(self, *args, **kwargs)
@bacpypes_debugging
class CompoundSequence2(Sequence, SequenceEquality):
sequenceElements = [
Element('lithium', Boolean, optional=True),
Element('beryllium', Integer),
]
def __init__(self, *args, **kwargs):
if _debug: CompoundSequence2._debug("__init__ %r %r", args, kwargs)
Sequence.__init__(self, *args, **kwargs)
| mit |
iansprice/wagtail | wagtail/project_template/home/migrations/0002_create_homepage.py | 10 | 1686 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# If migration is run multiple times, it may have already been deleted
Page.objects.filter(id=2).delete()
# Create content type for homepage model
homepage_content_type, __ = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Home",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
def remove_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# Page and Site objects CASCADE
HomePage.objects.filter(slug='home', depth=2).delete()
# Delete content type for homepage model
ContentType.objects.filter(model='homepage', app_label='home').delete()
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage, remove_homepage),
]
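# Illustrative only -- assuming a standard Django project layout, this data
# migration is applied with `python manage.py migrate home` and reversed
# with `python manage.py migrate home 0001_initial`.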
| bsd-3-clause |
agentr13/python-phonenumbers | python/tests/testdata/region_BS.py | 7 | 1600 | """Auto-generated file, do not edit by hand. BS metadata"""
from phonenumbers.phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BS = PhoneMetadata(id='BS', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='(242|8(00|66|77|88)|900)\\d{7}', possible_number_pattern='\\d{7,10}'),
fixed_line=PhoneNumberDesc(national_number_pattern='242(?:3(?:02|[236][1-9]|4[0-24-9]|5[0-68]|7[3-57]|9[2-5])|4(?:2[237]|51|64|77)|502|636|702)\\d{4}', possible_number_pattern='\\d{7,10}'),
mobile=PhoneNumberDesc(national_number_pattern='242(357|359|457|557)\\d{4}', possible_number_pattern='\\d{10}'),
toll_free=PhoneNumberDesc(national_number_pattern='8(00|66|77|88)\\d{7}', possible_number_pattern='\\d{10}'),
premium_rate=PhoneNumberDesc(national_number_pattern='900\\d{7}', possible_number_pattern='\\d{10}'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='1',
national_prefix_for_parsing='1')
| apache-2.0 |
inspyration/odoo | addons/auth_openid/controllers/__init__.py | 443 | 1033 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import main
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
caveman-dick/ansible | test/units/modules/network/nxos/test_nxos_evpn_vni.py | 47 | 2744 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_evpn_vni
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosEvpnVniModule(TestNxosModule):
module = nxos_evpn_vni
def setUp(self):
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_evpn_vni.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_evpn_vni.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_evpn_vni.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
self.mock_run_commands.stop()
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('', 'nxos_evpn_vni_config.cfg')
self.load_config.return_value = None
def test_nxos_evpn_vni_present(self):
set_module_args(dict(vni='6000',
route_target_import='5000:10',
state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['evpn',
'vni 6000 l2',
'route-target import 5000:10'])
def test_nxos_evpn_vni_absent_not_existing(self):
set_module_args(dict(vni='12000', state='absent'))
result = self.execute_module(changed=False)
self.assertEqual(result['commands'], [])
def test_nxos_evpn_vni_absent_existing(self):
set_module_args(dict(vni='6000', state='absent'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['evpn', 'no vni 6000 l2'])
| gpl-3.0 |
JackDandy/SickGear | lib/chardet/escsm.py | 8 | 10756 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import MachineState
HZ_CLS = (
1,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,4,0,5,2,0, # 78 - 7f
1,1,1,1,1,1,1,1, # 80 - 87
1,1,1,1,1,1,1,1, # 88 - 8f
1,1,1,1,1,1,1,1, # 90 - 97
1,1,1,1,1,1,1,1, # 98 - 9f
1,1,1,1,1,1,1,1, # a0 - a7
1,1,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,1,1,1,1,1,1, # c0 - c7
1,1,1,1,1,1,1,1, # c8 - cf
1,1,1,1,1,1,1,1, # d0 - d7
1,1,1,1,1,1,1,1, # d8 - df
1,1,1,1,1,1,1,1, # e0 - e7
1,1,1,1,1,1,1,1, # e8 - ef
1,1,1,1,1,1,1,1, # f0 - f7
1,1,1,1,1,1,1,1, # f8 - ff
)
HZ_ST = (
MachineState.START,MachineState.ERROR, 3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START, 4,MachineState.ERROR,# 10-17
5,MachineState.ERROR, 6,MachineState.ERROR, 5, 5, 4,MachineState.ERROR,# 18-1f
4,MachineState.ERROR, 4, 4, 4,MachineState.ERROR, 4,MachineState.ERROR,# 20-27
4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f
)
HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
HZ_SM_MODEL = {'class_table': HZ_CLS,
'class_factor': 6,
'state_table': HZ_ST,
'char_len_table': HZ_CHAR_LEN_TABLE,
'name': "HZ-GB-2312",
'language': 'Chinese'}
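# Illustrative only -- a sketch of how chardet's CodingStateMachine consumes
# a model like HZ_SM_MODEL (attribute names assumed from the package): each
# input byte is mapped to a class via class_table, the next state is read
# from state_table at (current_state * class_factor + byte_class), and
# char_len_table gives the byte length of each character class.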
ISO2022CN_CLS = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,4,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022CN_ST = (
MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,# 18-1f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27
5, 6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f
)
ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
'class_factor': 9,
'state_table': ISO2022CN_ST,
'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
'name': "ISO-2022-CN",
'language': 'Chinese'}
ISO2022JP_CLS = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,2,2, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,7,0,0,0, # 20 - 27
3,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
6,0,4,0,8,0,0,0, # 40 - 47
0,9,5,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022JP_ST = (
MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f
MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 20-27
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47
)
ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
'class_factor': 10,
'state_table': ISO2022JP_ST,
'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
'name': "ISO-2022-JP",
'language': 'Japanese'}
ISO2022KR_CLS = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,3,0,0,0, # 20 - 27
0,4,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,5,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022KR_ST = (
MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 10-17
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27
)
ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
'class_factor': 6,
'state_table': ISO2022KR_ST,
'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
'name': "ISO-2022-KR",
'language': 'Korean'}
| gpl-3.0 |
Qalthos/ansible | lib/ansible/utils/unsafe_proxy.py | 41 | 4247 | # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
# retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
# Original Python Recipe for Proxy:
# http://code.activestate.com/recipes/496741-object-proxying/
# Author: Tomer Filiba
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping, MutableSequence, Set
__all__ = ['UnsafeProxy', 'AnsibleUnsafe', 'wrap_var']
class AnsibleUnsafe(object):
__UNSAFE__ = True
class AnsibleUnsafeText(text_type, AnsibleUnsafe):
pass
class UnsafeProxy(object):
def __new__(cls, obj, *args, **kwargs):
# In our usage we should only receive unicode strings.
        # This conditional and conversion exist to sanity-check the values
        # we're given, but we may want to take them out for testing and
        # sanitize our input instead.
if isinstance(obj, string_types):
obj = to_text(obj, errors='surrogate_or_strict')
return AnsibleUnsafeText(obj)
return obj
def _wrap_dict(v):
for k in v.keys():
if v[k] is not None:
v[wrap_var(k)] = wrap_var(v[k])
return v
def _wrap_list(v):
for idx, item in enumerate(v):
if item is not None:
v[idx] = wrap_var(item)
return v
def _wrap_set(v):
return set(item if item is None else wrap_var(item) for item in v)
def wrap_var(v):
if isinstance(v, Mapping):
v = _wrap_dict(v)
elif isinstance(v, MutableSequence):
v = _wrap_list(v)
elif isinstance(v, Set):
v = _wrap_set(v)
elif v is not None and not isinstance(v, AnsibleUnsafe):
v = UnsafeProxy(v)
return v
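# Illustrative only -- a sketch of the intended effect:
#     wrapped = wrap_var({'cmd': 'echo hi'})
#     isinstance(wrapped['cmd'], AnsibleUnsafe)  # True; the templar will
#     # not re-template this value, which prevents template injection.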
| gpl-3.0 |
gedakc/manuskript | manuskript/ui/exporters/exporter_ui.py | 2 | 3647 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'manuskript/ui/exporters/exporter_ui.ui'
#
# Created: Fri Apr 8 12:22:37 2016
# by: PyQt5 UI code generator 5.2.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_exporter(object):
def setupUi(self, exporter):
exporter.setObjectName("exporter")
exporter.resize(933, 642)
self.verticalLayout = QtWidgets.QVBoxLayout(exporter)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(exporter)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.cmbExporters = QtWidgets.QComboBox(exporter)
self.cmbExporters.setObjectName("cmbExporters")
self.horizontalLayout.addWidget(self.cmbExporters)
self.btnManageExporters = QtWidgets.QPushButton(exporter)
icon = QtGui.QIcon.fromTheme("preferences-system")
self.btnManageExporters.setIcon(icon)
self.btnManageExporters.setObjectName("btnManageExporters")
self.horizontalLayout.addWidget(self.btnManageExporters)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btnPreview = QtWidgets.QPushButton(exporter)
icon = QtGui.QIcon.fromTheme("document-print-preview")
self.btnPreview.setIcon(icon)
self.btnPreview.setObjectName("btnPreview")
self.horizontalLayout.addWidget(self.btnPreview)
self.btnExport = QtWidgets.QPushButton(exporter)
icon = QtGui.QIcon.fromTheme("document-export")
self.btnExport.setIcon(icon)
self.btnExport.setObjectName("btnExport")
self.horizontalLayout.addWidget(self.btnExport)
self.verticalLayout.addLayout(self.horizontalLayout)
self.splitter = QtWidgets.QSplitter(exporter)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.grpSettings = QtWidgets.QGroupBox(self.splitter)
self.grpSettings.setObjectName("grpSettings")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.grpSettings)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.grpPreview = QtWidgets.QGroupBox(self.splitter)
self.grpPreview.setObjectName("grpPreview")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.grpPreview)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.verticalLayout.addWidget(self.splitter)
self.retranslateUi(exporter)
QtCore.QMetaObject.connectSlotsByName(exporter)
def retranslateUi(self, exporter):
_translate = QtCore.QCoreApplication.translate
exporter.setWindowTitle(_translate("exporter", "Export"))
self.label.setText(_translate("exporter", "Export to:"))
self.btnManageExporters.setText(_translate("exporter", "Manage exporters"))
self.btnPreview.setText(_translate("exporter", "Preview"))
self.btnExport.setText(_translate("exporter", "Export"))
self.grpSettings.setTitle(_translate("exporter", "Settings"))
self.grpPreview.setTitle(_translate("exporter", "Preview"))
| gpl-3.0 |
thiriel/maps | django/contrib/webdesign/templatetags/webdesign.py | 350 | 2196 | from django.contrib.webdesign.lorem_ipsum import words, paragraphs
from django import template
register = template.Library()
class LoremNode(template.Node):
def __init__(self, count, method, common):
self.count, self.method, self.common = count, method, common
def render(self, context):
try:
count = int(self.count.resolve(context))
except (ValueError, TypeError):
count = 1
if self.method == 'w':
return words(count, common=self.common)
else:
paras = paragraphs(count, common=self.common)
if self.method == 'p':
paras = ['<p>%s</p>' % p for p in paras]
return u'\n\n'.join(paras)
#@register.tag
def lorem(parser, token):
"""
Creates random Latin text useful for providing test data in templates.
Usage format::
{% lorem [count] [method] [random] %}
``count`` is a number (or variable) containing the number of paragraphs or
words to generate (default is 1).
``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for
plain-text paragraph blocks (default is ``b``).
``random`` is the word ``random``, which if given, does not use the common
paragraph (starting "Lorem ipsum dolor sit amet, consectetuer...").
Examples:
* ``{% lorem %}`` will output the common "lorem ipsum" paragraph
* ``{% lorem 3 p %}`` will output the common "lorem ipsum" paragraph
and two random paragraphs each wrapped in HTML ``<p>`` tags
* ``{% lorem 2 w random %}`` will output two random latin words
"""
bits = list(token.split_contents())
tagname = bits[0]
# Random bit
common = bits[-1] != 'random'
if not common:
bits.pop()
# Method bit
if bits[-1] in ('w', 'p', 'b'):
method = bits.pop()
else:
method = 'b'
# Count bit
if len(bits) > 1:
count = bits.pop()
else:
count = '1'
count = parser.compile_filter(count)
if len(bits) != 1:
raise template.TemplateSyntaxError("Incorrect format for %r tag" % tagname)
return LoremNode(count, method, common)
lorem = register.tag(lorem)
| bsd-3-clause |
Kiiv/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xboxclips.py | 22 | 2035 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
float_or_none,
int_or_none,
)
class XboxClipsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?xboxclips\.com/video\.php\?.*vid=(?P<id>[\w-]{36})'
_TEST = {
'url': 'https://xboxclips.com/video.php?uid=2533274823424419&gamertag=Iabdulelah&vid=074a69a9-5faf-46aa-b93b-9909c1720325',
'md5': 'fbe1ec805e920aeb8eced3c3e657df5d',
'info_dict': {
'id': '074a69a9-5faf-46aa-b93b-9909c1720325',
'ext': 'mp4',
'title': 'Iabdulelah playing Upload Studio',
'filesize_approx': 28101836.8,
'timestamp': 1407388500,
'upload_date': '20140807',
'duration': 56,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(
r'>Link: <a href="([^"]+)">', webpage, 'video URL')
title = self._html_search_regex(
r'<title>XboxClips \| ([^<]+)</title>', webpage, 'title')
timestamp = parse_iso8601(self._html_search_regex(
r'>Recorded: ([^<]+)<', webpage, 'upload date', fatal=False))
filesize = float_or_none(self._html_search_regex(
r'>Size: ([\d\.]+)MB<', webpage, 'file size', fatal=False), invscale=1024 * 1024)
duration = int_or_none(self._html_search_regex(
r'>Duration: (\d+) Seconds<', webpage, 'duration', fatal=False))
view_count = int_or_none(self._html_search_regex(
r'>Views: (\d+)<', webpage, 'view count', fatal=False))
return {
'id': video_id,
'url': video_url,
'title': title,
'timestamp': timestamp,
'filesize_approx': filesize,
'duration': duration,
'view_count': view_count,
}
| gpl-3.0 |
funkybob/django-email-template | tests/email_template_tests/tests.py | 2 | 4422 | from functools import partial
from django.template import Template
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import (
setup_test_template_loader,
restore_template_loaders,
)
from email_template.email import (
render_django_fields,
send_base,
send_django,
)
EMAIL = Template("""
{% block subject %}Subject{% endblock %}
{% block text %}Hello from {{ name }}{% endblock %}
{% block recipients %}x@z.com, y@z.com{% endblock %}
""")
HTML_EMAIL = Template("""
{% block subject %}Subject{% endblock %}
{% block html %}Hello from {{ name }}{% endblock %}
{% block recipients %}x@z.com, y@z.com{% endblock %}
""")
MULTI_EMAIL = Template("""
{% block subject %}Subject{% endblock %}
{% block text %}Hello from {{ name }}{% endblock %}
{% block html %}Hello from {{ name }}{% endblock %}
{% block recipients %}x@z.com, y@z.com{% endblock %}
""")
MISSING = Template("""
{% block recipients %}x@z.com, y@z.com{% endblock %}
""")
def send_method(**kwargs):
return kwargs
send = partial(send_base,
send_method=send_method,
render_method=render_django_fields,
)
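# Illustrative only: functools.partial pre-binds the transport and renderer,
# so the tests below can call send(...) with just the message arguments;
# send_method above simply echoes its kwargs so they can be asserted on.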
class SendBaseTest(TestCase):
def setUp(self):
templates = {
"email.html": EMAIL,
"missing.html": MISSING,
}
setup_test_template_loader(templates)
def tearDown(self):
restore_template_loaders()
def test_send(self):
message = send(
template_name="email.html",
context_data=dict(name="Y"),
request=None,
from_email="x@y.com",
send_method=send_method,
send_method_args=dict(
header=2,
)
)
self.assertEqual(message["text"], "Hello from Y")
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message["recipient_list"], ["x@z.com", "y@z.com"])
self.assertEqual(message["from_email"], "x@y.com")
self.assertEqual(message["header"], 2)
def test_with_request(self):
message = send(
template_name="email.html",
context_data=dict(name="Y"),
request=RequestFactory().post("/email/"),
send_method=send_method,
)
self.assertEqual(message["text"], "Hello from Y")
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message["recipient_list"], ["x@z.com", "y@z.com"])
def test_missing_subject_and_body(self):
message = send(
template_name="missing.html",
context_data=dict(name="X"),
request=RequestFactory().post("/email/"),
)
self.assertEqual(message["text"], "")
self.assertEqual(message["subject"], "")
self.assertEqual(message["recipient_list"], ["x@z.com", "y@z.com"])
class SendDjangoTest(TestCase):
def setUp(self):
templates = {
"email.html": EMAIL,
"html-email.html": HTML_EMAIL,
"multi-email.html": MULTI_EMAIL,
}
setup_test_template_loader(templates)
def tearDown(self):
restore_template_loaders()
def test_text(self):
message = send_django(
template_name="email.html",
context_data=dict(name="Y"),
request=None,
from_email="x@y.com",
)
self.assertEqual(message.content_subtype, "plain")
self.assertEqual(message.body, "Hello from Y")
self.assertEqual(message.from_email, "x@y.com")
self.assertEqual(message.subject, "Subject")
self.assertEqual(message.to, ["x@z.com", "y@z.com"])
def test_html(self):
message = send_django(
template_name="html-email.html",
context_data=dict(name="Y"),
request=None,
from_email="x@y.com",
)
self.assertEqual(message.content_subtype, "html")
self.assertEqual(message.body, "Hello from Y")
def test_multi(self):
message = send_django(
template_name="multi-email.html",
context_data=dict(name="Y"),
request=None,
from_email="x@y.com",
)
self.assertEqual(message.content_subtype, "plain")
self.assertEqual(message.body, "Hello from Y")
self.assertEqual(
message.alternatives,
[("Hello from Y", "text/html")],
)
| mit |
dataxu/ansible | lib/ansible/module_utils/network/asa/asa.py | 86 | 5729 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, EntityCollection
from ansible.module_utils.connection import exec_command
from ansible.module_utils.connection import Connection, ConnectionError
_DEVICE_CONFIGS = {}
_CONNECTION = None
asa_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int'),
'context': dict(),
'passwords': dict()
}
asa_argument_spec = {
'provider': dict(type='dict', options=asa_provider_spec),
}
asa_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'authorize': dict(type='bool'),
'auth_pass': dict(removed_in_version=2.9, no_log=True),
'timeout': dict(removed_in_version=2.9, type='int'),
'context': dict(),
'passwords': dict()
}
asa_argument_spec.update(asa_top_spec)
command_spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
def get_provider_argspec():
return asa_provider_spec
def check_args(module):
pass
def get_connection(module):
global _CONNECTION
if _CONNECTION:
return _CONNECTION
_CONNECTION = Connection(module._socket_path)
context = module.params['context']
if context:
if context == 'system':
command = 'changeto system'
else:
command = 'changeto context %s' % context
_CONNECTION.get(command)
return _CONNECTION
def to_commands(module, commands):
if not isinstance(commands, list):
raise AssertionError('argument must be of type <list>')
transform = EntityCollection(module, command_spec)
commands = transform(commands)
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
module.warn('only show commands are supported when using check '
'mode, not executing `%s`' % item['command'])
return commands
def run_commands(module, commands, check_rc=True):
connection = get_connection(module)
commands = to_commands(module, to_list(commands))
responses = list()
for cmd in commands:
out = connection.get(**cmd)
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
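# Illustrative only -- a sketch of typical module code calling the helper
# above, assuming 'module' is an AnsibleModule wired to this connection:
#     output = run_commands(module, ['show version'])
#     version_text = output[0]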
def get_config(module, flags=None):
flags = [] if flags is None else flags
passwords = module.params['passwords']
if passwords:
cmd = 'more system:running-config'
else:
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
conn = get_connection(module)
out = conn.get(cmd)
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def load_config(module, config):
try:
conn = get_connection(module)
conn.edit_config(config)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def get_defaults_flag(module):
rc, out, err = exec_command(module, 'show running-config ?')
out = to_text(out, errors='surrogate_then_replace')
commands = set()
for line in out.splitlines():
if line:
commands.add(line.strip().split()[0])
if 'all' in commands:
return 'all'
else:
return 'full'
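# End-to-end sketch (illustrative; 'module' is assumed to be an AnsibleModule
# built with asa_argument_spec, which is not shown here):
def _example_module_flow(module):
    output = run_commands(module, ['show version'])
    defaults_flag = get_defaults_flag(module)          # 'all' or 'full'
    config = get_config(module, flags=[defaults_flag])
    return output, config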
| gpl-3.0 |
maxdeliso/elevatorSim | Lib/distutils/command/config.py | 151 | 13092 | """distutils.command.config
Implements the Distutils 'config' command, a (mostly) empty command class
that exists mainly to be sub-classed by specific module distributions and
applications. The idea is that while every "config" command is different,
at least they're all named the same, and users always see "config" in the
list of standard commands. Also, this is a good place to put common
configure-like tasks: "try to compile this C code", or "figure out where
this header file lives".
"""
import sys, os, re
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.sysconfig import customize_compiler
from distutils import log
LANG_EXT = {"c": ".c", "c++": ".cxx"}
class config(Command):
description = "prepare to build"
user_options = [
('compiler=', None,
"specify the compiler type"),
('cc=', None,
"specify the compiler executable"),
('include-dirs=', 'I',
"list of directories to search for header files"),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries"),
('noisy', None,
"show every action (compile, link, run, ...) taken"),
('dump-source', None,
"dump generated source files before attempting to compile them"),
]
# The three standard command methods: since the "config" command
# does nothing by default, these are empty.
def initialize_options(self):
self.compiler = None
self.cc = None
self.include_dirs = None
self.libraries = None
self.library_dirs = None
# maximal output for now
self.noisy = 1
self.dump_source = 1
# list of temporary files generated along-the-way that we have
# to clean at some point
self.temp_files = []
def finalize_options(self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.libraries is None:
self.libraries = []
elif isinstance(self.libraries, str):
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
def run(self):
pass
# Utility methods for actual "config" commands. The interfaces are
# loosely based on Autoconf macros of similar names. Sub-classes
# may use these freely.
def _check_compiler(self):
"""Check that 'self.compiler' really is a CCompiler object;
if not, make it one.
"""
# We do this late, and only on-demand, because this is an expensive
# import.
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run, force=1)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile(self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
file = open(filename, "w")
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
file.close()
return filename
def _preprocess(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link(self, body, headers, include_dirs, libraries, library_dirs,
lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable([obj], prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean(self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", ' '.join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# XXX these ignore the dry-run flag: what to do, what to do? even if
# you want a dry-run build, you still need some sort of configuration
# info. My inclination is to make it up to the real config command to
# consult 'dry_run', and assume a default (minimal) configuration if
# true. The problem with trying to do it here is that you'd have to
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file from 'body' (a string containing lines
of C/C++ code) and 'headers' (a list of header files to include)
and run it through the preprocessor. Return true if the
preprocessor succeeded, false if there were any errors.
('body' probably isn't of much use, but what the heck.)
"""
from distutils.ccompiler import CompileError
self._check_compiler()
ok = True
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = False
self._clean()
return ok
def search_cpp(self, pattern, body=None, headers=None, include_dirs=None,
lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
file = open(out)
match = False
while True:
line = file.readline()
if line == '':
break
if pattern.search(line):
match = True
break
file.close()
self._clean()
return match
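    # Illustrative sketch (not part of distutils): a subclass could grep the
    # preprocessed output of a header for a known declaration. The body text
    # is supplied because _gen_temp_sourcefile() writes it verbatim.
    def _example_header_declares_printf(self):
        return self.search_cpp(r'\bprintf\b', body='/* probe */',
                               headers=['stdio.h'])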
def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
"""Try to compile a source file built from 'body' and 'headers'.
Return true on success, false otherwise.
"""
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = True
except CompileError:
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile and link a source file, built from 'body' and
'headers', to executable form. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
ok = True
except (CompileError, LinkError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
self.spawn([exe])
ok = True
except (CompileError, LinkError, DistutilsExecError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func(self, func, headers=None, include_dirs=None,
libraries=None, library_dirs=None, decl=0, call=0):
"""Determine if function 'func' is available by constructing a
source file that refers to 'func', and compiles and links it.
If everything succeeds, returns true; otherwise returns false.
The constructed source file starts out by including the header
files listed in 'headers'. If 'decl' is true, it then declares
'func' (as "int func()"); you probably shouldn't supply 'headers'
and set 'decl' true in the same call, or you might get errors about
        conflicting declarations for 'func'. Finally, the constructed
'main()' function either references 'func' or (if 'call' is true)
calls it. 'libraries' and 'library_dirs' are used when
linking.
"""
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = "\n".join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
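    # Illustrative sketch (not part of distutils): probe libm for 'sin',
    # declaring and calling it in the generated stub program.
    def _example_probe_sin(self):
        return self.check_func('sin', libraries=['m'], decl=1, call=1)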
def check_lib(self, library, library_dirs=None, headers=None,
include_dirs=None, other_libraries=[]):
"""Determine if 'library' is available to be linked against,
without actually checking that any particular symbols are provided
by it. 'headers' will be used in constructing the source file to
be compiled, but the only effect of this is to check if all the
header files listed are available. Any libraries listed in
'other_libraries' will be included in the link, in case 'library'
has symbols that depend on other libraries.
"""
self._check_compiler()
return self.try_link("int main (void) { }", headers, include_dirs,
[library] + other_libraries, library_dirs)
    def check_header(self, header, include_dirs=None, library_dirs=None,
                     lang="c"):
        """Determine if the system header file named by 'header'
exists and can be found by the preprocessor; return true if so,
false otherwise.
"""
return self.try_cpp(body="/* No body */", headers=[header],
include_dirs=include_dirs)
def dump_file(filename, head=None):
    """Dump a file's content via log.info.
    If head is not None, it is logged before the file content.
"""
if head is None:
log.info('%s' % filename)
else:
log.info(head)
file = open(filename)
try:
log.info(file.read())
finally:
file.close()
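# Illustrative sketch (not part of distutils): a minimal config subclass, as
# a setup.py might define one, probing for a header and a linkable library.
class example_config(config):
    def run(self):
        have_header = self.check_header('zlib.h')
        have_lib = self.check_lib('z')
        log.info('zlib usable: %s', have_header and have_lib)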
| bsd-2-clause |
merenlab/anvio | anvio/drivers/MODELLER.py | 1 | 40930 | # coding: utf-8
"""
Interface to MODELLER (https://salilab.org/modeller/).
"""
import os
import anvio
import shutil
import argparse
import subprocess
import pandas as pd
import anvio.utils as utils
import anvio.fastalib as u
import anvio.terminal as terminal
import anvio.constants as constants
import anvio.filesnpaths as filesnpaths
from anvio.drivers import diamond
from anvio.errors import ConfigError, ModellerError, ModellerScriptError, FilesNPathsError
__author__ = "Evan Kiefl"
__copyright__ = "Copyright 2016, The anvio Project"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Evan Kiefl"
__email__ = "kiefl.evan@gmail.com"
up_to_date_modeller_exec = "mod9.23" # default exec to use
J = lambda x, y: os.path.join(x, y)
class MODELLER:
"""Driver class for MODELLER
This class is a driver to run MODELLER scripts. MODELLER scripts are written
in python 2.3 which is the language MODELLER used when this driver was written.
Parameters
==========
args : argparse.Namespace object
Check __init__ for allowable attributes
target_fasta_path: str
        Path to amino acid sequence fasta file with 1 sequence, the gene to be modelled. The defline
        should be an integer (this class assumes that integer is the gene's gene caller ID)
directory: str, None
Path to directory that MODELLER will be run in. If None, temp dir will be created
lazy_init : bool, False
If True, check_MODELLER will not be called
skip_warnings : bool, False
If True, all warnings will be suppressed
Notes
=====
- You can add MODELLER scripts by storing them in anvio/data/misc/MODELLER/scripts. Each script
should have its own function in this class. For example, align_to_templates.py is a script
anvi'o has found in that directory and has a corresponding function in this class called
self.run_align_to_templates. Please see that method if you want to add your own script.
"""
def __init__(self, args, target_fasta_path, directory=None, run=terminal.Run(),
lazy_init=False, skip_warnings=False, check_db_only=False):
self.args = args
self.run = run
if skip_warnings and not anvio.DEBUG:
self.run.verbose = False
self.lazy_init = lazy_init
self.check_db_only = check_db_only
self.target_fasta_path = target_fasta_path
self.directory = directory if directory else filesnpaths.get_temp_directory_path()
A = lambda x, t: t(args.__dict__[x]) if x in self.args.__dict__ else None
null = lambda x: x
self.scoring_method = A('scoring_method', str) or 'DOPE_score'
self.very_fast = A('very_fast', bool) or False
self.executable = A('modeller_executable', null) or up_to_date_modeller_exec
self.num_models = A('num_models', int) or 5
self.modeller_database = A('modeller_database', str) or 'pdb_95'
self.max_number_templates = A('max_number_templates', null) or 5
self.percent_cutoff = A('percent_cutoff', null) or 30
self.alignment_fraction_cutoff = A('alignment_fraction_cutoff', null) or 0.80
self.deviation = A('deviation', null) or 4
self.pdb_db_path = A('pdb_db', null)
self.offline_mode = A('offline_mode', null)
        # All MODELLER scripts are housed in self.scripts_folder
self.scripts_folder = constants.default_modeller_scripts_dir
self.alignment_pap_path = None
self.alignment_pir_path = None
self.get_template_path = None
self.target_pir_path = None
self.template_family_matrix_path = None
self.template_info_path = None
self.template_pdb_dir = None
self.model_info = None
self.pdb_db = None
self.use_pdb_db = False
self.logs = {}
self.scripts = {}
# All MODELLER databases are housed in self.database_dir
self.database_dir = constants.default_modeller_database_dir
# store the original directory so we can cd back and forth between
# self.directory and self.start_dir
self.start_dir = os.getcwd()
if self.check_db_only:
self.check_database()
return
self.sanity_check()
self.corresponding_gene_call = self.get_corresponding_gene_call_from_target_fasta_path()
# as reward, whoever called this class will receive self.out when they run self.process()
self.out = {
"templates" : {"pdb_id": [], "chain_id": [], "proper_percent_similarity": [], "percent_similarity": [], "align_fraction":[]},
"models" : {"molpdf": [], "GA341_score": [], "DOPE_score": [], "picked_as_best": []},
"corresponding_gene_call" : self.corresponding_gene_call,
"structure_exists" : False,
"best_model_path" : None,
"best_score" : None,
"scoring_method" : self.scoring_method,
"percent_cutoff" : self.percent_cutoff,
"alignment_fraction_cutoff" : self.alignment_fraction_cutoff,
"very_fast" : self.very_fast,
"deviation" : self.deviation,
"directory" : self.directory,
}
# copy fasta into the working directory
try:
shutil.copy2(self.target_fasta_path, self.directory)
self.target_fasta_path = J(self.directory, self.target_fasta_path)
except shutil.SameFileError:
pass
def get_corresponding_gene_call_from_target_fasta_path(self):
"""corresponding_gene_call is assumed to be the defline of self.args.target_fasta_path"""
target_fasta = u.SequenceSource(self.target_fasta_path, lazy_init=False)
while next(target_fasta):
corresponding_gene_call = target_fasta.id
target_fasta.close()
return corresponding_gene_call
def load_pdb_db(self):
"""Try loading a PDB database with path equal to self.pdb_db_path
Modifies self.pdb_db and self.use_pdb_db
"""
if not self.pdb_db_path:
self.pdb_db_path = constants.default_pdb_database_path
ok_if_absent = True if self.pdb_db_path == constants.default_pdb_database_path else False
if filesnpaths.is_file_exists(self.pdb_db_path, dont_raise=ok_if_absent):
# The user has a database there! Try and load it
self.pdb_db = anvio.structureops.PDBDatabase(argparse.Namespace(pdb_database_path=self.pdb_db_path))
self.pdb_db.check_or_create_db()
self.pdb_db.get_stored_structure_ids()
self.use_pdb_db = True
else:
self.use_pdb_db = False
def process(self):
timer = terminal.Timer()
try:
self.load_pdb_db()
timer.make_checkpoint('PDB DB loaded')
self.run_fasta_to_pir()
timer.make_checkpoint('Converted gene FASTA to PIR')
self.check_database()
timer.make_checkpoint('Checked databases')
self.run_search_and_parse_results()
timer.make_checkpoint('Ran DIAMOND search and parsed hits')
self.get_structures()
timer.make_checkpoint('Obtained template structures')
self.run_align_to_templates(self.list_of_template_code_and_chain_ids)
timer.make_checkpoint('Sequence aligned to templates')
self.run_get_model(self.num_models, self.deviation, self.very_fast)
timer.make_checkpoint('Ran structure predictions')
self.tidyup()
self.pick_best_model()
self.run_add_chain_identifiers_to_best_model()
timer.make_checkpoint('Picked best model and tidied up')
self.out["structure_exists"] = True
except self.EndModeller:
pass
except ModellerScriptError as e:
print(e)
finally:
timer.gen_report(title='ID %s Time Report' % str(self.corresponding_gene_call), run=self.run)
self.abort()
return self.out
def get_structures(self):
"""Populate self.template_pdb_dir with template structure PDBs"""
self.template_pdb_dir = os.path.join(self.directory, "%s_TEMPLATE_PDBS" % str(self.corresponding_gene_call))
filesnpaths.gen_output_directory(self.template_pdb_dir) # does nothing if already exists
pdb_paths = {}
for code, chain in self.list_of_template_code_and_chain_ids:
five_letter_id = code + chain
requested_path = J(self.template_pdb_dir, '%s.pdb' % code)
if self.use_pdb_db and five_letter_id in self.pdb_db.stored_structure_ids:
# This chain exists in the external database. Export it and get the path
try:
path = self.pdb_db.export_pdb(five_letter_id, requested_path)
source = 'Offline DB'
except ConfigError:
# The ID is in the DB, but the PDB content is None
path = None
source = 'Nowhere'
elif not self.offline_mode:
# This chain doesn't exist in an external database, and internet access is assumed.
# We try and download the protein from the RCSB PDB server. If downloading fails,
# path is None
path = utils.download_protein_structure(code, chain=chain, output_path=requested_path, raise_if_fail=False)
source = 'RCSB PDB Server'
else:
# Internet access is not assumed, and the chain wasn't in the external database
path = None
source = 'Nowhere'
self.run.info('%s obtained from' % five_letter_id, source)
if path:
pdb_paths[five_letter_id] = path
# remove templates whose structures are not available
self.list_of_template_code_and_chain_ids = [
(code, chain_code)
for code, chain_code in self.list_of_template_code_and_chain_ids
if code + chain_code in pdb_paths
]
if not len(self.list_of_template_code_and_chain_ids):
self.run.warning("No structures of the homologous proteins (templates) were available. Probably something "
"is wrong. Stopping here.")
raise self.EndModeller
self.run.info("Structures obtained for", ", ".join([code[0]+code[1] for code in self.list_of_template_code_and_chain_ids]))
def sanity_check(self, skip_warnings=False):
# the directory files will be dumped into (can exist but must be empty)
if filesnpaths.is_file_exists(self.directory, dont_raise=True):
filesnpaths.is_output_dir_writable(self.directory)
if not filesnpaths.is_dir_empty(self.directory):
raise ModellerError("You cannot give MODELLER a non-empty directory to work in.")
else:
filesnpaths.gen_output_directory(self.directory)
if not self.lazy_init:
self.executable = check_MODELLER(self.executable)
# does target_fasta_path point to a fasta file?
utils.filesnpaths.is_file_fasta_formatted(self.target_fasta_path)
# make sure target_fasta is valid
target_fasta = u.SequenceSource(self.target_fasta_path, lazy_init=False)
if target_fasta.total_seq != 1:
raise ConfigError("MODELLER :: The input FASTA file must have exactly one sequence. "
"You provided one with {}.".format(target_fasta.total_seq))
try:
while next(target_fasta):
int(target_fasta.id)
except:
raise ConfigError("MODELLER :: The defline of this fasta file must be an integer")
target_fasta.close()
# parameter consistencies
if self.deviation < 0.5 or self.deviation > 20:
self.run.warning("You realize that deviation is given in angstroms, right? You chose {}".format(self.deviation))
if self.very_fast and self.num_models > 1:
self.num_models = 1
self.run.warning("Since you chose --very-fast, there will be little difference, if at all, between models. Anvi'o "
"authoritatively sets --num-models to 1 to save you time.")
def pick_best_model(self):
"""Pick best model based on self.scoring_method and rename to gene_<corresponding_gene_call>.pdb"""
# initialize new model_info column
self.model_info["picked_as_best"] = False
# For these scores, lower is better
        if self.scoring_method in ["molpdf", "DOPE_score"]:
best_basename = self.model_info.loc[self.model_info[self.scoring_method].idxmin(axis=0), "name"]
self.model_info.loc[self.model_info[self.scoring_method].idxmin(axis=0), "picked_as_best"] = True
# For these scores, higher is better
if self.scoring_method == "GA341_score":
best_basename = self.model_info.loc[self.model_info[self.scoring_method].idxmax(axis=0), "name"]
self.model_info.loc[self.model_info[self.scoring_method].idxmax(axis=0), "picked_as_best"] = True
new_best_file_path = J(self.directory, "gene_{}.pdb".format(self.corresponding_gene_call))
os.rename(J(self.directory, best_basename), new_best_file_path)
# append model information to self.out
for model_index in self.model_info.index:
self.out["models"]["molpdf"].append(self.model_info.loc[model_index, "molpdf"])
self.out["models"]["GA341_score"].append(self.model_info.loc[model_index, "GA341_score"])
self.out["models"]["DOPE_score"].append(self.model_info.loc[model_index, "DOPE_score"])
self.out["models"]["picked_as_best"].append(self.model_info.loc[model_index, "picked_as_best"])
# append pdb path to self.out
self.out["best_model_path"] = new_best_file_path
# append the best score to self.out
self.out["best_score"] = self.model_info.loc[self.model_info["picked_as_best"] == True, self.scoring_method]
def abort(self):
"""Gene was not modelled. Return to the starting directory"""
os.chdir(self.start_dir)
def tidyup(self):
"""Tidyup operations after running get_model.py
        Some of the files in here are unnecessary, some of the names are disgusting. Rename from
"2.B99990001.pdb" to "gene_2_Model001.pdb" if normal model. Rename from "cluster.opt" to
"gene_2_ModelAvg.pdb"
"""
if not "get_model.py" in self.scripts.keys():
raise ConfigError("You are out of line calling tidyup without running get_model.py")
# remove all copies of all scripts that were ran
for script_name, file_path in self.scripts.items():
os.remove(file_path)
for model in self.model_info.index:
basename = self.model_info.loc[model, "name"]
# The default names are bad. This is where they are defined
if basename == "cluster.opt":
new_basename = "gene_{}_ModelAvg.pdb".format(self.corresponding_gene_call)
else:
model_num = os.path.splitext(basename)[0][-3:]
new_basename = "gene_{}_Model{}.pdb".format(self.corresponding_gene_call, model_num)
            # rename the files (and reflect changes in self.model_info)
file_path = J(self.directory, basename)
new_file_path = J(self.directory, new_basename)
os.rename(file_path, new_file_path)
self.model_info.loc[model, "name"] = new_basename
def run_add_chain_identifiers_to_best_model(self):
"""Add chain identifier to best model to appease some third-party services"""
script_name = "add_chain_identifiers_to_best_model.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
dir_name, base_name = os.path.split(self.out['best_model_path'])
command = [self.executable,
script_name,
dir_name,
base_name]
self.run_command(command, script_name=script_name)
def run_get_model(self, num_models, deviation, very_fast):
"""Run get model
This is the magic of MODELLER. Based on the template alignment file, the structures of the
templates, and satisfaction of physical constraints, the target protein structure is
modelled without further user input.
"""
script_name = "get_model.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
# model info
self.model_info_path = J(self.directory, "gene_{}_ModelInfo.txt".format(self.corresponding_gene_call))
self.run.info("Number of models", num_models)
self.run.info("Deviation", str(deviation) + " angstroms")
self.run.info("Fast optimization", str(very_fast))
if not deviation and num_models > 1:
            raise ConfigError("run_get_model :: deviation must be > 0 if num_models > 1.")
command = [self.executable,
script_name,
self.alignment_pir_path,
self.corresponding_gene_call,
self.template_info_path,
str(num_models),
str(deviation),
str(int(very_fast)),
self.model_info_path]
self.run_command(command,
script_name = script_name,
check_output = [self.model_info_path])
# load the model results information as a dataframe
self.model_info = pd.read_csv(self.model_info_path, sep="\t", index_col=False)
self.run.info("Model info", os.path.basename(self.model_info_path))
def run_align_to_templates(self, templates_info):
"""Align the sequence to the best candidate protein sequences
        After identifying the best candidate proteins based on sequence data, this function aligns
        the target to those proteins. The resulting alignment file is the main input (besides the
        structures themselves) for homology modelling.
Parameters
==========
templates_info : list of 2-tuples
The zeroth element is the 4-letter protein code and the first element is the chain
            ID. E.g. [('4sda', 'A'), ('iq8p', 'E')]
"""
script_name = "align_to_templates.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
# First, write ids and chains to file read by align_to_templates.py MODELLER script
self.template_info_path = J(self.directory, "gene_{}_BestTemplateIDs.txt".format(self.corresponding_gene_call))
f = open(self.template_info_path, "w")
for match in templates_info:
f.write("{}\t{}\n".format(match[0], match[1]))
f.close()
# name of the output. .pir is the standard format for MODELLER, .pap is human readable
        # protein_family computes a matrix comparing the different templates against one another
self.alignment_pir_path = J(self.directory, "gene_{}_Alignment.ali".format(self.corresponding_gene_call))
self.alignment_pap_path = J(self.directory, "gene_{}_Alignment.pap".format(self.corresponding_gene_call))
self.template_family_matrix_path = J(self.directory, "gene_{}_ProteinFamily.mat".format(self.corresponding_gene_call))
command = [self.executable,
script_name,
self.target_pir_path,
self.corresponding_gene_call,
self.template_info_path,
self.alignment_pir_path,
self.alignment_pap_path,
self.template_family_matrix_path]
self.run_command(command,
script_name = script_name,
check_output = [self.alignment_pir_path,
self.alignment_pap_path,
self.template_family_matrix_path])
self.run.info("Similarity matrix of templates", os.path.basename(self.template_family_matrix_path))
self.run.info("Target alignment to templates", ", ".join([os.path.basename(self.alignment_pir_path),
os.path.basename(self.alignment_pap_path)]))
    def run_search_and_parse_results(self):
        """Search the target protein against the database using sequence alone"""
# Change to MODELLER working directory
os.chdir(self.directory)
columns = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gaps', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
driver = diamond.Diamond(
query_fasta=self.target_fasta_path,
target_fasta=J(self.database_dir, self.modeller_database + '.dmnd'),
outfmt=' '.join(['6'] + columns),
run=terminal.Run(verbose=False),
progress=terminal.Progress(verbose=False),
)
driver.blastp()
# Change back to user directory
os.chdir(self.start_dir)
search_df = driver.view_as_dataframe(J(self.directory, driver.tabular_output_path))
matches_found = search_df.shape[0]
if not matches_found:
self.run.warning("No proteins with homologous sequence were found for {}. No structure will be modelled".format(self.corresponding_gene_call))
raise self.EndModeller
# We need the gene length for pident
target_fasta = u.SequenceSource(self.target_fasta_path, lazy_init=False)
while next(target_fasta):
gene_length = len(target_fasta.seq)
# add some useful columns
search_df["code"] = search_df["sseqid"].str[:-1]
search_df["chain"] = search_df["sseqid"].str[-1]
search_df["align_fraction"] = (search_df["length"] - search_df["gaps"]) / gene_length
search_df["proper_pident"] = search_df["pident"] * search_df["align_fraction"]
# Find best match for align fraction and pident
code_chain_id_of_best = tuple(search_df.iloc[search_df['proper_pident'].argmax()][['code', 'chain']].values)
best_hit = search_df.loc[
(search_df['code'] == code_chain_id_of_best[0]) & \
(search_df['chain'] == code_chain_id_of_best[1]), ['pident', 'align_fraction']
].iloc[0]
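        # Worked example of the ranking metric (illustrative numbers): a hit
        # with pident=90.0 over align_fraction=0.80 scores proper_pident=72.0
        # and outranks a hit with pident=95.0 over align_fraction=0.60 (57.0).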
# filter results by self.percent_cutoff and self.alignment_fraction_cutoff
search_df = search_df[search_df["pident"] >= self.percent_cutoff]
search_df = search_df[search_df["align_fraction"] >= self.alignment_fraction_cutoff]
# Rank by the alignment fraction times the percent id
search_df = search_df.sort_values("proper_pident", ascending=False)
# If more than 1 template in 1 PDB id, just choose 1
search_df = search_df.drop_duplicates('code', keep='first')
matches_after_filter = len(search_df)
if not matches_after_filter:
self.run.warning("Gene {} did not have a search result with percent identicalness above or equal "
"to {}% and alignment fraction above {}%. The best match was chain {} of https://www.rcsb.org/structure/{}, which had a "
"percent identicalness of {:.2f}% and an alignment fraction of {:.3f}. No structure will be modelled.".\
format(self.corresponding_gene_call,
self.percent_cutoff,
self.alignment_fraction_cutoff,
code_chain_id_of_best[1],
code_chain_id_of_best[0],
best_hit['pident'],
best_hit['align_fraction']))
raise self.EndModeller
# Filter out templates with proper_pident more than 5% less than best match
# http://merenlab.org/2018/09/04/getting-started-with-anvi-structure/#how-much-do-templates-matter
search_df = search_df[search_df['proper_pident'] >= (search_df['proper_pident'].max() - 5)]
# get up to self.modeller.max_number_templates of those with the highest proper_ident scores.
search_df = search_df.iloc[:min([len(search_df), self.max_number_templates])]
# Get their chain and 4-letter ids
self.list_of_template_code_and_chain_ids = list(zip(search_df["code"], search_df["chain"]))
self.run.info("Max number of templates allowed", self.max_number_templates)
self.run.info("Number of candidate templates", matches_found)
self.run.info("After >{}% identical filter".format(self.percent_cutoff), matches_after_filter)
self.run.info("Number accepted as templates", len(self.list_of_template_code_and_chain_ids))
# update user on which templates are used, and write the templates to self.out
for i in range(len(self.list_of_template_code_and_chain_ids)):
pdb_id, chain_id = self.list_of_template_code_and_chain_ids[i]
proper_percent_similarity = search_df["proper_pident"].iloc[i]
percent_similarity = search_df["pident"].iloc[i]
align_fraction = search_df["align_fraction"].iloc[i]
self.out["templates"]["pdb_id"].append(pdb_id)
self.out["templates"]["chain_id"].append(chain_id)
self.out["templates"]["proper_percent_similarity"].append(proper_percent_similarity)
self.out["templates"]["percent_similarity"].append(percent_similarity)
self.out["templates"]["align_fraction"].append(align_fraction)
self.run.info("Template {}".format(i+1),
"Protein ID: {}, Chain {} ({:.1f}% identical, {:.2f} align fraction)".format(pdb_id, chain_id, percent_similarity, align_fraction))
def check_database(self):
"""Setup the database files
Downloads the .pir file if it is missing
Binarizes .pir file if .bin is missing
Creates the .dmnd file if it is missing
"""
bin_db_path = J(self.database_dir, self.modeller_database + ".bin")
pir_db_path = J(self.database_dir, self.modeller_database + ".pir")
bin_exists = utils.filesnpaths.is_file_exists(bin_db_path, dont_raise=True)
pir_exists = utils.filesnpaths.is_file_exists(pir_db_path, dont_raise=True)
if bin_exists and pir_exists:
# We good
pass
else:
if not pir_exists:
# Download .pir
self.run.warning("Anvi'o looked in {} for a database with the name {} and with an extension "
"of either .bin or .pir, but didn't find anything matching that "
"criteria. Anvi'o will try and download the best database it knows of from "
"https://salilab.org/modeller/downloads/pdb_95.pir.gz and use that. "
"You can checkout https://salilab.org/modeller/ for more info about the pdb_95 "
"database".format(self.database_dir, self.modeller_database))
db_download_path = os.path.join(self.database_dir, "pdb_95.pir.gz")
utils.download_file("https://salilab.org/modeller/downloads/pdb_95.pir.gz", db_download_path)
utils.run_command(['gzip', '-d', db_download_path], log_file_path=filesnpaths.get_temp_file_path())
# Binarize .pir (make .bin)
self.run.warning("Your database is not in binary format. That means accessing its contents is slower "
"than it could be. Anvi'o is going to make a binary format. Just FYI")
self.run_binarize_database(pir_db_path, bin_db_path)
dmnd_db_path = J(self.database_dir, self.modeller_database + '.dmnd')
if os.path.exists(dmnd_db_path):
return
self.run.warning("Your diamond database does not exist. It will be created.")
script_name = "pir_to_fasta.py"
self.copy_script_to_directory(script_name)
input_pir_path = J(self.database_dir, self.modeller_database + '.pir')
fasta_path = J(self.database_dir, self.modeller_database + '.fa')
dmnd_path = J(self.database_dir, self.modeller_database)
command = [self.executable,
script_name,
input_pir_path,
fasta_path]
self.run_command(command,
script_name=script_name,
rename_log=False)
temp = u.FastaOutput(filesnpaths.get_temp_file_path())
fasta = u.SequenceSource(fasta_path)
while next(fasta):
temp.write_id(fasta.id)
temp.write_seq(fasta.seq.replace('-', '').replace('.', 'X'))
shutil.move(temp.output_file_path, fasta_path)
fasta.close()
temp.close()
driver = diamond.Diamond(
query_fasta=fasta_path,
run=terminal.Run(verbose=False),
progress=terminal.Progress(verbose=False),
)
driver.makedb(output_file_path=dmnd_path)
os.remove(fasta_path)
def run_binarize_database(self, pir_db_path, bin_db_path):
"""Binarizes a .pir file
Databases can be read in .pir format, but can be more quickly read in binarized format. This
does that.
Parameters
==========
pir_db_path : str
Path to existing .pir file
bin_db_path : str
Path to the will-be-made .bin file
"""
script_name = "binarize_database.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
command = [self.executable,
script_name,
pir_db_path,
bin_db_path]
self.run_command(command,
script_name=script_name,
check_output=[bin_db_path],
rename_log=False)
self.run.info("New database", bin_db_path)
def copy_script_to_directory(self, script_name, add_to_scripts_dict=True, directory=None):
"""Copy source script to working directory
All MODELLER scripts are housed in anvio/data/misc/MODELLER/scripts/. This function checks
that script_name is in anvio/data/misc/MODELLER/scripts/ and then copies the script into
self.directory. Why copy into self.directory? Whenever a script is ran by MODELLER, a log
file is output in the directory of the script. By copying the script into self.directory,
the log is written there instead of anvio/data/misc/MODELLER/scripts/.
"""
if not directory:
directory = self.directory
script_path = J(self.scripts_folder, script_name)
try:
utils.filesnpaths.is_file_exists(script_path)
except:
raise ConfigError("MODELLER :: The script {} is not in {}".format(script_name, self.scripts_folder))
# add script to scripts dictionary
if add_to_scripts_dict:
self.scripts[script_name] = J(directory, script_name)
# If all is well, copy script to directory
shutil.copy2(script_path, directory)
def run_fasta_to_pir(self):
"""Convert a fasta file to a pir format.
        MODELLER uses its own .pir format for search and alignment instead of .fasta. This script
does the conversion. An example pir formatted sequence shown here:
>P1;TvLDH
sequence:TvLDH:::::::0.00: 0.00
MSEAAHVLITGAAGQIGYILSHWIASGELYGDRQVYLHLLDIPPAMNRLTALTMELEDCAFPHLAGFVATTDPKA
AFKDIDCAFLVASMPLKPGQVRADLISSNSVIFKNTGEYLSKWAKPSVKVLVIGNPDNTNCEIAMLHAKNLKPEN
FSSLSMLDQNRAYYEVASKLGVDVKDVHDIIVWGNHGESMVADLTQATFTKEGKTQKVVDVLDHDYVFDTFFKKI
GHRAWDILEHRGFTSAASPTKAAIQHMKAWLFGTAPGEVLSMGIPVPEGNPYGIKPGVVFSFPCNVDKEGKIHVV
EGFKVNDWLREKLDFTEKDLFHEKEIALNHLAQGG*
You can find more details via https://salilab.org/modeller/tutorial/basic.html
"""
script_name = "fasta_to_pir.py"
# check script exists, then copy the script into the working directory
self.copy_script_to_directory(script_name)
# name pir file by the corresponding_gene_call (i.e. defline of the fasta)
self.target_pir_path = J(self.directory, "{}.pir".format(self.corresponding_gene_call))
command = [self.executable,
script_name,
self.target_fasta_path,
self.target_pir_path]
self.run_command(command,
script_name = script_name,
check_output = [self.target_pir_path])
self.run.info("Target alignment file", os.path.basename(self.target_pir_path))
def run_command(self, command, script_name, check_output=None, rename_log=True):
"""Base routine for running MODELLER scripts
Parameters
==========
command : list of strs
E.g. ['mod921', 'test_script.py', 'input1', 'input2'] corresponds to the command line
"mod9.21 test_script.py input1 input2"
script_name : str
E.g. 'test_script.py'
check_output : list, None
Verify that this list of filepaths exist after the command is ran
rename_log : bool, True
MODELLER outputs a log that is renamed to reflect the command and gene used
"""
# first things first, we CD into MODELLER's directory
os.chdir(self.directory)
# try and execute the command
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
# if MODELLER script gave a traceback, it is caught here and everything is stopped
if process.returncode:
error = error.decode('utf-8').strip()
error = "\n" + "\n".join(error.split('\n'))
print(terminal.c(error, color='red'))
if not self.check_db_only:
self.out["structure_exists"] = False
raise ModellerScriptError("The MODELLER script {} did not execute properly. Hopefully it is clear "
"from the above error message. No structure is going to be modelled."\
.format(script_name))
# If we made it this far, the MODELLER script ran to completion. Now check outputs exist
if check_output:
for output in check_output:
utils.filesnpaths.is_file_exists(output)
# MODELLER outputs a log that we rename right here, right now
old_log_name = os.path.splitext(script_name)[0] + ".log"
if rename_log:
new_log_name = "gene_{}_{}".format(self.corresponding_gene_call, old_log_name)
os.rename(old_log_name, new_log_name)
else:
new_log_name = old_log_name
# add to logs
self.logs[script_name] = new_log_name
self.run.info("Log of {}".format(script_name), new_log_name)
# last things last, we CD back into the starting directory
os.chdir(self.start_dir)
class EndModeller(Exception):
pass
def check_MODELLER(executable=None):
"""Test if MODELLER is going to work.
    Checks that the executable exists, that a license is present, and that it can produce the
    expected output of a MODELLER executable. Exists outside of the class MODELLER so it does
    not have to be checked every time the class is initialized.
Parameters
==========
executable : str, None
The string representation of a binary MODELLER program. E.g "mod9.21". If None,
up_to_date_modeller_exec is chosen and tested.
Returns
=======
executable : str
Returns the executable that you _should_ use, which is not necessarily what is input
"""
executable = executable if executable else up_to_date_modeller_exec
scripts_folder = J(os.path.dirname(anvio.__file__), 'data/misc/MODELLER/scripts')
if utils.filesnpaths.is_dir_empty(scripts_folder):
raise ConfigError("Anvi'o houses all its MODELLER scripts in %s, but your directory "
"contains no scripts. Why you did dat?" % scripts_folder)
try:
utils.is_program_exists(executable)
except ConfigError:
*prefix, sub_version = up_to_date_modeller_exec.split('.')
prefix, sub_version = ''.join(prefix), int(sub_version)
for alternate_version in reversed(range(sub_version - 10, sub_version + 10)):
alternate_program = prefix + '.' + str(alternate_version)
if utils.is_program_exists(alternate_program, dont_raise=True):
executable = alternate_program
break
else:
raise ConfigError("Anvi'o needs a MODELLER program to be installed on your system. You didn't specify one "
"(which can be done with `--modeller-executable`), so anvi'o tried the most recent version "
"it knows about: '%s'. If you are certain you have it on your system (for instance you can run it "
"by typing '%s' in your terminal window), you may want to send a detailed bug report. If you "
"don't have it on your system, check out these installation instructions on our website: "
"http://merenlab.org/2016/06/18/installing-third-party-software/#modeller" % (executable, executable))
temp_dir = filesnpaths.get_temp_directory_path()
shutil.copy2(J(scripts_folder, 'fasta_to_pir.py'), temp_dir)
test_script = J(temp_dir, 'fasta_to_pir.py')
test_input = J(os.path.dirname(anvio.__file__), 'tests/sandbox/mock_data_for_structure/proteins.fa')
test_output = J(temp_dir, 'test_out')
command = [executable,
test_script,
test_input,
test_output]
# try and execute the command
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
if process.returncode:
# modeller has failed
error = error.decode('utf-8').strip()
is_licence_key_error = True if error.find('Invalid license key') > -1 else False
if is_licence_key_error:
            # it's a valid modeller program with no license key
license_target_file = error.split('\n')[-1]
raise ConfigError("You're making progress and anvi'o is proud of you! You just need to validate your MODELLER "
"with a license key (it's free). Please go to https://salilab.org/modeller/registration.html "
"to register for a new license. After you receive an e-mail with your key, please open '%s' "
"and replace the characters XXXXX with your own key. Save the file and try again. " % license_target_file)
else:
error = "\n" + "\n".join(error.split('\n'))
print(terminal.c(error, color='red'))
raise ConfigError("The executable you requested is called `%s`, but anvi'o doesn't agree with you that "
"it is a working MODELLER program. That was determined by running the command `%s`, which raised the "
"error seen above. If you want to specify a specific MODELLER program, you can specify it with "
"`--modeller-executable`." % (executable, " ".join(command)))
# no error was raised. now check if output file exists
try:
filesnpaths.is_file_exists(test_output)
except FilesNPathsError:
raise ConfigError("The executable you requested is called `%s`, but anvi'o doesn't agree with you that "
"it is a working MODELLER program. That was determined by running the command `%s`, which did not "
"output the file expected. If you want to specify a specific MODELLER program, you can specify it with "
"`--modeller-executable`." % (executable, " ".join(command)))
return executable
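# Illustrative usage sketch (the namespace fields mirror the A() lookups in
# MODELLER.__init__, and the fasta path is a placeholder, not a real file):
def _example_predict_structure():
    args = argparse.Namespace(num_models=3, very_fast=True)
    driver = MODELLER(args, target_fasta_path='gene_5.fa')
    results = driver.process()
    return results['best_model_path'], results['best_score']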
| gpl-3.0 |
awkspace/ansible | test/runner/lib/cloud/aws.py | 12 | 3262 | """AWS plugin for integration tests."""
from __future__ import absolute_import, print_function
import os
from lib.util import (
ApplicationError,
display,
is_shippable,
)
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if os.path.isfile(aci.ci_key):
return
if is_shippable():
return
super(AwsCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(AwsCloudProvider, self).setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self):
"""
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def configure_environment(self, env, cmd):
"""
:type env: dict[str, str]
:type cmd: list[str]
"""
cmd.append('-e')
cmd.append('@%s' % self.config_path)
cmd.append('-e')
cmd.append('resource_prefix=%s' % self.resource_prefix)
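    # Illustrative effect (values are placeholders): the appends above extend
    # the ansible-playbook invocation with
    #   -e @<config_path> -e resource_prefix=<prefix>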
def on_failure(self, target, tries):
"""
:type target: TestTarget
:type tries: int
"""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)
| gpl-3.0 |
odejesush/tensorflow | venv/lib/python2.7/site-packages/werkzeug/contrib/atom.py | 259 | 15588 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
iso8601 = obj.isoformat()
if obj.tzinfo:
return iso8601
return iso8601 + 'Z'
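# Illustrative behaviour of format_iso8601() (not part of werkzeug): naive
# datetimes are treated as UTC and suffixed with 'Z'.
def _example_iso8601():
    assert format_iso8601(datetime(2014, 1, 1, 12, 30)) == '2014-01-01T12:30:00Z'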
@implements_to_string
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
    :param id: a globally unique id for the feed. Must be a URI. If
not present the `feed_url` is used, but one of both is
required.
    :param updated: the time the feed was last modified. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
Treated as UTC if naive datetime.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'`` or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
    Wherever a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' %
(k, escape(link[k])) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __str__(self):
return self.to_string()
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
    :param id: a globally unique id for the entry. Must be a URI. If
not present the URL is used, but one of both is required.
    :param updated: the time the entry was last modified. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime. Required.
:param author: the author of the entry. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if the feed does not have an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
    Wherever a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author', ())
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.categories = kwargs.get('categories', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' %
(k, escape(link[k])) for k in link)
for category in self.categories:
yield u' <category %s/>\n' % ''.join('%s="%s" ' %
(k, escape(category[k])) for k in category)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __str__(self):
return self.to_string()
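# Minimal usage sketch (illustrative, not part of the original module):
# build one standalone entry and render it; field values are assumed.
#
#     from datetime import datetime
#     entry = FeedEntry(title='Hello', content='<p>Hi there</p>',
#                       id='urn:example:1', updated=datetime.utcnow())
#     print(entry.to_string())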
| apache-2.0 |
aequitas/home-assistant | homeassistant/components/smhi/weather.py | 2 | 6958 | """Support for the Swedish weather institute weather service."""
import asyncio
from datetime import timedelta
import logging
from typing import Dict, List
import aiohttp
import async_timeout
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION, ATTR_FORECAST_PRECIPITATION, ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW, ATTR_FORECAST_TIME, WeatherEntity)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from homeassistant.util import Throttle, slugify
from .const import ATTR_SMHI_CLOUDINESS, ENTITY_ID_SENSOR_FORMAT
_LOGGER = logging.getLogger(__name__)
# Used to map condition from API results
CONDITION_CLASSES = {
'cloudy': [5, 6],
'fog': [7],
'hail': [],
'lightning': [21],
'lightning-rainy': [11],
'partlycloudy': [3, 4],
'pouring': [10, 20],
'rainy': [8, 9, 18, 19],
'snowy': [15, 16, 17, 25, 26, 27],
'snowy-rainy': [12, 13, 14, 22, 23, 24],
'sunny': [1, 2],
'windy': [],
'windy-variant': [],
'exceptional': [],
}
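# Illustrative reverse lookup (symbol value assumed): SMHI symbol 9
# ("rain") falls in the 'rainy' bucket above, which is what the
# `condition` property below returns for a forecast with that symbol.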
# Wait 5 minutes before retrying a failed API connection
RETRY_TIMEOUT = 5 * 60
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=31)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up components.
Can only be called when a user accidentally mentions smhi in the
config. In that case it will be ignored.
"""
pass
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry,
config_entries) -> bool:
"""Add a weather entity from map location."""
location = config_entry.data
name = slugify(location[CONF_NAME])
session = aiohttp_client.async_get_clientsession(hass)
entity = SmhiWeather(
location[CONF_NAME], location[CONF_LATITUDE], location[CONF_LONGITUDE],
session=session)
entity.entity_id = ENTITY_ID_SENSOR_FORMAT.format(name)
config_entries([entity], True)
return True
class SmhiWeather(WeatherEntity):
"""Representation of a weather entity."""
def __init__(self, name: str, latitude: str,
longitude: str,
session: aiohttp.ClientSession = None) -> None:
"""Initialize the SMHI weather entity."""
from smhi import Smhi
self._name = name
self._latitude = latitude
self._longitude = longitude
self._forecasts = None
self._fail_count = 0
self._smhi_api = Smhi(self._longitude, self._latitude, session=session)
@property
def unique_id(self) -> str:
"""Return a unique id."""
return '{}, {}'.format(self._latitude, self._longitude)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self) -> None:
"""Refresh the forecast data from SMHI weather API."""
from smhi.smhi_lib import SmhiForecastException
def fail():
"""Postpone updates."""
self._fail_count += 1
if self._fail_count < 3:
                self.hass.helpers.event.async_call_later(
                    RETRY_TIMEOUT, self.retry_update)
try:
with async_timeout.timeout(10):
self._forecasts = await self.get_weather_forecast()
self._fail_count = 0
except (asyncio.TimeoutError, SmhiForecastException):
_LOGGER.error(
"Failed to connect to SMHI API, retry in 5 minutes")
fail()
    async def retry_update(self, _):
        """Retry refreshing the weather forecast."""
        await self.async_update()
    async def get_weather_forecast(self) -> List:
"""Return the current forecasts from SMHI API."""
return await self._smhi_api.async_get_forecast()
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def temperature(self) -> int:
"""Return the temperature."""
if self._forecasts is not None:
return self._forecasts[0].temperature
return None
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def humidity(self) -> int:
"""Return the humidity."""
if self._forecasts is not None:
return self._forecasts[0].humidity
return None
@property
def wind_speed(self) -> float:
"""Return the wind speed."""
if self._forecasts is not None:
# Convert from m/s to km/h
return round(self._forecasts[0].wind_speed * 18 / 5)
return None
@property
def wind_bearing(self) -> int:
"""Return the wind bearing."""
if self._forecasts is not None:
return self._forecasts[0].wind_direction
return None
@property
def visibility(self) -> float:
"""Return the visibility."""
if self._forecasts is not None:
return self._forecasts[0].horizontal_visibility
return None
@property
def pressure(self) -> int:
"""Return the pressure."""
if self._forecasts is not None:
return self._forecasts[0].pressure
return None
@property
def cloudiness(self) -> int:
"""Return the cloudiness."""
if self._forecasts is not None:
return self._forecasts[0].cloudiness
return None
@property
def condition(self) -> str:
"""Return the weather condition."""
if self._forecasts is None:
return None
return next((
k for k, v in CONDITION_CLASSES.items()
if self._forecasts[0].symbol in v), None)
@property
def attribution(self) -> str:
"""Return the attribution."""
return 'Swedish weather institute (SMHI)'
@property
def forecast(self) -> List:
"""Return the forecast."""
if self._forecasts is None or len(self._forecasts) < 2:
return None
data = []
for forecast in self._forecasts[1:]:
condition = next((
k for k, v in CONDITION_CLASSES.items()
if forecast.symbol in v), None)
data.append({
ATTR_FORECAST_TIME: forecast.valid_time.isoformat(),
ATTR_FORECAST_TEMP: forecast.temperature_max,
ATTR_FORECAST_TEMP_LOW: forecast.temperature_min,
ATTR_FORECAST_PRECIPITATION:
round(forecast.total_precipitation, 1),
ATTR_FORECAST_CONDITION: condition,
})
return data
@property
def device_state_attributes(self) -> Dict:
"""Return SMHI specific attributes."""
if self.cloudiness:
return {ATTR_SMHI_CLOUDINESS: self.cloudiness}
| apache-2.0 |
huijunwu/heron | heronpy/streamlet/impl/reducebykeyandwindowbolt.py | 1 | 5269 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""module for join bolt: ReduceByKeyAndWindowBolt"""
import collections
from heronpy.api.bolt.window_bolt import SlidingWindowBolt
from heronpy.api.custom_grouping import ICustomGrouping
from heronpy.api.component.component_spec import GlobalStreamId
from heronpy.api.stream import Grouping
from heronpy.streamlet.keyedwindow import KeyedWindow
from heronpy.streamlet.streamlet import Streamlet
from heronpy.streamlet.window import Window
from heronpy.streamlet.windowconfig import WindowConfig
from heronpy.streamlet.impl.streamletboltbase import StreamletBoltBase
# pylint: disable=unused-argument
class ReduceByKeyAndWindowBolt(SlidingWindowBolt, StreamletBoltBase):
"""ReduceByKeyAndWindowBolt"""
FUNCTION = 'function'
WINDOWDURATION = SlidingWindowBolt.WINDOW_DURATION_SECS
SLIDEINTERVAL = SlidingWindowBolt.WINDOW_SLIDEINTERVAL_SECS
def initialize(self, config, context):
super(ReduceByKeyAndWindowBolt, self).initialize(config, context)
if ReduceByKeyAndWindowBolt.FUNCTION not in config:
raise RuntimeError("FUNCTION not specified in reducebywindow operator")
self.reduce_function = config[ReduceByKeyAndWindowBolt.FUNCTION]
if not callable(self.reduce_function):
raise RuntimeError("Reduce Function has to be callable")
@staticmethod
def _add(key, value, mymap):
if key in mymap:
mymap[key].append(value)
else:
mymap[key] = [value]
def processWindow(self, window_config, tuples):
# our temporary map
mymap = {}
for tup in tuples:
userdata = tup.values[0]
if not isinstance(userdata, collections.Iterable) or len(userdata) != 2:
raise RuntimeError("ReduceByWindow tuples must be iterable of length 2")
self._add(userdata[0], userdata[1], mymap)
for (key, values) in list(mymap.items()):
result = values[0]
for value in values[1:]:
result = self.reduce_function(result, value)
keyedwindow = KeyedWindow(key, Window(window_config.start, window_config.end))
self.emit([(keyedwindow, result)], stream='output')
# pylint: disable=unused-argument
class ReduceGrouping(ICustomGrouping):
def prepare(self, context, component, stream, target_tasks):
self.target_tasks = target_tasks
def choose_tasks(self, values):
assert isinstance(values, list) and len(values) == 1
userdata = values[0]
if not isinstance(userdata, collections.Iterable) or len(userdata) != 2:
raise RuntimeError("Tuples going to reduce must be iterable of length 2")
# only emits to the first task id
hashvalue = hash(userdata[0])
target_index = hashvalue % len(self.target_tasks)
return [self.target_tasks[target_index]]
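# Illustrative routing (task ids assumed): with target_tasks == [3, 7],
# every tuple keyed 'a' hashes to the same index, so all values for a
# given key land on the same bolt instance, which is the property the
# reduce-by-key step relies on.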
# pylint: disable=protected-access
class ReduceByKeyAndWindowStreamlet(Streamlet):
"""ReduceByKeyAndWindowStreamlet"""
def __init__(self, window_config, reduce_function, parent):
super(ReduceByKeyAndWindowStreamlet, self).__init__()
if not isinstance(window_config, WindowConfig):
raise RuntimeError("window config has to be a WindowConfig")
if not callable(reduce_function):
raise RuntimeError("ReduceByKeyAndWindow function has to be callable")
if not isinstance(parent, Streamlet):
raise RuntimeError("Parent of Filter Streamlet has to be a Streamlet")
self._window_config = window_config
self._reduce_function = reduce_function
self._parent = parent
def _calculate_inputs(self):
return {GlobalStreamId(self._parent.get_name(), self._parent._output) :
Grouping.custom("heronpy.streamlet.impl.reducebykeyandwindowbolt.ReduceGrouping")}
def _build_this(self, builder, stage_names):
if not self.get_name():
self.set_name(self._default_stage_name_calculator("reducebykeyandwindow", stage_names))
if self.get_name() in stage_names:
raise RuntimeError("Duplicate Names")
stage_names.add(self.get_name())
builder.add_bolt(self.get_name(), ReduceByKeyAndWindowBolt, par=self.get_num_partitions(),
inputs=self._calculate_inputs(),
config={ReduceByKeyAndWindowBolt.FUNCTION : self._reduce_function,
ReduceByKeyAndWindowBolt.WINDOWDURATION :
self._window_config._window_duration.seconds,
ReduceByKeyAndWindowBolt.SLIDEINTERVAL :
self._window_config._slide_interval.seconds})
return True
| apache-2.0 |
jeanlinux/calibre | src/calibre/ebooks/compression/tcr.py | 24 | 5143 | # -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
class TCRCompressor(object):
'''
    TCR compression takes the form header+code_dict+coded_text.
    The header is always "!!8-Bit!!". The code dict is a list of 256
    strings, each stored as a 1-byte length followed by the string
    itself. Each position in the list corresponds to a code found in
    the file. The coded text is a string of character values; for
    instance the character Q represents the value 81, which corresponds
    to the string in the code list at position 81.
    '''
def _reset(self):
# List of indexes in the codes list that are empty and can hold new codes
self.unused_codes = set()
self.coded_txt = ''
# Generate initial codes from text.
# The index of the list will be the code that represents the characters at that location
# in the list
self.codes = []
def _combine_codes(self):
'''
        Combine two codes that always appear as a pair into a single code.
The intent is to create more unused codes.
'''
possible_codes = []
a_code = set(re.findall('(?msu).', self.coded_txt))
for code in a_code:
single_code = set(re.findall('(?msu)%s.' % re.escape(code), self.coded_txt))
if len(single_code) == 1:
possible_codes.append(single_code.pop())
for code in possible_codes:
self.coded_txt = self.coded_txt.replace(code, code[0])
self.codes[ord(code[0])] = '%s%s' % (self.codes[ord(code[0])], self.codes[ord(code[1])])
def _free_unused_codes(self):
'''
        Look for codes that do not appear in the coded text and add them to
the list of free codes.
'''
for i in xrange(256):
if i not in self.unused_codes:
if chr(i) not in self.coded_txt:
self.unused_codes.add(i)
def _new_codes(self):
'''
Create new codes from codes that occur in pairs often.
'''
possible_new_codes = list(set(re.findall('(?msu)..', self.coded_txt)))
new_codes_count = []
for c in possible_new_codes:
count = self.coded_txt.count(c)
# Less than 3 occurrences will not produce any size reduction.
if count > 2:
new_codes_count.append((c, count))
# Arrange the codes in order of least to most occurring.
possible_new_codes = [x[0] for x in sorted(new_codes_count, key=lambda c: c[1])]
return possible_new_codes
def compress(self, txt):
self._reset()
self.codes = list(set(re.findall('(?msu).', txt)))
# Replace the text with their corresponding code
for c in txt:
self.coded_txt += chr(self.codes.index(c))
# Zero the unused codes and record which are unused.
for i in range(len(self.codes), 256):
self.codes.append('')
self.unused_codes.add(i)
self._combine_codes()
possible_codes = self._new_codes()
while possible_codes and self.unused_codes:
while possible_codes and self.unused_codes:
unused_code = self.unused_codes.pop()
# Take the last possible codes and split it into individual
# codes. The last possible code is the most often occurring.
code1, code2 = possible_codes.pop()
self.codes[unused_code] = '%s%s' % (self.codes[ord(code1)], self.codes[ord(code2)])
self.coded_txt = self.coded_txt.replace('%s%s' % (code1, code2), chr(unused_code))
self._combine_codes()
self._free_unused_codes()
possible_codes = self._new_codes()
self._free_unused_codes()
# Generate the code dictionary.
code_dict = []
for i in xrange(0, 256):
if i in self.unused_codes:
code_dict.append(chr(0))
else:
code_dict.append(chr(len(self.codes[i])) + self.codes[i])
# Join the identifier with the dictionary and coded text.
return '!!8-Bit!!'+''.join(code_dict)+self.coded_txt
def decompress(stream):
txt = []
stream.seek(0)
if stream.read(9) != '!!8-Bit!!':
raise ValueError('File %s contains an invalid TCR header.' % stream.name)
# Codes that the file contents are broken down into.
entries = []
for i in xrange(256):
entry_len = ord(stream.read(1))
entries.append(stream.read(entry_len))
# Map the values in the file to locations in the string list.
entry_loc = stream.read(1)
while entry_loc != '': # EOF
txt.append(entries[ord(entry_loc)])
entry_loc = stream.read(1)
return ''.join(txt)
def compress(txt):
t = TCRCompressor()
return t.compress(txt)
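# Round-trip sketch (sample text assumed): compress() and decompress()
# should be inverses for any 8-bit string.
#
#     from cStringIO import StringIO
#     sample = 'the quick brown fox jumps over the lazy dog ' * 8
#     assert decompress(StringIO(compress(sample))) == sample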
| gpl-3.0 |
dana-i2cat/felix | modules/resource/manager/stitching-entity/src/core/utils/credentials.py | 2 | 10070 | #!/usr/bin/python
#----------------------------------------------------------------------
# Copyright (c) 2011 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
Utilities to manage credentials
"""
import datetime
import dateutil.parser
import logging
import os
import traceback
import xml.dom.minidom as md
# FIXME: Doesn't distinguish v2 vs v3 yet
def is_valid_v3(logger, credString):
'''Is the given credential a valid geni_sfa style v3 credential?'''
if not logger:
logger = logging.getLogger("omnilib.credparsing")
if not credString:
logger.warn("None credString - not geni_sfa v3")
return False
if not isinstance(credString, str):
logger.warn("Not string credString %s", credString)
return False
if credString.strip() == "":
logger.warn("Empty string cred")
return False
if credString.startswith("-----BEGIN CERTIFICATE"):
logger.warn("Cred seems to be a certificate: %s", credString)
return False
if not credString.startswith("<?xml"):
logger.warn("No ?xml to start cred: %s", credString)
return False
if not "signed-credential" in credString:
logger.warn("No signed-credential in cred: %s", credString)
return False
if not "owner_gid" in credString:
logger.warn("No owner_gid in cred: %s", credString)
return False
if not "target_gid" in credString:
logger.warn("No target_gid in cred: %s", credString)
return False
if not "Signature" in credString:
logger.warn("No Signature in cred: %s", credString)
return False
try:
doc = md.parseString(credString)
signed_cred = doc.getElementsByTagName("signed-credential")
# Is this a signed-cred or just a cred?
if len(signed_cred) > 0:
cred = signed_cred[0].getElementsByTagName("credential")[0]
else:
logger.warn("No signed-credential element found")
return False
targetnode = cred.getElementsByTagName("target_urn")[0]
if len(targetnode.childNodes) > 0:
urn = str(targetnode.childNodes[0].nodeValue)
else:
logger.warn("No target_urn found")
return False
except Exception, exc:
logger.warn("Exception parsing cred to get target_urn: %s", exc)
return False
# Want to rule out ABAC
# Want to rule out geni_sfa v2 if possible
# 1) Call:
# sfa.trust.credential.cred.verify(trusted_certs=None, schema=FIXME, trusted_certs_required=False)
# Will have to check in the xsd I think
# 2)
# also for each cert:
# retrieve gid from cred
# check not expired
# check version is 3
# get email, urn, uuid - all not empty
# more? CA:TRUE/FALSE? real serialNum? Has a DN?
# if type is slice or user, CA:FALSE. It type is authority, CA:TRUE
# Should this be in gid.py? or in geni.util.cert_util?
#
return True
def get_cred_target_urn(logger, cred):
'''Parse the given credential to get its target URN'''
credString = get_cred_xml(cred)
urn = ""
if credString is None:
return urn
# If credString is not a string then complain and return
if type(credString) != type('abc'):
if logger is None:
level = logging.INFO
logging.basicConfig(level=level)
logger = logging.getLogger("omni.credparsing")
logger.setLevel(level)
logger.error("Cannot parse target URN: Credential is not a string: %s", str(credString))
        return urn
try:
doc = md.parseString(credString)
signed_cred = doc.getElementsByTagName("signed-credential")
# Is this a signed-cred or just a cred?
if len(signed_cred) > 0:
cred = signed_cred[0].getElementsByTagName("credential")[0]
else:
cred = doc.getElementsByTagName("credential")[0]
targetnode = cred.getElementsByTagName("target_urn")[0]
if len(targetnode.childNodes) > 0:
urn = str(targetnode.childNodes[0].nodeValue)
else:
if logger is None:
level = logging.INFO
logging.basicConfig(level=level)
logger = logging.getLogger("omni.credparsing")
logger.setLevel(level)
logger.warn("Found no targetnode to get target_urn?")
except Exception, exc:
if logger is None:
level = logging.INFO
logging.basicConfig(level=level)
logger = logging.getLogger("omni.credparsing")
logger.setLevel(level)
logger.error("Failed to parse credential for target URN: %s", exc)
logger.info("Unparsable credential: %s", credString)
logger.debug(traceback.format_exc())
return urn
def get_cred_exp(logger, cred):
'''Parse the given credential in GENI AM API XML format to get its expiration time and return that'''
# Don't fully parse credential: grab the expiration from the string directly
credexp = datetime.datetime.fromordinal(1)
credString = get_cred_xml(cred)
if credString is None:
# failed to get a credential string. Can't check
return credexp
# If credString is not a string then complain and return
if type(credString) != type('abc'):
if logger is None:
level = logging.INFO
logging.basicConfig(level=level)
logger = logging.getLogger("omni.credparsing")
logger.setLevel(level)
logger.error("Cannot parse expiration date: Credential is not a string: %s", str(credString))
return credexp
try:
doc = md.parseString(credString)
signed_cred = doc.getElementsByTagName("signed-credential")
# Is this a signed-cred or just a cred?
if len(signed_cred) > 0:
cred = signed_cred[0].getElementsByTagName("credential")[0]
else:
cred = doc.getElementsByTagName("credential")[0]
expirnode = cred.getElementsByTagName("expires")[0]
if len(expirnode.childNodes) > 0:
credexp = dateutil.parser.parse(expirnode.childNodes[0].nodeValue)
except Exception, exc:
if logger is None:
level = logging.INFO
logging.basicConfig(level=level)
logger = logging.getLogger("omni.credparsing")
logger.setLevel(level)
logger.error("Failed to parse credential for expiration time: %s", exc)
logger.info("Unparsable credential: %s", credString)
logger.debug(traceback.format_exc())
return credexp
def is_cred_xml(cred):
'''Is this a cred in XML format, or a struct? Return true if XML'''
if cred is None:
return False
if not isinstance(cred, str):
return False
if cred.strip() == "":
return False
cred = cred.strip()
if not cred.startswith("<?xml"):
return False
if not "signed-credential" in cred:
return False
try:
doc = md.parseString(cred)
signed_cred = doc.getElementsByTagName("signed-credential")
# Is this a signed-cred or just a cred?
if len(signed_cred) > 0:
credEle = signed_cred[0].getElementsByTagName("credential")[0]
else:
return False
targetnode = credEle.getElementsByTagName("target_urn")[0]
if len(targetnode.childNodes) > 0:
urn = str(targetnode.childNodes[0].nodeValue)
else:
return False
except Exception, exc:
return False
# Anything else? Starts with <?
return True
def get_cred_xml(cred):
'''Return the cred XML, from the struct if any, else None'''
if is_cred_xml(cred):
return cred
# Make sure the cred is the right struct?
# extract the real cred
# {
# geni_type: <string>,
# geni_version: <string>,
# geni_value: <the credential as a string>
# }
if isinstance(cred, dict) and cred.has_key("geni_value"):
return cred["geni_value"]
return None
def wrap_cred(cred):
"""
Wrap the given cred in the appropriate struct for this framework.
"""
if isinstance(cred, dict):
print "Called wrap on a cred that is already a dict? %s", cred
return cred
elif not isinstance(cred, str):
print "Called wrap on non string cred? Stringify. %s", cred
cred = str(cred)
ret = dict(geni_type="geni_sfa", geni_version="2", geni_value=cred)
if is_valid_v3(None, cred):
ret["geni_version"] = "3"
return ret
def get_creds_file_contents(filename):
creds_path = os.path.normpath(os.path.join(os.path.dirname(__file__), "../..", "creds"))
if not os.path.isabs(filename):
filename = os.path.join(creds_path, filename)
filename = os.path.abspath(os.path.expanduser(filename))
contents = None
with open(filename, "r") as f:
contents = f.read()
return contents
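# Usage sketch (file name assumed): wrap_cred() produces the GENI AM API
# credential struct; geni_version is bumped to "3" only when the XML
# passes is_valid_v3().
#
#     wrapped = wrap_cred(get_creds_file_contents('user-cred.xml'))
#     assert wrapped['geni_type'] == 'geni_sfa'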
| apache-2.0 |
monikagrabowska/osf.io | addons/base/testing/logger.py | 11 | 1786 | import abc
from nose.tools import * # noqa (PEP8 asserts)
from framework.auth import Auth
from tests.factories import AuthUserFactory, ProjectFactory
class AddonNodeLoggerTestSuiteMixinBase(object):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def addon_short_name(self):
pass
@abc.abstractproperty
def NodeLogger(self):
pass
def setUp(self):
super(AddonNodeLoggerTestSuiteMixinBase, self).setUp()
self.auth = Auth(AuthUserFactory())
self.node = ProjectFactory(creator=self.auth.user)
self.path = None
self.node.add_addon(self.addon_short_name, auth=self.auth)
self.logger = self.NodeLogger(node=self.node, auth=self.auth)
class StorageAddonNodeLoggerTestSuiteMixin(AddonNodeLoggerTestSuiteMixinBase):
def setUp(self):
super(StorageAddonNodeLoggerTestSuiteMixin, self).setUp()
def test_log_file_added(self):
self.logger.log('file_added', save=True)
last_log = self.node.logs.latest()
assert_equal(last_log.action, '{0}_{1}'.format(self.addon_short_name, 'file_added'))
def test_log_file_removed(self):
self.logger.log('file_removed', save=True)
last_log = self.node.logs.latest()
assert_equal(last_log.action, '{0}_{1}'.format(self.addon_short_name, 'file_removed'))
def test_log_deauthorized_when_node_settings_are_deleted(self):
node_settings = self.node.get_addon(self.addon_short_name)
node_settings.delete(save=True)
# sanity check
assert_true(node_settings.deleted)
self.logger.log(action='node_deauthorized', save=True)
last_log = self.node.logs.latest()
assert_equal(last_log.action, '{0}_node_deauthorized'.format(self.addon_short_name))
| apache-2.0 |
terrameijar/oppia | main_taskqueue.py | 11 | 1803 | # Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main package for URL routing for requests originating from the task queue."""
# pylint: disable=relative-import
from core.controllers import tasks
from core.platform import models
import feconf
import main
# pylint: enable=relative-import
import webapp2
transaction_services = models.Registry.import_transaction_services()
# Register the URLs with the classes responsible for handling them.
URLS = [
main.get_redirect_route(
r'%s' % feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS,
tasks.UnsentFeedbackEmailHandler),
main.get_redirect_route(
r'%s' % feconf.TASK_URL_SUGGESTION_EMAILS,
tasks.SuggestionEmailHandler),
main.get_redirect_route(
r'%s' % feconf.TASK_URL_FLAG_EXPLORATION_EMAILS,
tasks.FlagExplorationEmailHandler),
main.get_redirect_route(
r'%s' % feconf.TASK_URL_INSTANT_FEEDBACK_EMAILS,
tasks.InstantFeedbackMessageEmailHandler),
main.get_redirect_route(
r'%s' % feconf.TASK_URL_FEEDBACK_STATUS_EMAILS,
tasks.FeedbackThreadStatusChangeEmailHandler),
]
app = transaction_services.toplevel_wrapper( # pylint: disable=invalid-name
webapp2.WSGIApplication(URLS, debug=feconf.DEBUG))
| apache-2.0 |
cpcloud/oh-my-zsh | plugins/git-prompt/gitstatus.py | 343 | 2372 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from subprocess import Popen, PIPE
import re
# change those symbols to whatever you prefer
symbols = {
'ahead of': '↑',
'behind': '↓',
'staged': '♦',
'changed': '‣',
'untracked': '…',
'clean': '⚡',
'unmerged': '≠',
'sha1': ':'
}
output, error = Popen(
['git', 'status'], stdout=PIPE, stderr=PIPE, universal_newlines=True).communicate()
if error:
import sys
sys.exit(0)
lines = output.splitlines()
behead_re = re.compile(
r"^# Your branch is (ahead of|behind) '(.*)' by (\d+) commit")
diverge_re = re.compile(r"^# and have (\d+) and (\d+) different")
status = ''
staged = re.compile(r'^# Changes to be committed:$', re.MULTILINE)
changed = re.compile(r'^# Changed but not updated:$', re.MULTILINE)
untracked = re.compile(r'^# Untracked files:$', re.MULTILINE)
unmerged = re.compile(r'^# Unmerged paths:$', re.MULTILINE)
def execute(*command):
out, err = Popen(stdout=PIPE, stderr=PIPE, *command).communicate()
if not err:
nb = len(out.splitlines())
else:
nb = '?'
return nb
if staged.search(output):
nb = execute(
['git', 'diff', '--staged', '--name-only', '--diff-filter=ACDMRT'])
status += '%s%s' % (symbols['staged'], nb)
if unmerged.search(output):
nb = execute(['git', 'diff', '--staged', '--name-only', '--diff-filter=U'])
status += '%s%s' % (symbols['unmerged'], nb)
if changed.search(output):
nb = execute(['git', 'diff', '--name-only', '--diff-filter=ACDMRT'])
status += '%s%s' % (symbols['changed'], nb)
if untracked.search(output):
status += symbols['untracked']
if status == '':
status = symbols['clean']
remote = ''
bline = lines[0]
if bline.find('Not currently on any branch') != -1:
branch = symbols['sha1'] + Popen([
'git',
'rev-parse',
'--short',
'HEAD'], stdout=PIPE).communicate()[0][:-1]
else:
branch = bline.split(' ')[-1]
bstatusline = lines[1]
match = behead_re.match(bstatusline)
if match:
remote = symbols[match.groups()[0]]
remote += match.groups()[2]
elif lines[2:]:
div_match = diverge_re.match(lines[2])
if div_match:
remote = "{behind}{1}{ahead of}{0}".format(
*div_match.groups(), **symbols)
print('\n'.join([branch, remote, status]))
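# Output sketch (illustrative): three newline-separated fields consumed
# by the zsh prompt, e.g. a branch two commits ahead with three changed
# files prints "master", then "↑2", then "‣3".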
| mit |
bloomberg/chef-bcpc | virtual/bin/generate-chef-databags.py | 1 | 1488 | #!/usr/bin/env python3
# Copyright 2020, Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import yaml
from builtins import FileExistsError
from lib.bcc_chef_databags import BCCChefDatabags
if __name__ == '__main__':
desc = "BCC Chef Databag Generator"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"-s", "--save",
default=False,
action="store_true",
help="save databag to file",
)
parser.add_argument(
"-f", "--force",
default=False,
action="store_true",
help="force databag overwrite if one already exists",
)
args = parser.parse_args()
bccdb = BCCChefDatabags()
if args.save:
try:
bccdb.save(force=args.force)
sys.exit(0)
except FileExistsError as e:
print(e)
sys.exit(1)
print(yaml.dump(bccdb.generate(), default_flow_style=False, indent=2))
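# Invocation sketch (paths assumed): print the generated databag, or
# persist it, overwriting any existing file:
#
#     $ virtual/bin/generate-chef-databags.py
#     $ virtual/bin/generate-chef-databags.py --save --force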
| apache-2.0 |
leuschel/logen | old_logen/pylogen/Pmw/Pmw_1_2/tests/Options_test.py | 3 | 10791 | # This tests Pmw option and component handling.
import Tkinter
import Test
import Pmw
Test.initialise()
"""
Definitions:
initialisation option: an option that can be set in the call
to the constructor but not in configure()
configuration option: an option that can be set in the call
to the constructor and to configure()
option: either an initialisation option or a configuration option
Tests
-----
in constructor:
+ define an option, its default value and whether it is an
initialisation or a configuration option
+ set a callback function for a configuration option
+ set a different default for an option of a base class
+ set a different default for an option of a component of a base class
+ override the callback for a configuration option of a base class
+ create a component
+ create an alias for a component
+ create an alias for a sub-component
calling constructor:
+ set an option
+ set an option of a base class
+ set an option of a component created in the constructor
+ set an option of an aliased component or sub-component created in
the constructor
+ set an option of one or more components via their group name
+ use the default value of an option
+ use the default value of an option of a base class
+ use the default value of an option of a base class where the default
value is redefined in the derived class
calling configure:
+ set a configuration option
+ set a configuration option of a base class
+ set a configuration option of a component
+ set a configuration option of an aliased component or sub-component
+ set a configuration option of one or more components via their group name
+ set a configuration option with a callback
+ set a configuration option of a base class with a callback in the
derived class
"""
class Simple(Pmw.MegaWidget):
def __init__(self, parent = None, **kw):
optiondefs = (
('initsimple1', 'initsimple1', Pmw.INITOPT),
('initsimple2', 'initsimple2', Pmw.INITOPT),
('optsimple1', 'optsimple1', None),
('optsimple2', 'optsimple2', None),
)
self.defineoptions(kw, optiondefs)
Pmw.MegaWidget.__init__(self, parent)
interior = self.interior()
self._widget = self.createcomponent('widget',
(('widgy', 'widget'),), None,
Tkinter.Button, (interior,))
self._widget.grid(column=0, row=0, sticky='nsew')
self.initialiseoptions()
class Complex(Pmw.MegaWidget):
def __init__(self, parent = None, **kw):
optiondefs = (
('initcomplex1', 'initcomplex1', Pmw.INITOPT),
('initcomplex2', 'initcomplex2', Pmw.INITOPT),
('optcomplex1', 'optcomplex1', None),
('optcomplex2', 'optcomplex2', None),
)
self.defineoptions(kw, optiondefs)
Pmw.MegaWidget.__init__(self, parent)
interior = self.interior()
self._simple = self.createcomponent('simple',
(('widget', 'simple_widget'),), None,
Simple, (interior,))
self._simple.grid(column=0, row=0, sticky='nsew')
self.initialiseoptions()
class Base(Pmw.MegaWidget):
def __init__(self, parent = None, **kw):
optiondefs = (
('initbase1', 'initbase1', Pmw.INITOPT),
('initbase2', 'initbase2', Pmw.INITOPT),
('initbase3', 'initbase3', Pmw.INITOPT),
('optbase1', 'optbase1', self._optbase1),
('optbase2', 'optbase2', None),
('optbase3', 'optbase3', None),
)
self.defineoptions(kw, optiondefs)
Pmw.MegaWidget.__init__(self, parent)
oldInterior = Pmw.MegaWidget.interior(self)
self._widget = self.createcomponent('basesimple',
(('widget', 'basesimple_widget'),), None,
Simple, (oldInterior,))
self._widget.grid(column=0, row=0, sticky='nsew')
self._child = self.createcomponent('child',
(), 'Mygroup',
Tkinter.Frame, (oldInterior,))
self._child.grid(column=0, row=1, sticky='nsew')
self._groupie = self.createcomponent('groupie',
(), 'Mygroup',
Tkinter.Button, (oldInterior,), text = 'XXXXX')
self._groupie.grid(column=0, row=2, sticky='nsew')
self.basedummy = []
self.initialiseoptions()
def _optbase1(self):
self.basedummy.append(self['optbase1'])
def getbasedummy(self):
return self.basedummy
def interior(self):
return self._child
class Derived(Base):
def __init__(self, parent = None, **kw):
# Define the options for this megawidget.
optiondefs = (
('initbase2', 'initbase2inderived', Pmw.INITOPT),
('initderived1', 'initderived1', Pmw.INITOPT),
('initderived2', 'initderived2', Pmw.INITOPT),
('optbase1', 'optbase1', self._optbase1),
('optderived1', 'optderived1', None),
('optderived2', 'optderived2', None),
('groupie_text', 'YYYYY', None),
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining my options).
Base.__init__(self, parent)
# Create components.
interior = self.interior()
self._widget = self.createcomponent('derivedcomplex',
(('derivedsimple', 'derivedcomplex_simple'),), None,
Complex, (interior,))
self._widget.grid(column=0, row=0, sticky='nsew')
# Initialise instance.
# Initialise instance variables.
self.deriveddummy = []
# Check keywords and initialise options.
self.initialiseoptions()
def _optbase1(self):
self.deriveddummy.append(self['optbase1'])
def getderiveddummy(self):
return self.deriveddummy
testData = ()
c = Simple
kw_1 = {
'hull_borderwidth' :2,
'hull_relief' :'sunken',
'hull_background' :'red',
'widget_text' :'simple',
'widgy_foreground' :'red',
'initsimple1' :'initsimple1_new',
}
tests = (
(c.pack, ()),
(c.components, (), ['hull', 'widget']),
(c.componentaliases, (), [('widgy', 'widget'),]),
(c.options, (), [('initsimple1', 'initsimple1', 1), ('initsimple2', 'initsimple2', 1), ('optsimple1', 'optsimple1', 0), ('optsimple2', 'optsimple2', 0)]),
(c.cget, 'initsimple1', 'initsimple1_new'),
(c.cget, 'initsimple2', 'initsimple2'),
(c.cget, 'optsimple1', 'optsimple1'),
(c.cget, 'optsimple2', 'optsimple2'),
(c.cget, 'widget_foreground', 'red'),
('optsimple1', 'optsimple1_new'),
(c.cget, 'optsimple1', 'optsimple1_new'),
)
testData = testData + ((c, ((tests, kw_1),)),)
c = Complex
kw_1 = {
'hull_borderwidth' : 2,
'hull_relief' : 'sunken',
'hull_background' : 'red',
'simple_widget_text' : 'complex',
'widget_foreground' : 'yellow',
}
tests = (
(c.pack, ()),
(c.components, (), ['hull', 'simple']),
(c.componentaliases, (), [('widget', 'simple_widget'),]),
(c.options, (), [('initcomplex1', 'initcomplex1', 1), ('initcomplex2', 'initcomplex2', 1), ('optcomplex1', 'optcomplex1', 0), ('optcomplex2', 'optcomplex2', 0)]),
)
testData = testData + ((c, ((tests, kw_1),)),)
c = Base
kw_1 = {
'hull_borderwidth' : 2,
'hull_relief' : 'sunken',
'hull_background' : 'red',
'basesimple_widget_text' : 'base',
'widget_foreground' : 'green',
'initbase1' : 'initbase1_new',
}
tests = (
(c.pack, ()),
(c.components, (), ['basesimple', 'child', 'groupie', 'hull']),
(c.componentaliases, (), [('widget', 'basesimple_widget'),]),
(c.options, (), [('initbase1', 'initbase1', 1), ('initbase2', 'initbase2', 1), ('initbase3', 'initbase3', 1), ('optbase1', 'optbase1', 0), ('optbase2', 'optbase2', 0), ('optbase3', 'optbase3', 0)]),
(c.cget, 'widget_foreground', 'green'),
(c.cget, 'basesimple_widget_foreground', 'green'),
(c.cget, 'basesimple_widgy_foreground', 'green'),
('widget_foreground', 'blue'),
(c.cget, 'widget_foreground', 'blue'),
(c.cget, 'basesimple_widget_foreground', 'blue'),
(c.cget, 'basesimple_widgy_foreground', 'blue'),
(c.cget, 'optbase1', 'optbase1'),
(c.cget, 'groupie_text', 'XXXXX'),
# When Test created the widget, it performed a test where it configured
# each option. Hence, _optbase1() has been called twice:
(c.getbasedummy, (), ['optbase1', 'optbase1']),
('optbase1', 'basedummy_new'),
(c.getbasedummy, (), ['optbase1', 'optbase1', 'basedummy_new']),
)
testData = testData + ((c, ((tests, kw_1),)),)
c = Derived
kw_1 = {
'hull_borderwidth' : 2,
'hull_relief' : 'sunken',
'hull_background' : 'red',
'basesimple_widget_text' : 'base simple',
'derivedcomplex_widget_text' : 'derived complex',
'initderived1' : 'initderived1_new',
'initbase1' : 'initbase1_new',
'optbase3' : 'optbase3_new',
'derivedcomplex_initcomplex1' : 'derivedcomplex_initcomplex1',
'derivedsimple_initsimple1' : 'derivedsimple_initsimple1',
'hull_cursor' : 'gumby',
'Mygroup_borderwidth' : 2,
'Mygroup_relief' : 'ridge',
}
tests = (
(c.pack, ()),
(c.components, (), ['basesimple', 'child', 'derivedcomplex', 'groupie', 'hull']),
(c.componentaliases, (), [('derivedsimple', 'derivedcomplex_simple'), ('widget', 'basesimple_widget'),]),
(c.options, (), [('initbase1', 'initbase1', 1), ('initbase2', 'initbase2inderived', 1), ('initbase3', 'initbase3', 1), ('initderived1', 'initderived1', 1), ('initderived2', 'initderived2', 1), ('optbase1', 'optbase1', 0), ('optbase2', 'optbase2', 0), ('optbase3', 'optbase3', 0), ('optderived1', 'optderived1', 0), ('optderived2', 'optderived2', 0), ]),
(c.getbasedummy, (), []),
(c.getderiveddummy, (), ['optbase1', 'optbase1']),
('optbase1', 'derivedbasedummy_new'),
(c.getbasedummy, (), []),
(c.getderiveddummy, (), ['optbase1', 'optbase1', 'derivedbasedummy_new']),
(c.cget, 'optbase3', 'optbase3_new'),
('optbase3', 'optbase3_newer'),
(c.cget, 'optbase3', 'optbase3_newer'),
(c.cget, 'optderived1', 'optderived1'),
(c.cget, 'initderived1', 'initderived1_new'),
(c.cget, 'initbase2', 'initbase2inderived'),
(c.cget, 'initbase1', 'initbase1_new'),
(c.cget, 'initbase3', 'initbase3'),
(c.cget, 'groupie_text', 'YYYYY'),
('groupie_text', 'ZZZZZ'),
(c.cget, 'groupie_text', 'ZZZZZ'),
(c.cget, 'derivedcomplex_optcomplex1', 'optcomplex1'),
('derivedcomplex_optcomplex1', 'optcomplex1_new'),
(c.cget, 'derivedcomplex_optcomplex1', 'optcomplex1_new'),
(c.cget, 'derivedsimple_optsimple2', 'optsimple2'),
('derivedsimple_optsimple2', 'optsimple2_new'),
(c.cget, 'derivedcomplex_simple_optsimple2', 'optsimple2_new'),
('derivedcomplex_simple_optsimple2', 'optsimple2_newer'),
(c.cget, 'derivedsimple_optsimple2', 'optsimple2_newer'),
(c.cget, 'hull_cursor', 'gumby'),
(c.cget, 'groupie_relief', 'ridge'),
(c.cget, 'Mygroup_relief', 'ridge'),
('Mygroup_relief', 'sunken'),
(c.cget, 'groupie_relief', 'sunken'),
(c.cget, 'Mygroup_relief', 'sunken'),
('groupie_relief', 'groove'),
(c.cget, 'groupie_relief', 'groove'),
)
testData = testData + ((c, ((tests, kw_1),)),)
if __name__ == '__main__':
#Test.setverbose(1)
Test.runTests(testData)
| apache-2.0 |
jylaxp/django | django/core/management/commands/startproject.py | 503 | 1359 | from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.utils.crypto import get_random_string
class Command(TemplateCommand):
help = ("Creates a Django project directory structure for the given "
"project name in the current directory or optionally in the "
"given directory.")
missing_args_message = "You must provide a project name."
def handle(self, **options):
project_name, target = options.pop('name'), options.pop('directory')
self.validate_name(project_name, "project")
# Check that the project_name cannot be imported.
try:
import_module(project_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing "
"Python module and cannot be used as a "
"project name. Please try another name." %
project_name)
# Create a random SECRET_KEY to put it in the main settings.
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
options['secret_key'] = get_random_string(50, chars)
super(Command, self).handle('project', project_name, target, **options)
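# Invocation sketch (project name assumed): this handler normally runs
# via django-admin, which dispatches here through the management
# framework:
#
#     $ django-admin startproject mysite [optional_target_dir]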
| bsd-3-clause |
julianprabhakar/eden_car | modules/unit_tests/s3/s3validators.py | 6 | 9762 | # -*- coding: utf-8 -*-
#
# Validators Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3validators.py
#
import unittest
from gluon import current
from s3.s3fields import *
from s3.s3validators import *
# =============================================================================
class ISLatTest(unittest.TestCase):
"""
Latitude has to be in decimal degrees between -90 & 90
- we can convert D/M/S or D°M'S" format into decimal degrees:
        Zero padded, separated by spaces or : or (d, m, s) or (°, ', ") or run together and followed by cardinal direction initial (N,S). Note: Only seconds can have decimal places. A decimal point with no trailing digits is invalid.
        Matches
        40:26:46N | 40°26'47″N | 40d 26m 47s N | 90 00 00.0 | 89 59 50.4141 S | 00 00 00.0
Non-Matches
90 00 00.001 N | 9 00 00.00 N | 9 00 00.00 | 90 61 50.4121 S | -90 48 50. N | 90 00 00. N | 00 00 00.
"""
pass
# =============================================================================
class ISLonTest(unittest.TestCase):
"""
Longitude has to be in decimal degrees between -180 & 180
- we can convert D/M/S format into decimal degrees
        Zero padded, separated by spaces or : or (d, m, s) or (°, ', ") or run together and followed by cardinal direction initial (E,W). Note: Only seconds can have decimal places. A decimal point with no trailing digits is invalid.
        Matches
        079:56:55W | 079°58'36″W | 079d 58' 36″ W | 180 00 00.0 | 090 29 20.4 E | 000 00 00.0
Non-Matches
180 00 00.001 E | 79 00 00.00 E | -79 00 00.00 | 090 29 20.4 E | -090 29 20.4 E | 180 00 00. E | 000 00 00.
"""
pass
# =============================================================================
class ISONEOFLazyRepresentationTests(unittest.TestCase):
def setUp(self):
s3db = current.s3db
current.auth.override = True
current.deployment_settings.org.branches = True
orgs = [Storage(name="ISONEOF%s" % i,
acronym="IOO%s" % i)
for i in xrange(5)]
ids = []
table = s3db.org_organisation
for org in orgs:
org_id = table.insert(**org)
org["id"] = org_id
s3db.update_super(table, org)
ids.append(org_id)
self.ids = ids
self.orgs = orgs
# -------------------------------------------------------------------------
def testIsOneOfBuildSet(self):
renderer = S3Represent(lookup="org_organisation")
db = current.db
table = current.s3db.org_organisation
validator = IS_ONE_OF(db(table.id.belongs(self.ids)),
"org_organisation.id",
renderer)
options = Storage(validator.options())
for org in self.orgs:
self.assertTrue(str(org.id) in options)
self.assertEqual(options[str(org.id)], org.name)
self.assertEqual(renderer.queries, 0)
# -------------------------------------------------------------------------
def testOrgOrganisationRepresent(self):
s3db = current.s3db
renderer = s3db.org_OrganisationRepresent()
db = current.db
table = s3db.org_organisation
validator = IS_ONE_OF(db(table.id.belongs(self.ids)),
"org_organisation.id",
renderer)
options = Storage(validator.options())
for org in self.orgs:
self.assertTrue(str(org.id) in options)
self.assertEqual(options[str(org.id)],
"%s (%s)" % (org.name, org.acronym))
self.assertEqual(renderer.queries, 1) # using custom query
renderer = s3db.org_OrganisationRepresent(parent=False)
db = current.db
table = current.s3db.org_organisation
validator = IS_ONE_OF(db(table.id.belongs(self.ids)),
"org_organisation.id",
renderer)
options = Storage(validator.options())
for org in self.orgs:
self.assertTrue(str(org.id) in options)
self.assertEqual(options[str(org.id)],
"%s (%s)" % (org.name, org.acronym))
self.assertEqual(renderer.queries, 0) # using default query
renderer = s3db.org_OrganisationRepresent(parent=False,
acronym=False)
db = current.db
table = current.s3db.org_organisation
validator = IS_ONE_OF(db(table.id.belongs(self.ids)),
"org_organisation.id",
renderer)
options = Storage(validator.options())
for org in self.orgs:
self.assertTrue(str(org.id) in options)
self.assertEqual(options[str(org.id)], org.name)
self.assertEqual(renderer.queries, 0) # using default query
# -------------------------------------------------------------------------
def tearDown(self):
current.deployment_settings.org.branches = False
current.auth.override = False
current.db.rollback()
# =============================================================================
class IS_PHONE_NUMBER_Tests(unittest.TestCase):
""" Test IS_PHONE_NUMBER single phone number validator """
def testStandardNotationRequirement(self):
""" Test phone number validation with standard notation requirement """
assertEqual = self.assertEqual
assertNotEqual = self.assertNotEqual
validate = IS_PHONE_NUMBER(international=False)
number = "(021) 3847589"
value, error = validate(number)
assertEqual(error, None)
assertEqual(value, "(021) 3847589")
number = "0049-681-5049321"
value, error = validate(number)
assertEqual(error, None)
assertEqual(value, "0049-681-5049321")
number = " 1-992-883742"
value, error = validate(number)
assertEqual(error, None)
assertEqual(value, "1-992-883742")
number = "1.123.736489"
value, error = validate(number)
assertEqual(error, None)
assertEqual(value, "1.123.736489")
number = "+44848958493 "
value, error = validate(number)
assertEqual(error, None)
assertEqual(value, "+44848958493")
number = "(021) 3ADF589"
value, error = validate(number)
assertNotEqual(error, None)
number = "Test"
value, error = validate(number)
assertNotEqual(error, None)
# @todo: this is still recognized as valid, as is "-1"
#number = "1"
#value, error = validate(number)
#assertNotEqual(error, None)
number = "+44848958493/+44736282167"
value, error = validate(number)
assertNotEqual(error, None)
number = None
value, error = validate(number)
assertNotEqual(error, None)
number = ""
value, error = validate(number)
assertNotEqual(error, None)
def testInternationalFormat(self):
""" Test phone number validation with international notation requirement """
# Store current setting
settings = current.deployment_settings
current_setting = settings.get_msg_require_international_phone_numbers()
assertEqual = self.assertEqual
assertNotEqual = self.assertNotEqual
validate = IS_PHONE_NUMBER(international=True)
# Turn on notation requirement globally
settings.msg.require_international_phone_numbers = True
number = "+46-73-3847589"
value, error = validate(number)
assertEqual(error, None)
assertEqual(value, "+46733847589")
number = "+49.681.5049321"
value, error = validate(number)
assertEqual(error, None)
assertEqual(value, "+496815049321")
number = "+1992883742"
value, error = validate(number)
assertEqual(error, None)
assertEqual(value, "+1992883742")
number = "(021) 36374589"
value, error = validate(number)
assertNotEqual(error, None)
number = "Test"
value, error = validate(number)
assertNotEqual(error, None)
number = "1-364-283745"
value, error = validate(number)
assertNotEqual(error, None)
number = None
value, error = validate(number)
assertNotEqual(error, None)
number = ""
value, error = validate(number)
assertNotEqual(error, None)
# Turn off notation requirement globally
settings.msg.require_international_phone_numbers = False
number = "1-364-283745"
value, error = validate(number)
assertEqual(error, None)
assertEqual(value, "1-364-283745")
# Restore current setting
settings.msg.require_international_phone_numbers = current_setting
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":
run_suite(
ISLatTest,
ISLonTest,
ISONEOFLazyRepresentationTests,
IS_PHONE_NUMBER_Tests,
)
# END ========================================================================
| mit |
shahankhatch/scikit-learn | sklearn/decomposition/dict_learning.py | 104 | 44632 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=False)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
                         % algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary' * X
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
# Transposing product to ensure Fortran ordering
gram = np.dot(dictionary, dictionary.T).T
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
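# --- Hedged usage sketch (not part of the library API surface) ---
# A minimal example of calling sparse_encode defined above; the helper name
# _demo_sparse_encode and the sizes are illustrative assumptions. Dictionary
# rows are unit-normalized, as the docstring recommends.
def _demo_sparse_encode():
    rng = np.random.RandomState(0)
    dictionary = rng.randn(8, 5)                      # 8 atoms, 5 features
    dictionary /= np.sqrt((dictionary ** 2).sum(axis=1))[:, np.newaxis]
    X = rng.randn(3, 5)                               # 3 samples to encode
    # OMP keeps at most 2 nonzero coefficients per sample
    code = sparse_encode(X, dictionary, algorithm='omp', n_nonzero_coefs=2)
    return code.shape                                 # (3, 8)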
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
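# --- Hedged sketch of the rank-1 residual bookkeeping (illustrative) ---
# _update_dict above maintains R = Y - dictionary * code with BLAS ger calls:
# for each atom k it adds u_k v_k^T back, re-estimates the atom from R, and
# subtracts the renormalized contribution. A plain-numpy equivalent of one
# such step, for reference only:
def _demo_rank1_atom_update(dictionary, code, Y, k):
    R = Y - np.dot(dictionary, code)
    R += np.outer(dictionary[:, k], code[k, :])   # add atom k's part back
    atom = np.dot(R, code[k, :])                  # re-estimate atom k
    norm = sqrt(np.dot(atom, atom))
    if norm > 1e-10:
        atom = atom / norm                        # project back to unit norm
    return atom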
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
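# --- Hedged usage sketch for dict_learning (illustrative names/sizes) ---
# Factor a small random matrix into a sparse code and a dictionary; alpha
# trades reconstruction error against sparsity of the code.
def _demo_dict_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)
    code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.,
                                             max_iter=20, random_state=rng)
    # X is approximated by np.dot(code, dictionary)
    return code.shape, dictionary.shape               # (20, 5), (5, 10)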
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
        raise ValueError('Coding method %r not supported as a fit algorithm.'
                         % method)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
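# --- Hedged usage sketch for dict_learning_online (illustrative) ---
# The online variant iterates over mini-batches and can be warm-restarted
# through iter_offset/inner_stats; here it just runs once on a toy matrix.
def _demo_dict_learning_online():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 8)
    code, dictionary = dict_learning_online(X, n_components=4, alpha=1.,
                                            n_iter=10, batch_size=5,
                                            random_state=rng)
    return code.shape, dictionary.shape               # (30, 4), (4, 8)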
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
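# --- Hedged usage sketch for SparseCoder (illustrative) ---
# SparseCoder never learns anything: fit() is a no-op and transform() encodes
# against the fixed dictionary handed to the constructor.
def _demo_sparse_coder():
    rng = np.random.RandomState(0)
    dictionary = rng.randn(6, 4)
    dictionary /= np.sqrt((dictionary ** 2).sum(axis=1))[:, np.newaxis]
    coder = SparseCoder(dictionary, transform_algorithm='threshold',
                        transform_alpha=0.5)
    X_new = coder.fit(None).transform(rng.randn(2, 4))
    return X_new.shape                                # (2, 6)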
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
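# --- Hedged usage sketch for DictionaryLearning (illustrative) ---
# fit() learns the atoms (components_); transform() then sparse-codes data
# with the transform_* parameters, 'omp' by default.
def _demo_dictionary_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 10)
    dico = DictionaryLearning(n_components=5, alpha=1., max_iter=10,
                              random_state=rng).fit(X)
    return dico.components_.shape, dico.transform(X).shape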
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
            performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
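# --- Hedged usage sketch for MiniBatchDictionaryLearning.partial_fit ---
# partial_fit keeps the inner statistics (A, B) and the iteration offset
# between calls, so feeding batches one at a time approximates one online
# run over all the data.
def _demo_partial_fit():
    rng = np.random.RandomState(0)
    dico = MiniBatchDictionaryLearning(n_components=4, alpha=1., n_iter=5,
                                       random_state=0)
    for _ in range(3):
        dico.partial_fit(rng.randn(10, 8))            # one mini-batch per call
    return dico.components_.shape                     # (4, 8)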
| bsd-3-clause |
dgarros/ansible | lib/ansible/modules/monitoring/zabbix_group.py | 35 | 7601 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_group
short_description: Create or delete Zabbix host groups
description:
- Create host groups if they do not exist.
- Delete existing host groups if they exist.
version_added: "1.8"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
C(url) is an alias for C(server_url).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
state:
description:
- Create or delete host group.
required: false
default: "present"
choices: [ "present", "absent" ]
timeout:
description:
            - The timeout of API request (seconds).
default: 10
host_groups:
description:
- List of host groups to create or delete.
required: true
aliases: [ "host_group" ]
notes:
- Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed.
'''
EXAMPLES = '''
# Base create host groups example
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2
# Limit the Zabbix group creations to one host since Zabbix can return an error when doing concurrent updates
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2
when: inventory_hostname==groups['group_name'][0]
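# A hedged extra example (not from the module's original docs): remove the
# same groups with state: absent. Credentials and group names are placeholders.
- name: Delete host groups
  local_action:
    module: zabbix_group
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    state: absent
    host_groups:
      - Example group1
      - Example group2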
'''
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
from zabbix_api import Already_Exists
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
class HostGroup(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
# create host group(s) if not exists
def create_host_group(self, group_names):
try:
group_add_list = []
for group_name in group_names:
result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
if not result:
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.hostgroup.create({'name': group_name})
group_add_list.append(group_name)
except Already_Exists:
return group_add_list
return group_add_list
except Exception as e:
self._module.fail_json(msg="Failed to create host group(s): %s" % e)
# delete host group(s)
def delete_host_group(self, group_ids):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.hostgroup.delete(group_ids)
except Exception as e:
self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e)
# get group ids by name
def get_group_ids(self, host_groups):
group_ids = []
group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': host_groups}})
for group in group_list:
group_id = group['groupid']
group_ids.append(group_id)
return group_ids, group_list
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            host_groups=dict(type='list', required=True, aliases=['host_group']),
            state=dict(default="present", choices=['present', 'absent']),
timeout=dict(type='int', default=10)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
host_groups = module.params['host_groups']
state = module.params['state']
timeout = module.params['timeout']
zbx = None
# login to zabbix
try:
zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
zbx.login(login_user, login_password)
except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
hostGroup = HostGroup(module, zbx)
group_ids = []
group_list = []
if host_groups:
group_ids, group_list = hostGroup.get_group_ids(host_groups)
if state == "absent":
# delete host groups
if group_ids:
delete_group_names = []
hostGroup.delete_host_group(group_ids)
for group in group_list:
delete_group_names.append(group['name'])
module.exit_json(changed=True,
result="Successfully deleted host group(s): %s." % ",".join(delete_group_names))
else:
module.exit_json(changed=False, result="No host group(s) to delete.")
else:
# create host groups
group_add_list = hostGroup.create_host_group(host_groups)
if len(group_add_list) > 0:
module.exit_json(changed=True, result="Successfully created host group(s): %s" % group_add_list)
else:
module.exit_json(changed=False)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
EarthTeam/earthteam.net | sites/all/modules/olfp/lib/OL-FeaturePopups/doc/_scripts/split-classes.py | 5 | 1689 | #!/usr/bin/env python
import re
import os
import sys
import shutil
def splitClasses (inputFilename, outputDirectory, clear):
print "Splitting classes, input: %s output: %s " % (inputFilename, outputDirectory)
if not os.path.isfile(inputFilename):
print "\nProcess aborted due to errors."
sys.exit('ERROR: Input file "%s" does not exist!' % inputFilename)
if clear == "Y" and os.path.isdir(outputDirectory):
try:
shutil.rmtree(outputDirectory, False)
except Exception, E:
print "\nAbnormal termination: Unable to clear or create working folder \"%s\"," % outputDirectory
print " check if there is a process that blocks the folder."
sys.exit("ERROR: %s" % E)
pathName, fileName = os.path.split(inputFilename)
pathName = os.path.join(outputDirectory, pathName.replace("../",""))
if not os.path.exists(pathName):
print "Creating folder:", pathName
os.makedirs(pathName)
fileName, fileExt = os.path.splitext(fileName)
fIn = open(inputFilename)
sourceIn = fIn.read()
fIn.close()
sourceOut = re.split('/\*\* *\r*\n *\* *(Class: |Namespace: )', sourceIn)
    for i in range(1, len(sourceOut), 2):
outputFileName = os.path.join(pathName, fileName + "-" + format(i/2) + fileExt)
print "Splited to:", outputFileName
fOut = open(outputFileName,"w")
fOut.write("/**\n * " + sourceOut[i] + sourceOut[i + 1])
fOut.close()
print "Done!"
# -----------------
# main
# -----------------
if __name__ == '__main__':
splitClasses(sys.argv[1], sys.argv[2], sys.argv[3])
| gpl-2.0 |
ltilve/ChromiumGStreamerBackend | ios/crnet/build.py | 33 | 9894 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import shutil
import subprocess
import sys
import tempfile
import time
SUPPORTED_ARCHES = ['i386', 'x86_64', 'armv7', 'arm64']
class SubprocessError(Exception):
pass
class ConfigurationError(Exception):
pass
def out_directories(root):
"""Returns all output directories containing crnet objects under root.
Currently this list is just hardcoded.
Args:
root: prefix for output directories.
"""
out_dirs = ['Release-iphoneos', 'Release-iphonesimulator']
return map(lambda x: os.path.join(root, 'out', x), out_dirs)
def check_command(command):
"""Runs a command, raising an exception if it fails.
If the command returns a nonzero exit code, prints any data the command
emitted on stdout and stderr.
Args:
command: command to execute, in argv format.
Raises:
SubprocessError: the specified command returned nonzero exit status.
"""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
return
message = 'Command failed: {0} (status {1})'.format(command, p.returncode)
print message
print 'stdout: {0}'.format(stdout)
print 'stderr: {0}'.format(stderr)
raise SubprocessError(message)
def file_contains_string(path, string):
"""Returns whether the file named by path contains string.
Args:
path: path of the file to search.
string: string to search the file for.
Returns:
True if file contains string, False otherwise.
"""
with open(path, 'r') as f:
for line in f:
if string in line:
return True
return False
def is_object_filename(filename):
"""Returns whether the given filename names an object file.
Args:
filename: filename to inspect.
Returns:
True if filename names an object file, false otherwise.
"""
(_, ext) = os.path.splitext(filename)
return ext in ('.a', '.o')
class Step(object):
"""Represents a single step of the crnet build process.
This parent class exists only to define the interface Steps present and keep
track of elapsed time for each step. Subclasses of Step should override the
run() method, which is called internally by start().
Attributes:
name: human-readable name of this step, used in debug output.
started_at: seconds since epoch that this step started running at.
"""
def __init__(self, name):
self._name = name
self._started_at = None
self._ended_at = None
@property
def name(self):
return self._name
def start(self):
"""Start running this step.
This method keeps track of how long the run() method takes to run and emits
the elapsed time after run() returns.
"""
self._started_at = time.time()
print '{0}: '.format(self._name),
sys.stdout.flush()
self._run()
self._ended_at = time.time()
print '{0:.2f}s'.format(self._ended_at - self._started_at)
def _run(self):
"""Actually run this step.
Subclasses should override this method to implement their own step logic.
"""
raise NotImplementedError
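# Hedged illustration (not part of the build): a minimal Step subclass showing
# the contract start() relies on -- subclasses only override _run(). The name
# _EchoStep is an assumption for demonstration.
class _EchoStep(Step):
  """Example step that demonstrates the Step interface without doing work."""
  def __init__(self, message):
    super(_EchoStep, self).__init__('echo')
    self._message = message
  def _run(self):
    # A real step would do its work here and raise SubprocessError or
    # ConfigurationError on failure; start() handles the timing output.
    pass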
class CleanStep(Step):
"""Clean the build output directories.
This step deletes intermediates generated by the build process. Some of these
intermediates (crnet_consumer.app and crnet_resources.bundle) are directories,
which contain files ninja doesn't know and hence won't remove, so the run()
method here explicitly deletes those directories before running 'ninja -t
clean'.
Attributes:
dirs: list of output directories to clean.
"""
def __init__(self, root):
super(CleanStep, self).__init__('clean')
self._dirs = out_directories(root)
def _run(self):
"""Runs the clean step.
Deletes crnet_consumer.app and crnet_resources.bundle in each output
directory and runs 'ninja -t clean' in each output directory.
"""
for d in self._dirs:
if os.path.exists(os.path.join(d, 'crnet_consumer.app')):
shutil.rmtree(os.path.join(d, 'crnet_consumer.app'))
if os.path.exists(os.path.join(d, 'crnet_resources.bundle')):
shutil.rmtree(os.path.join(d, 'crnet_resources.bundle'))
check_command(['ninja', '-C', d, '-t', 'clean'])
class HooksStep(Step):
"""Validates the gyp config and reruns gclient hooks.
Attributes:
root: directory to find gyp config under.
"""
def __init__(self, root):
super(HooksStep, self).__init__('hooks')
self._root = root
def _run(self):
"""Runs the hooks step.
Checks that root/build/common.gypi contains target_subarch = both in a crude
way, then calls 'gclient runhooks'. TODO(ellyjones): parse common.gypi in a
more robust way.
Raises:
ConfigurationError: if target_subarch != both
"""
common_gypi = os.path.join(self._root, 'build', 'common.gypi')
if not file_contains_string(common_gypi, "'target_subarch%': 'both'"):
raise ConfigurationError('target_subarch must be both in {0}'.format(
common_gypi))
check_command(['gclient', 'runhooks'])
class BuildStep(Step):
"""Builds all the intermediate crnet binaries.
All the hard work of this step is done by ninja; this step just shells out to
ninja to build the crnet_pack target.
Attributes:
dirs: output directories to run ninja in.
"""
def __init__(self, root):
super(BuildStep, self).__init__('build')
self._dirs = out_directories(root)
def _run(self):
"""Runs the build step.
For each output directory, run ninja to build the crnet_pack target in that
directory.
"""
for d in self._dirs:
check_command(['ninja', '-C', d, 'crnet_pack'])
class PackageStep(Step):
"""Packages the built object files for release.
The release format is a tarball, containing one gzipped tarball per
architecture and a manifest file, which lists metadata about the build.
Attributes:
outdirs: directories containing built object files.
workdir: temporary working directory. Deleted at end of the step.
archdir: temporary directory under workdir. Used for collecting per-arch
binaries.
proddir: temporary directory under workdir. Used for intermediate per-arch
tarballs.
"""
def __init__(self, root, outfile):
super(PackageStep, self).__init__('package')
self._outdirs = out_directories(root)
self._outfile = outfile
def _run(self):
"""Runs the package step.
Packages each architecture from |root| into an individual .tar.gz file, then
packages all the .tar.gz files into one .tar file, which is written to
|outfile|.
"""
(workdir, archdir, proddir) = self.create_work_dirs()
for arch in SUPPORTED_ARCHES:
self.package_arch(archdir, proddir, arch)
self.package(proddir)
shutil.rmtree(workdir)
def create_work_dirs(self):
"""Creates working directories and returns their paths."""
workdir = tempfile.mkdtemp()
archdir = os.path.join(workdir, 'arch')
proddir = os.path.join(workdir, 'prod')
os.mkdir(archdir)
os.mkdir(proddir)
return (workdir, archdir, proddir)
def object_files_for_arch(self, arch):
"""Returns a list of object files for the given architecture.
Under each outdir d, per-arch files are stored in d/arch, and object files
for a given arch contain the arch's name as a substring.
Args:
arch: architecture name. Must be in SUPPORTED_ARCHES.
Returns:
List of full pathnames to object files in outdirs for the named arch.
"""
arch_files = []
for d in self._outdirs:
files = os.listdir(os.path.join(d, 'arch'))
for f in filter(is_object_filename, files):
if arch in f:
arch_files.append(os.path.join(d, 'arch', f))
return arch_files
def package_arch(self, archdir, proddir, arch):
"""Packages an individual architecture.
Copies all the object files for the specified arch into a working directory
under self.archdir, then tars them up into a gzipped tarball under
self.proddir.
Args:
archdir: directory to stage architecture files in.
proddir: directory to stage result tarballs in.
arch: architecture name to package. Must be in SUPPORTED_ARCHES.
"""
arch_files = self.object_files_for_arch(arch)
os.mkdir(os.path.join(archdir, arch))
for f in arch_files:
shutil.copy(f, os.path.join(archdir, arch))
out_filename = os.path.join(proddir, '{0}.tar.gz'.format(arch))
check_command(['tar', '-C', archdir, '-czf', out_filename, arch])
def package(self, proddir):
"""Final packaging step. Packages all the arch tarballs into one tarball."""
arch_tarballs = []
for a in SUPPORTED_ARCHES:
arch_tarballs.append('{0}.tar.gz'.format(a))
check_command(['tar', '-C', proddir, '-cf', self._outfile] +
arch_tarballs)
def main():
step_classes = {
'clean': lambda: CleanStep(args.rootdir),
'hooks': lambda: HooksStep(args.rootdir),
'build': lambda: BuildStep(args.rootdir),
'package': lambda: PackageStep(args.rootdir, args.outfile)
}
parser = argparse.ArgumentParser(description='Build and package crnet.')
parser.add_argument('--outfile', dest='outfile', default='crnet.tar',
help='Output file to generate (default: crnet.tar)')
parser.add_argument('--rootdir', dest='rootdir', default='../..',
help='Root directory to build from (default: ../..)')
parser.add_argument('steps', metavar='step', nargs='*')
args = parser.parse_args()
step_names = args.steps or ['clean', 'hooks', 'build', 'package']
steps = [step_classes[x]() for x in step_names]
for step in steps:
step.start()
if __name__ == '__main__':
main()
| bsd-3-clause |
umitproject/packet-manipulator | umit/pm/gui/core/app.py | 2 | 5811 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008, 2009 Adriano Monteiro Marques
#
# Author: Francesco Piccinno <stack.box@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module contains the PMApp singleton object that gives access to
the all PacketManipulator functionalities
"""
import gtk
import gobject
import os
import sys
from optparse import OptionParser
from umit.pm.core.i18n import _
from umit.pm.core.atoms import Singleton
from umit.pm.core.bus import services_boot, ServiceBus
from umit.pm.gui.core.splash import SplashScreen
from umit.pm.gui.plugins.engine import PluginEngine
from umit.pm.manager.preferencemanager import Prefs
class PMApp(Singleton):
"The PacketManipulator application singleton object"
def __init__(self, args):
"""
PacketManipulator Application class
@param args pass sys.argv
"""
gobject.threads_init()
self._args = args
root = False
try:
# FIXME: add maemo
if sys.platform == 'win32':
import ctypes
root = bool(ctypes.windll.shell32.IsUserAnAdmin())
elif os.getuid() == 0:
root = True
except: pass
if Prefs()['system.check_pyver'].value == True and \
sys.version_info[1] < 6:
dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL,
gtk.MESSAGE_WARNING,
gtk.BUTTONS_YES_NO,
_('Packet Manipulator requires at least '
'2.6 version of Python but you\'ve %s '
                                       'installed. We do not guarantee that all '
                                       'functionalities work properly.\n\n'
'Do you want to continue?') % ".".join(
map(str, sys.version_info[:3])))
ret = dialog.run()
dialog.hide()
dialog.destroy()
if ret == gtk.RESPONSE_NO:
sys.exit(-1)
if Prefs()['system.check_root'].value == True and not root:
dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL,
gtk.MESSAGE_WARNING,
gtk.BUTTONS_YES_NO,
_('You are running Packet Manipulator as'
' non-root user!\nSome functionalities'
' need root privileges to work\n\nDo '
'you want to continue?'))
ret = dialog.run()
dialog.hide()
dialog.destroy()
if ret == gtk.RESPONSE_NO:
sys.exit(-1)
self.phase = 0
self.splash = SplashScreen()
def _idle(self):
if self.phase == 0:
self.splash.text = _("Registering icons ...")
from icons import register_icons
register_icons()
elif self.phase == 1:
self.splash.text = _("Loading preferences ...")
from umit.pm.manager.preferencemanager import Prefs
self.prefs = Prefs()
elif self.phase == 2:
services_boot()
self.splash.text = _("Creating main window ...")
from mainwindow import MainWindow
self.bus = ServiceBus()
self.main_window = MainWindow()
self.main_window.connect_tabs_signals()
self.plugin_engine = PluginEngine()
self.plugin_engine.load_selected_plugins()
# Destroy the splash screen
self.splash.hide()
self.splash.destroy()
self.splash.finished = True
del self.splash
# Now let's parse the args passed in the constructor
parser = self._create_parser()
options, args = parser.parse_args(self._args)
if options.fread:
self.main_window.open_generic_file_async(options.fread)
if options.audit:
dev1, dev2, bpf_filter = options.audit, '', ''
try:
dev1, dev2 = options.audit.split(',', 1)
                dev2, bpf_filter = dev2.split(':', 1)
except:
pass
self.main_window.start_new_audit(dev1, dev2, bpf_filter, False, False)
return False
self.phase += 1
return True
def run(self):
self.splash.show_all()
gobject.idle_add(self._idle)
gtk.main()
def _create_parser(self):
"""
@return an OptionParser object that can handle the sys.argv passed in
the constructor.
"""
opt = OptionParser()
opt.add_option('-r', None, dest="fread",
help="Read packets/sequence from file.")
opt.add_option('-a', None, dest="audit",
help="Start an audit using intf1[,intf2][:bpf_filter]")
return opt
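    # Hedged illustration (not part of the application): how the -a argument
    # parsed above is interpreted, e.g. "eth0,eth1:tcp port 80". Mirrors the
    # splitting done in _idle(); the method name is an assumption.
    @staticmethod
    def _demo_parse_audit(audit):
        dev1, dev2, bpf_filter = audit, '', ''
        try:
            dev1, dev2 = audit.split(',', 1)
            dev2, bpf_filter = dev2.split(':', 1)
        except:
            pass
        return dev1, dev2, bpf_filter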
| gpl-2.0 |
noba3/KoTos | addons/plugin.video.genesis/resources/lib/indexers/phstreams.py | 28 | 28077 | # -*- coding: utf-8 -*-
'''
Phoenix Add-on
Copyright (C) 2015 Blazetamer
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,re,sys,urllib,urlparse
from resources.lib.libraries import cache
from resources.lib.libraries import cachemeta
from resources.lib.libraries import control
from resources.lib.libraries import client
from resources.lib.libraries import workers
from resources.lib.libraries import views
phLink = 'http://mecca.watchkodi.com/phstreams.xml'
phSearch = 'http://%s/search/search.xml'
phTest = 'testings.xml'
def getCategory():
getDirectory('0', phLink, '0', '0', '0', '0', '0', close=False)
addCategoryItem('NHL', 'nhlDirectory', 'hockey.jpg')
addCategoryItem(control.lang(30701).encode('utf-8'), 'openSettings', 'settings.png')
addCategoryItem(control.lang(30721).encode('utf-8'), 'downloader', 'downloader.png')
addCategoryItem(control.lang(30702).encode('utf-8'), 'search', 'search.png')
if phTest in control.listDir(control.dataPath)[1]:
addCategoryItem('Testings', 'localDirectory', 'home.png')
endCategory()
def localDirectory():
getDirectory('0', os.path.join(control.dataPath, phTest), '0', '0', '0', '0', '0', local=True)
def getDirectory(name, url, audio, image, fanart, playable, content, close=True, local=False):
if local == True:
f = control.openFile(url) ; result = f.read() ; f.close()
else:
result = cache.get(client.request, 0, url)
result = str(result).replace('\r','').replace('\n','').replace('\t','').replace(' ','')
try: fanart = re.findall('<fanart>(.+?)</fanart>', result)[0]
except: fanart = '0'
try:
notify = re.compile('<notify>(.+?)</notify>').findall(result)[0]
vip = re.findall('<poster>(.+?)</poster>', result)[0]
if not re.search('[a-zA-Z]', vip): raise Exception()
        version = re.findall('<new>(.+?)</new>', notify)[0]
        if not version.isdigit(): raise Exception()
        # define message() after version so the closure is bound when cached
        def message(vip): return (vip + version)
        check = cache.get(message, 600000000, vip, table='rel_vip')
if check == (vip+version): raise Exception()
title = '[B]Announcement From %s![/B]' % vip
msg1 = re.findall('<message1>(.+?)</message1>', notify)[0]
msg2 = re.findall('<message2>(.+?)</message2>', notify)[0]
msg3 = re.findall('<message3>(.+?)</message3>', notify)[0]
check = cache.get(message, 0, vip, table='rel_vip')
control.dialog.ok(str(title), str(msg1), str(msg2), str(msg3))
except:
pass
infos = re.compile('<info>(.+?)</info>').findall(result)
for info in infos:
try:
name = re.findall('<message>(.+?)</message>', info)[0]
try: image = re.findall('<thumbnail>(.+?)</thumbnail>', info)[0]
except: image = '0'
addDirectoryItem(name, '0', '0', image, image, fanart, '0', '0', {})
except:
pass
popups = re.compile('<popup>(.+?)</popup>').findall(result)
for popup in popups:
try:
name = re.findall('<name>(.+?)</name>', popup)[0]
url = re.findall('<popImage>(.+?)</popImage>', popup)[0]
try: image = re.findall('<thumbnail>(.+?)</thumbnail>', popup)[0]
except: image = '0'
try: audio = re.findall('<sound>(.+?)</sound>', popup)[0]
except: audio = '0'
addDirectoryItem(name, url, 'openDialog', image, image, fanart, audio, '0', {})
except:
pass
special = re.compile('<name>([^<]+)</name><link>([^<]+)</link><thumbnail>([^<]+)</thumbnail><date>([^<]+)</date>').findall(result)
for name, url, image, date in special:
if re.search(r'\d+', date): name += ' [COLOR red] Updated %s[/COLOR]' % date
addDirectoryItem(name, url, 'ndmode', image, image, fanart, '0', '0', {})
special = re.compile('<name>([^<]+)</name><link>([^<]+)</link><thumbnail>([^<]+)</thumbnail><mode>([^<]+)</mode>').findall(result)
for name, url, image, action in special:
addDirectoryItem(name, url, action, image, image, fanart, '0', '0', {})
meta = False
try: content = re.findall('<meta>(.+?)</meta>', result)[0]
except: content = '0'
try: tvshow = re.findall('<tvshow>(.+?)</tvshow>', result)[0]
except: tvshow = '0'
if content in ['seasons', 'episodes'] and tvshow == '0':
content = '0'
if content in ['movies', 'tvshows'] and control.setting('meta') == 'true':
try:
from metahandler import metahandlers
metaget = metahandlers.MetaData(preparezip=False)
meta = True
except:
meta = False
elif content in ['seasons', 'episodes']:
try:
from metahandler import metahandlers
metaget = metahandlers.MetaData(preparezip=False)
#tvd = metaget.get_meta('tvshow', tvshow)
tvd = cachemeta.get(metaget.get_meta, 24, 'tvshow', tvshow, '', '', '')
except:
tvd = {}
dirs = re.compile('<dir>(.+?)</dir>').findall(result)
totalItems = len(dirs)
for dir in dirs:
try:
data = {}
name = re.findall('<name>(.+?)</name>', dir)[0]
url = re.findall('<link>(.+?)</link>', dir)[0]
try: image = re.findall('<thumbnail>(.+?)</thumbnail>', dir)[0]
except: image = '0'
try: fanart2 = re.findall('<fanart>(.+?)</fanart>', dir)[0]
except: fanart2 = fanart
if meta == True and content =='tvshows':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title, 'tvshowtitle': title}
#data = metaget.get_meta('tvshow', title)
data = cachemeta.get(metaget.get_meta, 24, 'tvshow', title, '', '', '')
metafanart = data['backdrop_url']
if not metafanart == '': fanart2 = metafanart
except:
pass
elif content =='tvshows':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title, 'tvshowtitle': title}
except:
pass
elif content =='seasons':
try:
title = cleantitle(tvshow).encode('utf-8')
data = {'title': title, 'tvshowtitle': title}
data.update(tvd)
metafanart = tvd['backdrop_url']
if not metafanart == '': fanart2 = metafanart
except:
pass
addDirectoryItem(name, url, 'ndmode', image, image, fanart2, '0', content, data, totalItems=totalItems)
except:
pass
items = re.compile('<item>(.+?)</item>').findall(result)
try: sort = re.findall('<sort>(.+?)</sort>', result)[0]
except: sort = ''
if sort == 'yes': items = sorted(items)
totalItems = len(items)
for item in items:
try:
data = {}
name = re.findall('<title>(.+?)</title>', item)[0]
url = re.findall('<link>(.+?)</link>', item)[0]
try: image = image2 = re.findall('<thumbnail>(.+?)</thumbnail>', item)[0]
except: image = image2 = '0'
try: fanart2 = re.findall('<fanart>(.+?)</fanart>', item)[0]
except: fanart2 = fanart
if meta == True and content == 'movies':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title}
title, year = re.compile('(.+?)[(](\d{4})[)]').findall(name)[0]
title = cleantitle(title).encode('utf-8')
data = {'title': title, 'year': year}
#data = metaget.get_meta('movie', title, year=year)
data = cachemeta.get(metaget.get_meta, 24, 'movie', title, '', '', year)
metaimage = data['cover_url']
if not metaimage == '': image = metaimage
metafanart = data['backdrop_url']
if not metafanart == '': fanart2 = metafanart
except:
pass
elif content =='movies':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title}
title, year = re.compile('(.+?)[(](\d{4})[)]').findall(name)[0]
title = cleantitle(title).encode('utf-8')
data = {'title': title, 'year': year}
except:
pass
elif content == 'episodes':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title, 'tvshowtitle': tvshow}
except:
pass
try:
i = cleaneptitle(tvshow, title)
title, season, episode = i[0].encode('utf-8'), i[1], i[2]
data = {'title': title, 'tvshowtitle': tvshow, 'season': season, 'episode': episode}
except:
pass
try:
data.update({'year': tvd['year'], 'imdb_id' : tvd['imdb_id'], 'tvdb_id' : tvd['tvdb_id'], 'tvshowtitle': tvd['TVShowTitle'], 'genre' : tvd['genre'], 'studio': tvd['studio'], 'status': tvd['status'], 'duration' : tvd['duration'], 'rating': tvd['rating'], 'mpaa' : tvd['mpaa'], 'plot': tvd['plot'], 'cast': tvd['cast']})
metafanart = tvd['backdrop_url']
if not metafanart == '': image = fanart2 = metafanart
except:
pass
if 'sublink' in url:
addDirectoryItem(name, url, 'subDirectory', image, image2, fanart2, '0', content, data, tvshow=tvshow, totalItems=totalItems, isFolder=True)
else:
addDirectoryItem(name, url, 'resolveUrl', image, image2, fanart2, '0', content, data, totalItems=totalItems, isFolder=False)
except:
pass
endDirectory(content, close)
def subDirectory(name, url, audio, image, fanart, playable, tvshow, content):
match = re.compile('<sublink>(.+?)</sublink>').findall(url)
if len(match) == 0: return
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title}
except:
pass
try:
if not content == 'movies': raise Exception()
title = cleantitle(name).encode('utf-8')
data = {'title': title}
title, year = re.compile('(.+?)[(](\d{4})[)]').findall(name)[0]
title = cleantitle(title).encode('utf-8')
data = {'title': title, 'year': year}
from metahandler import metahandlers
metaget = metahandlers.MetaData(preparezip=False)
#data = metaget.get_meta('movie', title, year=year)
data = cachemeta.get(metaget.get_meta, 24, 'movie', title, '', '', year)
metaimage = data['cover_url']
if not metaimage == '': image = metaimage
metafanart = data['backdrop_url']
if not metafanart == '': fanart = metafanart
except:
pass
try:
if tvshow == '0' and not content == 'episodes': raise Exception()
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title, 'tvshowtitle': tvshow}
except:
pass
try:
i = cleaneptitle(tvshow, title)
title, season, episode = i[0].encode('utf-8'), i[1], i[2]
data = {'title': title, 'tvshowtitle': tvshow, 'season': season, 'episode': episode}
except:
pass
from metahandler import metahandlers
metaget = metahandlers.MetaData(preparezip=False)
#tvd = metaget.get_meta('tvshow', tvshow)
tvd = cachemeta.get(metaget.get_meta, 24, 'tvshow', tvshow, '', '', '')
data.update({'year': tvd['year'], 'imdb_id' : tvd['imdb_id'], 'tvdb_id' : tvd['tvdb_id'], 'tvshowtitle': tvd['TVShowTitle'], 'genre' : tvd['genre'], 'studio': tvd['studio'], 'status': tvd['status'], 'duration' : tvd['duration'], 'rating': tvd['rating'], 'mpaa' : tvd['mpaa'], 'plot': tvd['plot'], 'cast': tvd['cast']})
metafanart = tvd['backdrop_url']
if not metafanart == '': image = fanart = metafanart
except:
pass
for i in range(0, len(match)):
url = match[i]
label = '%s %s %s' % (name, control.lang(30704).encode('utf-8'), str(i+1))
addDirectoryItem(label, url, 'resolveUrl', image, image, fanart, '0', content, data, isFolder=False)
control.directory(int(sys.argv[1]), cacheToDisc=True)
def getSearch():
addDirectoryItem('%s...' % control.lang(30702).encode('utf-8'), '0', 'searchDirectory', '0', '0', '0', '0', '0', {})
addDirectoryItem(control.lang(30703).encode('utf-8'), '0', 'clearSearch', '0', '0', '0', '0', '0', {})
try:
    # cache.get doubles as persistent storage here: on a hit it returns the
    # stored search history; on a miss search() yields None and the loop
    # below simply falls into the except clause.
    def search(): return
    result = cache.get(search, 600000000, table='rel_srch')
for q in result:
try: addDirectoryItem('%s...' % q, q, 'searchDirectory2', '0', '0', '0', '0', '0', {})
except: pass
except:
pass
control.directory(int(sys.argv[1]), cacheToDisc=True)
def searchDirectory(query=None):
if (query == None or query == ''):
keyboard = control.keyboard('', control.lang(30702).encode('utf-8'))
keyboard.doModal()
if not (keyboard.isConfirmed()): return
query = keyboard.getText()
if (query == None or query == ''): return
# On a hit this returns the previously stored searches; on a miss it seeds
# the table with the current query.
def search(): return [query]
result = cache.get(search, 600000000, table='rel_srch')
# De-duplicate while preserving order, then overwrite the stored list
# (a timeout of 0 forces cache.get to recompute and store).
def search(): return [x for y,x in enumerate((result + [query])) if x not in (result + [query])[:y]]
result = cache.get(search, 0, table='rel_srch')
global global_search ; global_search = []
def worker(url): global_search.append(str(client.request(url)))
servers = client.request(phLink)
servers = str(servers).replace('\n','')
servers = re.findall('</name><link>(.+?)</link>', servers)
servers = [urlparse.urlparse(i).netloc for i in servers]
servers = [phSearch % i for i in servers if not 'mecca' in i]
threads = []
for server in servers: threads.append(workers.Thread(worker, server))
[i.start() for i in threads]
[i.join() for i in threads]
urls = global_search ; global_search = []
urls = [str(i).replace('\n','') for i in urls]
urls = [re.findall('<link>(.+?)</link>', i)[:30] for i in urls]
urls = sum(urls, [])
threads = []
for url in urls: threads.append(workers.Thread(worker, url))
[i.start() for i in threads]
[i.join() for i in threads]
links = global_search ; global_search = []
for link in links:
try:
link = str(link).replace('\r','').replace('\n','').replace('\t','').replace(' ','')
try: fanart = re.findall('<fanart>(.+?)</fanart>', link)[0]
except: fanart = '0'
try: vip = re.findall('<poster>(.+?)</poster>', link)[0]
except: vip = ''
if vip == 'Team Phoenix': vip = ''
try: content = re.findall('<meta>(.+?)</meta>', link)[0]
except: content = '0'
try: tvshow = re.findall('<tvshow>(.+?)</tvshow>', link)[0]
except: tvshow = '0'
if content in ['seasons', 'episodes'] and tvshow == '0':
content = '0'
dirs = re.compile('<dir>(.+?)</dir>').findall(link)
for dir in dirs:
try:
data = {}
name = re.findall('<name>(.+?)</name>', dir)[0]
name = cleantitle(name)
if query.lower() not in name.lower(): raise Exception()
url = re.findall('<link>(.+?)</link>', dir)[0]
try: image = re.findall('<thumbnail>(.+?)</thumbnail>', dir)[0]
except: image = '0'
try: fanart2 = re.findall('<fanart>(.+?)</fanart>', dir)[0]
except: fanart2 = fanart
if content =='tvshows':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title, 'tvshowtitle': title}
except:
pass
if re.search('[a-zA-Z]', vip): name += ' [COLOR orange]%s[/COLOR]' % vip
addDirectoryItem(name, url, 'ndmode', image, image, fanart2, '0', content, data)
except:
pass
items = re.compile('<item>(.+?)</item>').findall(link)
for item in items:
try:
data = {}
name = re.findall('<title>(.+?)</title>', item)[0]
name = cleantitle(name)
if query.lower() not in name.lower(): raise Exception()
url = re.findall('<link>(.+?)</link>', item)[0]
try: image = re.findall('<thumbnail>(.+?)</thumbnail>', item)[0]
except: image = '0'
try: fanart2 = re.findall('<fanart>(.+?)</fanart>', item)[0]
except: fanart2 = fanart
if content =='movies':
try:
title = cleantitle(name).encode('utf-8')
data = {'title': title}
title, year = re.compile('(.+?)[(](\d{4})[)]').findall(name)[0]
title = cleantitle(title).encode('utf-8')
data = {'title': title, 'year': year}
except:
pass
if re.search('[a-zA-Z]', vip): name += ' [COLOR orange]%s[/COLOR]' % vip
if 'sublink' in url:
addDirectoryItem(name, url, 'subDirectory', image, image, fanart2, '0', content, data, isFolder=True)
else:
addDirectoryItem(name, url, 'resolveUrl', image, image, fanart2, '0', content, data, isFolder=False)
except:
pass
except:
pass
control.directory(int(sys.argv[1]), cacheToDisc=True)
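# Illustrative sketch (not part of the original add-on): the scatter/gather
# pattern searchDirectory() uses above, rewritten with the standard library's
# threading module instead of the add-on's workers.Thread helper. The `fetch`
# callable is a hypothetical stand-in for client.request().
def _fan_out_example(urls, fetch=lambda u: u):
    import threading
    results = []
    lock = threading.Lock()
    def worker(url):
        data = fetch(url)
        with lock:
            results.append(data)
    threads = [threading.Thread(target=worker, args=(u,)) for u in urls]
    [t.start() for t in threads]
    [t.join() for t in threads]
    return results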
def clearSearch():
cache.clear('rel_srch')
control.refresh()
def resolveUrl(name, url, audio, image, fanart, playable, content):
try:
if '.f4m' in url:
label = cleantitle(name)
ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
if not ext == 'f4m': raise Exception()
from resources.lib.libraries.f4mproxy.F4mProxy import f4mProxyHelper
return f4mProxyHelper().playF4mLink(url, label, None, None,'',image)
#legacy issue, will be removed later
if 'afdah.org' in url and not '</source>' in url: url += '<source>afdah</source>'
if '</source>' in url:
source = re.compile('<source>(.+?)</source>').findall(url)[0]
url = re.compile('(.+?)<source>').findall(url)[0]
for i in ['_mv', '_tv', '_mv_tv']:
try: call = __import__('resources.lib.sources.%s%s' % (source, i), globals(), locals(), ['object'], -1).source()
except: pass
from resources.lib import sources ; d = sources.sources()
url = call.get_sources(url, d.hosthdfullDict, d.hostsdfullDict, d.hostlocDict)
if type(url) == list and len(url) == 1:
url = url[0]['url']
elif type(url) == list:
url = sorted(url, key=lambda k: k['quality'])
for i in url: i.update((k, '720p') for k, v in i.iteritems() if v == 'HD')
for i in url: i.update((k, '480p') for k, v in i.iteritems() if v == 'SD')
q = ['[B]%s[/B] | %s' % (i['source'].upper(), i['quality'].upper()) for i in url]
u = [i['url'] for i in url]
select = control.selectDialog(q)
if select == -1: return
url = u[select]
url = call.resolve(url)
from resources.lib import resolvers
host = (urlparse.urlparse(url).netloc).rsplit('.', 1)[0].rsplit('.')[-1]
url = resolvers.request(url)
if type(url) == list and len(url) == 1:
url = url[0]['url']
elif type(url) == list:
url = sorted(url, key=lambda k: k['quality'])
for i in url: i.update((k, '720p') for k, v in i.iteritems() if v == 'HD')
for i in url: i.update((k, '480p') for k, v in i.iteritems() if v == 'SD')
q = ['[B]%s[/B] | %s' % (host.upper(), i['quality'].upper()) for i in url]
u = [i['url'] for i in url]
select = control.selectDialog(q)
if select == -1: return
url = u[select]
if url == None: raise Exception()
except:
    return control.infoDialog(control.lang(30705).encode('utf-8'))
if playable == 'true':
item = control.item(path=url)
return control.resolve(int(sys.argv[1]), True, item)
else:
label = cleantitle(name)
item = control.item(path=url, iconImage=image, thumbnailImage=image)
item.setInfo( type='Video', infoLabels = {'title': label} )
control.playlist.clear()
control.player.play(url, item)
def addCategoryItem(name, action, image, isFolder=True):
u = '%s?action=%s' % (sys.argv[0], str(action))
image = control.addonInfo('path') + '/resources/media/phstreams/' + image
item = control.item(name, iconImage=image, thumbnailImage=image)
item.addContextMenuItems([], replaceItems=False)
item.setProperty('Fanart_Image', control.addonInfo('fanart'))
control.addItem(handle=int(sys.argv[1]),url=u,listitem=item,isFolder=isFolder)
def addDirectoryItem(name, url, action, image, image2, fanart, audio, content, data, tvshow='0', totalItems=0, isFolder=True):
if not str(image).lower().startswith('http'): image = control.addonInfo('icon')
if not str(image2).lower().startswith('http'): image2 = control.addonInfo('icon')
if not str(fanart).lower().startswith('http'): fanart = control.addonInfo('fanart')
if content in ['movies', 'episodes']: playable = 'true'
else: playable = 'false'
sysaddon = sys.argv[0]
u = '%s?name=%s&url=%s&audio=%s&image=%s&fanart=%s&playable=%s&tvshow=%s&content=%s&action=%s' % (sysaddon, urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(audio), urllib.quote_plus(image), urllib.quote_plus(fanart), urllib.quote_plus(playable), str(tvshow), str(content), str(action))
cm = []
if content in ['movies', 'tvshows']:
data.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, urllib.quote_plus(name))})
cm.append((control.lang(30707).encode('utf-8'), 'RunPlugin(%s?action=trailer&name=%s)' % (sysaddon, urllib.quote_plus(name))))
if not 'plot' in data:
data.update({'plot': control.lang(30706).encode('utf-8')})
if content == 'movies':
cm.append((control.lang(30708).encode('utf-8'), 'XBMC.Action(Info)'))
elif content in ['tvshows', 'seasons']:
cm.append((control.lang(30709).encode('utf-8'), 'XBMC.Action(Info)'))
elif content == 'episodes':
cm.append((control.lang(30710).encode('utf-8'), 'XBMC.Action(Info)'))
if content == 'movies' and not isFolder == True:
downloadFile = name
try: downloadFile = '%s (%s)' % (data['title'], data['year'])
except: pass
cm.append((control.lang(30722).encode('utf-8'), 'RunPlugin(%s?action=addDownload&name=%s&url=%s&image=%s)' % (sysaddon, urllib.quote_plus(downloadFile), urllib.quote_plus(url), urllib.quote_plus(image))))
elif content == 'episodes' and not isFolder == True:
downloadFile = name
try: downloadFile = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
except: pass
cm.append((control.lang(30722).encode('utf-8'), 'RunPlugin(%s?action=addDownload&name=%s&url=%s&image=%s)' % (sysaddon, urllib.quote_plus(downloadFile), urllib.quote_plus(url), urllib.quote_plus(image))))
if content == 'movies':
cm.append((control.lang(30711).encode('utf-8'), 'RunPlugin(%s?action=addView&content=movies)' % sysaddon))
elif content == 'tvshows':
cm.append((control.lang(30712).encode('utf-8'), 'RunPlugin(%s?action=addView&content=tvshows)' % sysaddon))
elif content == 'seasons':
cm.append((control.lang(30713).encode('utf-8'), 'RunPlugin(%s?action=addView&content=seasons)' % sysaddon))
elif content == 'episodes':
cm.append((control.lang(30714).encode('utf-8'), 'RunPlugin(%s?action=addView&content=episodes)' % sysaddon))
item = control.item(name, iconImage='DefaultFolder.png', thumbnailImage=image)
try: item.setArt({'poster': image2, 'tvshow.poster': image2, 'season.poster': image2, 'banner': image, 'tvshow.banner': image, 'season.banner': image})
except: pass
item.addContextMenuItems(cm, replaceItems=False)
item.setProperty('Fanart_Image', fanart)
if playable == 'true': item.setProperty('IsPlayable', 'true')
item.setInfo(type='Video', infoLabels=data)
control.addItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=totalItems,isFolder=isFolder)
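# Illustrative sketch (not part of the original add-on): the `u` string built
# in addDirectoryItem() above is a plain query string, so Kodi hands it back
# to the add-on where it can be decoded with the same stdlib modules already
# used in this file. The plugin path and values are made-up examples.
def _plugin_url_example():
    base = 'plugin://plugin.video.example/'  # hypothetical plugin handle
    u = '%s?name=%s&action=%s' % (base, urllib.quote_plus('Some Movie (2014)'), 'resolveUrl')
    params = urlparse.parse_qs(urlparse.urlparse(u).query)
    return params['name'][0], params['action'][0]  # ('Some Movie (2014)', 'resolveUrl')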
def endCategory():
if control.skin == 'skin.confluence': control.execute('Container.SetViewMode(500)')
control.directory(int(sys.argv[1]), cacheToDisc=True)
def endDirectory(content, close):
if content in ['movies', 'tvshows', 'seasons', 'episodes']:
control.content(int(sys.argv[1]), content)
if close == True: control.directory(int(sys.argv[1]), cacheToDisc=True)
if close == True and content in ['movies', 'tvshows', 'seasons', 'episodes']:
views.setView(content)
def cleantitle(name):
name = re.sub('(\.|\_|\(|\[|\s)(Link \d*|link \d*)(\.|\_|\)|\]|$)', '', name)
name = re.sub('\(\d{4}.+?\d{4}\)$', '', name)
name = re.sub('\s\[COLOR.+?\].+?\[/COLOR\]|\[/COLOR\]\[COLOR.+?\]\s.+?\[/COLOR\]|\[COLOR.+?\]|\[/COLOR\]', '', name)
name = re.sub('\s\s+', ' ', name)
name = name.strip()
return name
def cleaneptitle(tvshow, name):
try:
p = re.compile('(S\d*E\d*)').findall(name)
p += re.compile('(s\d*e\d*)').findall(name)
p += re.compile('(Season \d* Episode \d*)').findall(name)
p += re.compile('(\d*x Episode \d*)').findall(name)
p += re.compile('(\d*x\d*)').findall(name)
p = p[0]
name = name.replace(tvshow, '').replace(p, '')
name = re.sub('-|:', '', name)
name = re.sub('\s\s+', ' ', name)
name = name.strip()
season = re.compile('(\d*)').findall(p)
season = [i for i in season if i.isdigit()][0]
season = '%01d' % int(season)
episode = re.compile('(\d*)').findall(p)
episode = [i for i in episode if i.isdigit()][-1]
episode = '%01d' % int(episode)
if re.match('[A-Z0-9]', name) == None:
name = '%s S%02dE%02d' % (tvshow, int(season), int(episode))
return (name, season, episode)
except:
return
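# Illustrative sketch (not part of the original add-on): what cleaneptitle()
# above yields for a typical episode label. The show and episode names are
# made up.
def _cleaneptitle_example():
    # The S01E05 token is located, stripped from the label, and parsed into
    # unpadded season/episode strings -> ('Pilot', '1', '5').
    return cleaneptitle('Some Show', 'Some Show S01E05 - Pilot')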
| gpl-2.0 |
netscaler/horizon | openstack_dashboard/dashboards/project/databases/urls.py | 10 | 1085 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns # noqa
from django.conf.urls.defaults import url # noqa
from openstack_dashboard.dashboards.project.databases import views
urlpatterns = patterns(
'',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^launch$', views.LaunchInstanceView.as_view(), name='launch'),
url(r'^(?P<instance_id>[^/]+)/$', views.DetailView.as_view(),
name='detail'),
)
| apache-2.0 |
ChinaQuants/zipline | zipline/utils/data.py | 31 | 12761 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import numpy as np
import pandas as pd
from copy import deepcopy
def _ensure_index(x):
if not isinstance(x, pd.Index):
x = pd.Index(sorted(x))
return x
class RollingPanel(object):
"""
Preallocation strategies for rolling window over expanding data set
Restrictions: major_axis can only be a DatetimeIndex for now
"""
def __init__(self,
window,
items,
sids,
cap_multiple=2,
dtype=np.float64,
initial_dates=None):
self._pos = window
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.dtype = dtype
if initial_dates is None:
self.date_buf = np.empty(self.cap, dtype='M8[ns]') * pd.NaT
elif len(initial_dates) != window:
raise ValueError('initial_dates must be of length window')
else:
self.date_buf = np.hstack(
(
initial_dates,
np.empty(
window * (cap_multiple - 1),
dtype='datetime64[ns]',
),
),
)
self.buffer = self._create_buffer()
@property
def cap(self):
return self.cap_multiple * self._window
@property
def _start_index(self):
return self._pos - self._window
@property
def start_date(self):
return self.date_buf[self._start_index]
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :]
def set_minor_axis(self, minor_axis):
self.minor_axis = _ensure_index(minor_axis)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def set_items(self, items):
self.items = _ensure_index(items)
self.buffer = self.buffer.reindex(items=self.items)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def extend_back(self, missing_dts):
"""
Resizes the buffer to hold a new window with a new cap_multiple.
If cap_multiple is None, then the old cap_multiple is used.
"""
delta = len(missing_dts)
if not delta:
raise ValueError(
'missing_dts must be a non-empty index',
)
self._window += delta
self._pos += delta
self.date_buf = self.date_buf.copy()
self.date_buf.resize(self.cap)
self.date_buf = np.roll(self.date_buf, delta)
old_vals = self.buffer.values
shape = old_vals.shape
nan_arr = np.empty((shape[0], delta, shape[2]))
nan_arr.fill(np.nan)
new_vals = np.column_stack(
(nan_arr,
old_vals,
np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
)
self.buffer = pd.Panel(
data=new_vals,
items=self.items,
minor_axis=self.minor_axis,
major_axis=np.arange(self.cap),
dtype=self.dtype,
)
# Fill the delta with the dates we calculated.
where = slice(self._start_index, self._start_index + delta)
self.date_buf[where] = missing_dts
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
values = frame
if isinstance(frame, pd.DataFrame):
values = frame.values
self.buffer.values[:, self._pos, :] = values.astype(self.dtype)
self.date_buf[self._pos] = tick
self._pos += 1
def get_current(self, item=None, raw=False, start=None, end=None):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
item_indexer = slice(None)
if item:
item_indexer = self.items.get_loc(item)
start_index = self._start_index
end_index = self._pos
# get initial date window
where = slice(start_index, end_index)
current_dates = self.date_buf[where]
def convert_datelike_to_long(dt):
if isinstance(dt, pd.Timestamp):
return dt.asm8
if isinstance(dt, datetime.datetime):
return np.datetime64(dt)
return dt
# constrict further by date
if start:
start = convert_datelike_to_long(start)
start_index += current_dates.searchsorted(start)
if end:
end = convert_datelike_to_long(end)
_end = current_dates.searchsorted(end, 'right')
end_index -= len(current_dates) - _end
where = slice(start_index, end_index)
values = self.buffer.values[item_indexer, where, :]
current_dates = self.date_buf[where]
if raw:
# return copy so we can change it without side effects here
return values.copy()
major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
if values.ndim == 3:
return pd.Panel(values, self.items, major_axis, self.minor_axis,
dtype=self.dtype)
elif values.ndim == 2:
return pd.DataFrame(values, major_axis, self.minor_axis,
dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._start_index, self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._start_index, self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
@property
def window_length(self):
return self._window
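# Illustrative sketch (not part of the original module): minimal use of
# RollingPanel. Assumes a pandas version that still ships pd.Panel (pre-0.25),
# which this module's buffer relies on; the numbers are made up.
def _rolling_panel_example():
    rp = RollingPanel(window=2, items=['price'], sids=[1, 2])
    for i, dt in enumerate(pd.date_range('2014-01-01', periods=3, tz='utc')):
        frame = pd.DataFrame({1: [float(i)], 2: [float(i) + 10]}, index=['price'])
        rp.add_frame(dt, frame)
    # Only the last `window` (here 2) frames remain in view.
    return rp.get_current()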
class MutableIndexRollingPanel(object):
"""
A version of RollingPanel that exists for backwards compatibility with
batch_transform. This is a copy to allow behavior of RollingPanel to drift
away from this without breaking this class.
This code should be considered frozen, and should not be used in the
future. Instead, see RollingPanel.
"""
def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64):
self._pos = 0
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.cap = cap_multiple * window
self.dtype = dtype
self.date_buf = np.empty(self.cap, dtype='M8[ns]')
self.buffer = self._create_buffer()
def _oldest_frame_idx(self):
return max(self._pos - self._window, 0)
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._oldest_frame_idx(), :]
return self.buffer.iloc[:, self._oldest_frame_idx(), :]
def set_sids(self, sids):
self.minor_axis = _ensure_index(sids)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def get_current(self):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
where = slice(self._oldest_frame_idx(), self._pos)
major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
return pd.Panel(self.buffer.values[:, where, :], self.items,
major_axis, self.minor_axis, dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._oldest_frame_idx(), self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._oldest_frame_idx(), self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
if isinstance(frame, pd.DataFrame):
minor_axis = frame.columns
items = frame.index
if set(minor_axis).difference(set(self.minor_axis)) or \
set(items).difference(set(self.items)):
self._update_buffer(frame)
vals = frame.T.astype(self.dtype)
self.buffer.loc[:, self._pos, :] = vals
self.date_buf[self._pos] = tick
self._pos += 1
def _update_buffer(self, frame):
# Get current frame as we only need to care about the data that is in
# the active window
old_buffer = self.get_current()
if self._pos >= self._window:
# Don't count the last major_axis entry if we're past our window,
# since it's about to roll off the end of the panel.
old_buffer = old_buffer.iloc[:, 1:, :]
nans = pd.isnull(old_buffer)
# Find minor_axes that have only nans
# Note that minor is axis 2
non_nan_cols = set(old_buffer.minor_axis[~np.all(nans, axis=(0, 1))])
# Determine new columns to be added
new_cols = set(frame.columns).difference(non_nan_cols)
# Update internal minor axis
self.minor_axis = _ensure_index(new_cols.union(non_nan_cols))
# Same for items (fields)
# Find items axes that have only nans
# Note that items is axis 0
non_nan_items = set(old_buffer.items[~np.all(nans, axis=(1, 2))])
new_items = set(frame.index).difference(non_nan_items)
self.items = _ensure_index(new_items.union(non_nan_items))
# :NOTE:
# There is a simpler and 10x faster way to do this:
#
# Reindex buffer to update axes (automatically adds nans)
# self.buffer = self.buffer.reindex(items=self.items,
# major_axis=np.arange(self.cap),
# minor_axis=self.minor_axis)
#
# However, pandas==0.12.0, for which we remain backwards compatible,
# has a bug in .reindex() that this triggers. Using .update() as before
# seems to work fine.
new_buffer = self._create_buffer()
new_buffer.update(
self.buffer.loc[non_nan_items, :, non_nan_cols])
self.buffer = new_buffer
| apache-2.0 |
metamx/Diamond | src/diamond/handler/graphitepickle.py | 8 | 3126 | # coding=utf-8
"""
Send metrics to a [graphite](http://graphite.wikidot.com/) using the high
performance pickle interface.
Graphite is an enterprise-scale monitoring tool that runs well on cheap
hardware. It was originally designed and written by Chris Davis at Orbitz in
2006 as a side project that ultimately grew to be a foundational monitoring tool.
In 2008, Orbitz allowed Graphite to be released under the open source Apache
2.0 license. Since then Chris has continued to work on Graphite and has
deployed it at other companies including Sears, where it serves as a pillar of
the e-commerce monitoring system. Today many
[large companies](http://graphite.readthedocs.org/en/latest/who-is-using.html)
use it.
- enable it in `diamond.conf` :
` handlers = diamond.handler.graphitepickle.GraphitePickleHandler
`
"""
import struct
from graphite import GraphiteHandler
try:
import cPickle as pickle
pickle # workaround for pyflakes issue #13
except ImportError:
import pickle as pickle
class GraphitePickleHandler(GraphiteHandler):
"""
Overrides the GraphiteHandler class
Sending data to graphite using batched pickle format
"""
def __init__(self, config=None):
"""
Create a new instance of the GraphitePickleHandler
"""
# Initialize GraphiteHandler
GraphiteHandler.__init__(self, config)
# Initialize Data
self.batch = []
# Initialize Options
self.batch_size = int(self.config['batch'])
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(GraphitePickleHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GraphitePickleHandler, self).get_default_config()
config.update({
})
return config
def process(self, metric):
# Convert metric to pickle format
m = (metric.path, (metric.timestamp, metric.value))
# Add the metric to the batch
self.batch.append(m)
# If there are sufficient metrics, then pickle and send
if len(self.batch) >= self.batch_size:
# Log
self.log.debug("GraphitePickleHandler: Sending batch size: %d",
self.batch_size)
# Pickle the batch of metrics
self.metrics = [self._pickle_batch()]
# Send pickled batch
self._send()
# Flush the metric pack down the wire
self.flush()
# Clear Batch
self.batch = []
def _pickle_batch(self):
"""
Pickle the metrics into a form that can be understood
by the graphite pickle connector.
"""
# Pickle
payload = pickle.dumps(self.batch)
# Pack Message
header = struct.pack("!L", len(payload))
message = header + payload
# Return Message
return message
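# Illustrative sketch (not part of the original handler): the wire format
# produced by _pickle_batch() above is a 4-byte big-endian length header
# followed by the pickled batch, which a receiver can unframe like this.
# The sample metric is made up.
def _unframe_example():
    batch = [('servers.web01.cpu', (1416000000, 42.0))]
    payload = pickle.dumps(batch)
    message = struct.pack("!L", len(payload)) + payload
    (length,) = struct.unpack("!L", message[:4])
    return pickle.loads(message[4:4 + length])  # round-trips to `batch`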
| mit |
zooba/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_vendor/distlib/compat.py | 60 | 41404 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
if sys.version_info[0] < 3: # pragma: no cover
from StringIO import StringIO
string_types = basestring,
text_type = unicode
from types import FileType as file_type
import __builtin__ as builtins
import ConfigParser as configparser
from ._backport import shutil
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
pathname2url, ContentTooShortError, splittype)
def quote(s):
if isinstance(s, unicode):
s = s.encode('utf-8')
return _quote(s)
import urllib2
from urllib2 import (Request, urlopen, URLError, HTTPError,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPHandler, HTTPRedirectHandler,
build_opener)
if ssl:
from urllib2 import HTTPSHandler
import httplib
import xmlrpclib
import Queue as queue
from HTMLParser import HTMLParser
import htmlentitydefs
raw_input = raw_input
from itertools import ifilter as filter
from itertools import ifilterfalse as filterfalse
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
else: # pragma: no cover
from io import StringIO
string_types = str,
text_type = str
from io import TextIOWrapper as file_type
import builtins
import configparser
import shutil
from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
unquote, urlsplit, urlunsplit, splittype)
from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
pathname2url,
HTTPBasicAuthHandler, HTTPPasswordMgr,
HTTPHandler, HTTPRedirectHandler,
build_opener)
if ssl:
from urllib.request import HTTPSHandler
from urllib.error import HTTPError, URLError, ContentTooShortError
import http.client as httplib
import urllib.request as urllib2
import xmlrpc.client as xmlrpclib
import queue
from html.parser import HTMLParser
import html.entities as htmlentitydefs
raw_input = input
from itertools import filterfalse
filter = filter
try:
from ssl import match_hostname, CertificateError
except ImportError: # pragma: no cover
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
parts = dn.split('.')
leftmost, remainder = parts[0], parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
try:
from types import SimpleNamespace as Container
except ImportError: # pragma: no cover
class Container(object):
"""
A generic container for when multiple values need to be returned
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
try:
from shutil import which
except ImportError: # pragma: no cover
# Implementation from Python 3.3
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
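# Illustrative sketch (not part of the original distlib module): whichever
# which() is in scope above scans each PATH entry (plus PATHEXT extensions
# on Windows) for a matching executable. 'python' is just an example name.
def _which_example():
    return which('python', mode=os.F_OK | os.X_OK)  # a path string, or None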
# ZipFile is a context manager in 2.7, but not in 2.6
from zipfile import ZipFile as BaseZipFile
if hasattr(BaseZipFile, '__enter__'): # pragma: no cover
ZipFile = BaseZipFile
else: # pragma: no cover
from zipfile import ZipExtFile as BaseZipExtFile
class ZipExtFile(BaseZipExtFile):
def __init__(self, base):
self.__dict__.update(base.__dict__)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
# return None, so if an exception occurred, it will propagate
class ZipFile(BaseZipFile):
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
# return None, so if an exception occurred, it will propagate
def open(self, *args, **kwargs):
base = BaseZipFile.open(self, *args, **kwargs)
return ZipExtFile(base)
try:
from platform import python_implementation
except ImportError: # pragma: no cover
def python_implementation():
"""Return a string identifying the Python implementation."""
if 'PyPy' in sys.version:
return 'PyPy'
if os.name == 'java':
return 'Jython'
if sys.version.startswith('IronPython'):
return 'IronPython'
return 'CPython'
try:
import sysconfig
except ImportError: # pragma: no cover
from ._backport import sysconfig
try:
callable = callable
except NameError: # pragma: no cover
from collections import Callable
def callable(obj):
return isinstance(obj, Callable)
try:
fsencode = os.fsencode
fsdecode = os.fsdecode
except AttributeError: # pragma: no cover
# Issue #99: on some systems (e.g. containerised),
# sys.getfilesystemencoding() returns None, and we need a real value,
# so fall back to utf-8. From the CPython 2.7 docs relating to Unix and
# sys.getfilesystemencoding(): the return value is "the user’s preference
# according to the result of nl_langinfo(CODESET), or None if the
# nl_langinfo(CODESET) failed."
_fsencoding = sys.getfilesystemencoding() or 'utf-8'
if _fsencoding == 'mbcs':
_fserrors = 'strict'
else:
_fserrors = 'surrogateescape'
def fsencode(filename):
if isinstance(filename, bytes):
return filename
elif isinstance(filename, text_type):
return filename.encode(_fsencoding, _fserrors)
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
def fsdecode(filename):
if isinstance(filename, text_type):
return filename
elif isinstance(filename, bytes):
return filename.decode(_fsencoding, _fserrors)
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
try:
from tokenize import detect_encoding
except ImportError: # pragma: no cover
from codecs import BOM_UTF8, lookup
import re
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
try:
filename = readline.__self__.name
except AttributeError:
filename = None
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
# Decode as UTF-8. Either the line is an encoding declaration,
# in which case it should be pure ASCII, or it must be UTF-8
# per default encoding.
line_string = line.decode('utf-8')
except UnicodeDecodeError:
msg = "invalid or missing encoding declaration"
if filename is not None:
msg = '{} for {!r}'.format(msg, filename)
raise SyntaxError(msg)
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
if filename is None:
msg = "unknown encoding: " + encoding
else:
msg = "unknown encoding for {!r}: {}".format(filename,
encoding)
raise SyntaxError(msg)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
if filename is None:
msg = 'encoding problem: utf-8'
else:
msg = 'encoding problem for {!r}: utf-8'.format(filename)
raise SyntaxError(msg)
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
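# Illustrative sketch (not part of the original distlib module): feeding
# detect_encoding() a readline callable over in-memory source bytes. The
# sample source line is made up.
def _detect_encoding_example():
    import io
    source = io.BytesIO(b'# -*- coding: latin-1 -*-\nx = 1\n')
    encoding, lines = detect_encoding(source.readline)
    # encoding == 'iso-8859-1'; `lines` holds the raw lines consumed so far
    return encoding, lines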
# For converting & <-> & etc.
try:
from html import escape
except ImportError:
from cgi import escape
if sys.version_info[:2] < (3, 4):
unescape = HTMLParser().unescape
else:
from html import unescape
try:
from collections import ChainMap
except ImportError: # pragma: no cover
from collections import MutableMapping
try:
from reprlib import recursive_repr as _recursive_repr
except ImportError:
def _recursive_repr(fillvalue='...'):
'''
Decorator to make a repr function return fillvalue for a recursive
call
'''
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can be
accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
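# Illustrative sketch (not part of the original distlib module): lookups fall
# through the maps in order while writes only touch maps[0]. The values are
# made up.
def _chainmap_example():
    defaults = {'color': 'red', 'user': 'guest'}
    overrides = {'user': 'admin'}
    cm = ChainMap(overrides, defaults)
    assert cm['user'] == 'admin'    # found in the first map
    assert cm['color'] == 'red'     # falls through to the second map
    cm['color'] = 'blue'            # write lands in `overrides` only
    assert defaults['color'] == 'red'
    return dict(cm)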
try:
from importlib.util import cache_from_source # Python >= 3.4
except ImportError: # pragma: no cover
try:
from imp import cache_from_source
except ImportError: # pragma: no cover
def cache_from_source(path, debug_override=None):
assert path.endswith('.py')
if debug_override is None:
debug_override = __debug__
if debug_override:
suffix = 'c'
else:
suffix = 'o'
return path + suffix
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
## {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running=None):
'od.__repr__() <==> repr(od)'
if not _repr_running: _repr_running = {}
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
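# Illustrative sketch (not part of the original distlib module): insertion
# order is preserved, and popitem() pops LIFO by default or FIFO with
# last=False. The keys are made up.
def _ordereddict_example():
    od = OrderedDict([('first', 1), ('second', 2), ('third', 3)])
    assert list(od) == ['first', 'second', 'third']
    assert od.popitem() == ('third', 3)            # LIFO
    assert od.popitem(last=False) == ('first', 1)  # FIFO
    return od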
try:
from logging.config import BaseConfigurator, valid_ident
except ImportError: # pragma: no cover
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
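    # Worked example: resolve('logging.handlers.RotatingFileHandler') imports
    # 'logging', hits an AttributeError on 'handlers', imports
    # 'logging.handlers' to recover, and finally returns the handler class.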
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
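    # Worked example (hypothetical config): for 'cfg://handlers.email.toaddrs[0]',
    # convert() strips the 'cfg://' prefix and this method walks the rest:
    #     'handlers'    -> d = self.config['handlers']
    #     '.email'      -> d = d['email']
    #     '.toaddrs[0]' -> d = d['toaddrs'][0]
    # returning the first address in the list.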
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, string_types):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
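    # Worked example (hypothetical config): given
    #     {'()': 'my.pkg.Widget', 'size': 3, '.': {'label': 'demo'}}
    # this resolves my.pkg.Widget, instantiates it as Widget(size=3), then
    # sets instance.label = 'demo' before returning the instance.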
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
| apache-2.0 |
DavidLP/home-assistant | tests/components/automation/test_time_pattern.py | 11 | 6175 | """The tests for the time_pattern automation."""
import pytest
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
import homeassistant.components.automation as automation
from tests.common import async_fire_time_changed, mock_component
from tests.components.automation import common
from tests.common import async_mock_service
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, 'test', 'automation')
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, 'group')
async def test_if_fires_when_hour_matches(hass, calls):
"""Test for firing if hour is matching."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time_pattern',
'hours': 0,
'minutes': '*',
'seconds': '*',
},
'action': {
'service': 'test.automation'
}
}
})
async_fire_time_changed(hass, dt_util.utcnow().replace(hour=0))
await hass.async_block_till_done()
assert 1 == len(calls)
await common.async_turn_off(hass)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow().replace(hour=0))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_when_minute_matches(hass, calls):
"""Test for firing if minutes are matching."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time_pattern',
'hours': '*',
'minutes': 0,
'seconds': '*',
},
'action': {
'service': 'test.automation'
}
}
})
async_fire_time_changed(hass, dt_util.utcnow().replace(minute=0))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_when_second_matches(hass, calls):
"""Test for firing if seconds are matching."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time_pattern',
'hours': '*',
'minutes': '*',
'seconds': 0,
},
'action': {
'service': 'test.automation'
}
}
})
async_fire_time_changed(hass, dt_util.utcnow().replace(second=0))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_when_all_matches(hass, calls):
"""Test for firing if everything matches."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time_pattern',
'hours': 1,
'minutes': 2,
'seconds': 3,
},
'action': {
'service': 'test.automation'
}
}
})
async_fire_time_changed(hass, dt_util.utcnow().replace(
hour=1, minute=2, second=3))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_periodic_seconds(hass, calls):
"""Test for firing periodically every second."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time_pattern',
'hours': '*',
'minutes': '*',
'seconds': "/2",
},
'action': {
'service': 'test.automation'
}
}
})
async_fire_time_changed(hass, dt_util.utcnow().replace(
hour=0, minute=0, second=2))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_periodic_minutes(hass, calls):
"""Test for firing periodically every minute."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time_pattern',
'hours': '*',
'minutes': "/2",
'seconds': '*',
},
'action': {
'service': 'test.automation'
}
}
})
async_fire_time_changed(hass, dt_util.utcnow().replace(
hour=0, minute=2, second=0))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_if_fires_periodic_hours(hass, calls):
"""Test for firing periodically every hour."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time_pattern',
'hours': "/2",
'minutes': '*',
'seconds': '*',
},
'action': {
'service': 'test.automation'
}
}
})
async_fire_time_changed(hass, dt_util.utcnow().replace(
hour=2, minute=0, second=0))
await hass.async_block_till_done()
assert 1 == len(calls)
async def test_default_values(hass, calls):
"""Test for firing at 2 minutes every hour."""
assert await async_setup_component(hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'time_pattern',
'minutes': "2",
},
'action': {
'service': 'test.automation'
}
}
})
async_fire_time_changed(hass, dt_util.utcnow().replace(
hour=1, minute=2, second=0))
await hass.async_block_till_done()
assert 1 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow().replace(
hour=1, minute=2, second=1))
await hass.async_block_till_done()
assert 1 == len(calls)
async_fire_time_changed(hass, dt_util.utcnow().replace(
hour=2, minute=2, second=0))
await hass.async_block_till_done()
assert 2 == len(calls)
| apache-2.0 |
livingbio/Snoopy | snoopy/server/bin/sslstripSnoopy/build/lib.linux-i686-2.7/sslstrip/StrippingProxy.py | 108 | 1244 | # Copyright (c) 2004-2009 Moxie Marlinspike
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
from twisted.web.http import HTTPChannel
from ClientRequest import ClientRequest
class StrippingProxy(HTTPChannel):
'''sslstrip is, at heart, a transparent proxy server that does some unusual things.
This is the basic proxy server class, where we get callbacks for GET and POST methods.
We then proxy these out using HTTP or HTTPS depending on what information we have about
the (connection, client_address) tuple in our cache.
'''
requestFactory = ClientRequest
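# Hedged usage sketch: this mirrors how sslstrip's entry point typically wires
# the proxy up (the port number is an assumption):
#
#     from twisted.web import http
#     from twisted.internet import reactor
#
#     strippingFactory = http.HTTPFactory(timeout=10)
#     strippingFactory.protocol = StrippingProxy
#     reactor.listenTCP(10000, strippingFactory)
#     reactor.run()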
| mit |
realestate-com-au/python-dashing | dashmat/core_modules/splunk/splunk-sdk-1.3.0/setup.py | 2 | 7139 | #!/usr/bin/env python
#
# Copyright 2011-2014 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils.core import setup, Command
from contextlib import closing
from fnmatch import fnmatch
import os
import splunklib
import tarfile
def run_test_suite():
try:
import unittest2 as unittest
except ImportError:
import unittest
original_cwd = os.path.abspath(os.getcwd())
os.chdir('tests')
suite = unittest.defaultTestLoader.discover('.')
unittest.TextTestRunner().run(suite)
os.chdir(original_cwd)
def run_test_suite_with_junit_output():
try:
import unittest2 as unittest
except ImportError:
import unittest
import xmlrunner
original_cwd = os.path.abspath(os.getcwd())
os.chdir('tests')
suite = unittest.defaultTestLoader.discover('.')
xmlrunner.XMLTestRunner(output='../test-reports').run(suite)
os.chdir(original_cwd)
class CoverageCommand(Command):
"""setup.py command to run code coverage of the test suite."""
description = "Create an HTML coverage report from running the full test suite."
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import coverage
except ImportError:
print("Could not import coverage. Please install it and try again.")
exit(1)
cov = coverage.coverage(source=['splunklib'])
cov.start()
run_test_suite()
cov.stop()
cov.html_report(directory='coverage_report')
class TestCommand(Command):
"""setup.py command to run the whole test suite."""
description = "Run test full test suite."
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
run_test_suite()
class JunitXmlTestCommand(Command):
"""setup.py command to run the whole test suite."""
description = "Run test full test suite with JUnit-formatted output."
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
run_test_suite_with_junit_output()
class DistCommand(Command):
"""setup.py command to create .spl files for modular input and search
command examples"""
description = "Build modular input and search command example .spl files."
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
@staticmethod
def get_python_files(files):
"""Utility function to get .py files from a list"""
python_files = []
for file_name in files:
if file_name.endswith(".py"):
python_files.append(file_name)
return python_files
def run(self):
# Create random_numbers.spl and github_forks.spl
app_names = ['random_numbers', 'github_forks']
splunklib_arcname = "splunklib"
modinput_dir = os.path.join(splunklib_arcname, "modularinput")
if not os.path.exists("build"):
os.makedirs("build")
for app in app_names:
with closing(tarfile.open(os.path.join("build", app + ".spl"), "w")) as spl:
spl.add(
os.path.join("examples", app, app + ".py"),
arcname=os.path.join(app, "bin", app + ".py")
)
spl.add(
os.path.join("examples", app, "default", "app.conf"),
arcname=os.path.join(app, "default", "app.conf")
)
spl.add(
os.path.join("examples", app, "README", "inputs.conf.spec"),
arcname=os.path.join(app, "README", "inputs.conf.spec")
)
splunklib_files = self.get_python_files(os.listdir(splunklib_arcname))
for file_name in splunklib_files:
spl.add(
os.path.join(splunklib_arcname, file_name),
arcname=os.path.join(app, "bin", splunklib_arcname, file_name)
)
modinput_files = self.get_python_files(os.listdir(modinput_dir))
for file_name in modinput_files:
spl.add(
os.path.join(modinput_dir, file_name),
arcname=os.path.join(app, "bin", modinput_dir, file_name)
)
spl.close()
# Create searchcommands_app.spl
sdk_dir = os.path.abspath('.')
def exclude(path):
# TODO: Replace with filter function because exclude is deprecated
basename = os.path.basename(path)
for pattern in ['.DS_Store', '.idea', '*.log', '*.py[co]']:
if fnmatch(basename, pattern):
return True
return False
tarball = os.path.join(sdk_dir, 'build', 'searchcommands_app.spl')
splunklib_arcname = os.path.join(
'searchcommands_app', 'bin', 'splunklib')
manifest = [
(os.path.join(sdk_dir, 'examples', 'searchcommands_app'),
'searchcommands_app'),
(os.path.join(sdk_dir, 'splunklib'),
splunklib_arcname)
]
with closing(tarfile.open(tarball, 'w')) as spl:
for source, target in manifest:
spl.add(source, arcname=target, exclude=exclude)
return
setup(
author="Splunk, Inc.",
author_email="devinfo@splunk.com",
cmdclass={'coverage': CoverageCommand,
'test': TestCommand,
'testjunit': JunitXmlTestCommand,
'dist': DistCommand},
description="The Splunk Software Development Kit for Python.",
license="http://www.apache.org/licenses/LICENSE-2.0",
name="splunk-sdk",
packages = ["splunklib",
"splunklib.modularinput",
"splunklib.searchcommands",
"splunklib.searchcommands.splunk_csv"],
url="http://github.com/splunk/splunk-sdk-python",
version=splunklib.__version__,
classifiers = [
"Programming Language :: Python",
"Development Status :: 3 - Alpha",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries :: Application Frameworks",
],
)
| mit |
indictranstech/phrerp | erpnext/support/doctype/maintenance_visit/maintenance_visit.py | 32 | 3160 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.utilities.transaction_base import TransactionBase
class MaintenanceVisit(TransactionBase):
def get_item_details(self, item_code):
return frappe.db.get_value("Item", item_code, ["item_name", "description"], as_dict=1)
def validate_serial_no(self):
for d in self.get('maintenance_visit_details'):
if d.serial_no and not frappe.db.exists("Serial No", d.serial_no):
frappe.throw(_("Serial No {0} does not exist").format(d.serial_no))
def validate(self):
self.validate_serial_no()
def update_customer_issue(self, flag):
for d in self.get('maintenance_visit_details'):
if d.prevdoc_docname and d.prevdoc_doctype == 'Customer Issue' :
if flag==1:
mntc_date = self.mntc_date
service_person = d.service_person
work_done = d.work_done
if self.completion_status == 'Fully Completed':
status = 'Closed'
elif self.completion_status == 'Partially Completed':
status = 'Work In Progress'
else:
nm = frappe.db.sql("select t1.name, t1.mntc_date, t2.service_person, t2.work_done from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.completion_status = 'Partially Completed' and t2.prevdoc_docname = %s and t1.name!=%s and t1.docstatus = 1 order by t1.name desc limit 1", (d.prevdoc_docname, self.name))
if nm:
status = 'Work In Progress'
mntc_date = nm and nm[0][1] or ''
service_person = nm and nm[0][2] or ''
work_done = nm and nm[0][3] or ''
else:
status = 'Open'
mntc_date = ''
service_person = ''
work_done = ''
frappe.db.sql("update `tabCustomer Issue` set resolution_date=%s, resolved_by=%s, resolution_details=%s, status=%s where name =%s",(mntc_date,service_person,work_done,status,d.prevdoc_docname))
def check_if_last_visit(self):
"""check if last maintenance visit against same sales order/ customer issue"""
check_for_docname = None
for d in self.get('maintenance_visit_details'):
if d.prevdoc_docname:
check_for_docname = d.prevdoc_docname
#check_for_doctype = d.prevdoc_doctype
if check_for_docname:
check = frappe.db.sql("select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.name!=%s and t2.prevdoc_docname=%s and t1.docstatus = 1 and (t1.mntc_date > %s or (t1.mntc_date = %s and t1.mntc_time > %s))", (self.name, check_for_docname, self.mntc_date, self.mntc_date, self.mntc_time))
if check:
check_lst = [x[0] for x in check]
check_lst =','.join(check_lst)
frappe.throw(_("Cancel Material Visits {0} before cancelling this Maintenance Visit").format(check_lst))
raise Exception
else:
self.update_customer_issue(0)
def on_submit(self):
self.update_customer_issue(1)
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
self.check_if_last_visit()
frappe.db.set(self, 'status', 'Cancelled')
def on_update(self):
pass
| agpl-3.0 |
glaubitz/fs-uae-debian | arcade/OpenGL/GL/NV/vertex_array_range2.py | 9 | 1375 | '''OpenGL extension NV.vertex_array_range2
This module customises the behaviour of the
OpenGL.raw.GL.NV.vertex_array_range2 to provide a more
Python-friendly API
Overview (from the spec)
Enabling and disabling the vertex array range is specified by the
original NV_vertex_array_range extension specification to flush the
vertex array range implicitly. In retrospect, this semantic is
extremely misconceived and creates terrible performance problems
for any application that wishes to mix conventional vertex arrays
with vertex array range-enabled vertex arrays.
This extension provides a new token for enabling/disabling the
vertex array range that does NOT perform an implicit vertex array
range flush when the enable/disable is performed.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/vertex_array_range2.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.vertex_array_range2 import *
from OpenGL.raw.GL.NV.vertex_array_range2 import _EXTENSION_NAME
def glInitVertexArrayRange2NV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
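# Hedged usage sketch: with a current GL context, use of the no-flush enable
# token could be gated on the check above (token name taken from the extension
# spec; treat the exact imports as assumptions):
#
#     from OpenGL.GL import glEnableClientState
#     from OpenGL.raw.GL.NV.vertex_array_range2 import \
#         GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV
#     if glInitVertexArrayRange2NV():
#         glEnableClientState(GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV)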
### END AUTOGENERATED SECTION | gpl-2.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/protorpc-1.0/protorpc/wsgi/util.py | 13 | 6087 | #!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""WSGI utilities
Small collection of helpful utilities for working with WSGI.
"""
__author__ = 'rafek@google.com (Rafe Kaplan)'
import httplib
import re
from .. import util
__all__ = ['static_page',
'error',
'first_found',
]
_STATUS_PATTERN = re.compile('^(\d{3})\s')
@util.positional(1)
def static_page(content='',
status='200 OK',
content_type='text/html; charset=utf-8',
headers=None):
"""Create a WSGI application that serves static content.
A static page is one that will be the same every time it receives a request.
It will always serve the same status, content and headers.
Args:
content: Content to serve in response to HTTP request.
status: Status to serve in response to HTTP request. If string, status
is served as is without any error checking. If integer, will look up
status message. Otherwise, parameter is tuple (status, description):
status: Integer status of response.
description: Brief text description of response.
content_type: Convenient parameter for content-type header. Will appear
before any content-type header that appears in 'headers' parameter.
headers: Dictionary of headers or iterable of tuples (name, value):
name: String name of header.
value: String value of header.
Returns:
WSGI application that serves static content.
"""
if isinstance(status, (int, long)):
status = '%d %s' % (status, httplib.responses.get(status, 'Unknown Error'))
elif not isinstance(status, basestring):
status = '%d %s' % tuple(status)
if isinstance(headers, dict):
headers = headers.iteritems()
headers = [('content-length', str(len(content))),
('content-type', content_type),
] + list(headers or [])
# Ensure all headers are str.
for index, (key, value) in enumerate(headers):
if isinstance(value, unicode):
value = value.encode('utf-8')
headers[index] = key, value
if not isinstance(key, str):
raise TypeError('Header key must be str, found: %r' % (key,))
if not isinstance(value, str):
raise TypeError(
'Header %r must be type str or unicode, found: %r' % (key, value))
def static_page_application(environ, start_response):
start_response(status, headers)
return [content]
return static_page_application
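# Hedged usage sketch: serving a fixed page with the stdlib reference server
# (host and port are arbitrary):
#
#     from wsgiref.simple_server import make_server
#     app = static_page('<html><body>hello</body></html>')
#     make_server('localhost', 8080, app).serve_forever()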
@util.positional(2)
def error(status_code, status_message=None,
content_type='text/plain; charset=utf-8',
headers=None, content=None):
"""Create WSGI application that statically serves an error page.
Creates a static error page specifically for non-200 HTTP responses.
Browsers such as Internet Explorer will display their own error pages for
error content responses smaller than 512 bytes. For this reason all responses
are right-padded up to 512 bytes.
  Error pages for which no content is provided will contain the standard HTTP
  status message as their content.
Args:
status_code: Integer status code of error.
status_message: Status message.
Returns:
Static WSGI application that sends static error response.
"""
if status_message is None:
status_message = httplib.responses.get(status_code, 'Unknown Error')
if content is None:
content = status_message
content = util.pad_string(content)
return static_page(content,
status=(status_code, status_message),
content_type=content_type,
headers=headers)
def first_found(apps):
"""Serve the first application that does not response with 404 Not Found.
If no application serves content, will respond with generic 404 Not Found.
Args:
apps: List of WSGI applications to search through. Will serve the content
of the first of these that does not return a 404 Not Found. Applications
in this list must not modify the environment or any objects in it if they
do not match. Applications that do not obey this restriction can create
unpredictable results.
Returns:
Compound application that serves the contents of the first application that
    does not respond with 404 Not Found.
"""
apps = tuple(apps)
not_found = error(httplib.NOT_FOUND)
def first_found_app(environ, start_response):
"""Compound application returned from the first_found function."""
final_result = {} # Used in absence of Python local scoping.
def first_found_start_response(status, response_headers):
"""Replacement for start_response as passed in to first_found_app.
Called by each application in apps instead of the real start response.
Checks the response status, and if anything other than 404, sets 'status'
and 'response_headers' in final_result.
"""
status_match = _STATUS_PATTERN.match(status)
assert status_match, ('Status must be a string beginning '
'with 3 digit number. Found: %s' % status)
status_code = status_match.group(0)
if int(status_code) == httplib.NOT_FOUND:
return
final_result['status'] = status
final_result['response_headers'] = response_headers
for app in apps:
response = app(environ, first_found_start_response)
if final_result:
start_response(final_result['status'], final_result['response_headers'])
return response
return not_found(environ, start_response)
return first_found_app
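# Hedged usage sketch (application names are hypothetical): try the newer API
# first and fall back to the legacy handler:
#
#     app = first_found([api_v2_app, api_v1_app])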
| mit |
mcella/django | tests/generic_views/urls.py | 52 | 14592 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from . import models, views
urlpatterns = [
# TemplateView
url(r'^template/no_template/$',
TemplateView.as_view()),
url(r'^template/login_required/$',
login_required(TemplateView.as_view())),
url(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/content_type/$',
TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain')),
url(r'^template/cached/(?P<foo>\w+)/$',
cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html'))),
# DetailView
url(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>[0-9]+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>[0-9]+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
url(r'^detail/author/bycustompk/(?P<foo>[0-9]+)/$',
views.AuthorDetail.as_view(pk_url_kwarg='foo')),
url(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bycustomslug/(?P<foo>[\w-]+)/$',
views.AuthorDetail.as_view(slug_url_kwarg='foo')),
url(r'^detail/author/bypkignoreslug/(?P<pk>[0-9]+)-(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bypkandslug/(?P<pk>[0-9]+)-(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view(query_pk_and_slug=True)),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
url(r'^detail/author/(?P<pk>[0-9]+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
url(r'^detail/author/(?P<pk>[0-9]+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
url(r'^detail/page/(?P<pk>[0-9]+)/field/$',
views.PageDetail.as_view()),
url(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
url(r'^detail/nonmodel/1/$',
views.NonModelDetail.as_view()),
url(r'^detail/doesnotexist/(?P<pk>[0-9]+)/$',
views.ObjectDoesNotExistDetail.as_view()),
# FormView
url(r'^contact/$',
views.ContactView.as_view()),
# Create/UpdateView
url(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
url(r'^edit/artists/(?P<pk>[0-9]+)/update/$',
views.ArtistUpdate.as_view()),
url(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
url(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/{id}/update/')),
url(r'^edit/authors/create/interpolate_redirect_nonascii/$',
views.NaiveAuthorCreate.as_view(success_url='/%C3%A9dit/author/{id}/update/')),
url(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
url(r'^[eé]dit/authors/create/$',
views.AuthorCreate.as_view()),
url(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/{id}/update/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/interpolate_redirect_nonascii/$',
views.NaiveAuthorUpdate.as_view(success_url='/%C3%A9dit/author/{id}/update/')),
url(r'^[eé]dit/author/(?P<pk>[0-9]+)/update/$',
views.AuthorUpdate.as_view()),
url(r'^edit/author/update/$',
views.OneAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/interpolate_redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/?deleted={id}')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/interpolate_redirect_nonascii/$',
views.NaiveAuthorDelete.as_view(success_url='/%C3%A9dit/authors/create/?deleted={id}')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/$',
views.AuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
url(r'^dates/books/$',
views.BookArchive.as_view()),
url(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
url(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
url(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
url(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
url(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
url(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
url(r'^dates/books/reverse/$',
views.BookArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/books/by_month/$',
views.BookArchive.as_view(date_list_period='month')),
url(r'^dates/booksignings/$',
views.BookSigningArchive.as_view()),
url(r'^dates/books/sortedbyname/$',
views.BookArchive.as_view(ordering='name')),
url(r'^dates/books/sortedbynamedec/$',
views.BookArchive.as_view(ordering='-name')),
# ListView
url(r'^list/dict/$',
views.DictList.as_view()),
url(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
url(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated/(?P<page>[0-9]+)/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated-orphaned/$',
views.AuthorList.as_view(paginate_by=30, paginate_orphans=2)),
url(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
url(r'^list/authors/notempty/paginated/$',
views.AuthorList.as_view(allow_empty=False, paginate_by=2)),
url(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
url(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
url(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
url(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
url(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
url(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
url(r'^list/authors/paginated/custom_page_kwarg/$',
views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')),
url(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
url(r'^list/books/sorted/$',
views.BookList.as_view(ordering='name')),
url(r'^list/books/sortedbypagesandnamedec/$',
views.BookList.as_view(ordering=('pages', '-name'))),
# YearArchiveView
# Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
url(r'^dates/books/(?P<year>[0-9]{4})/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/paginated/$',
views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)),
url(r'^dates/books/(?P<year>\d{4})/sortedbyname/$',
views.BookYearArchive.as_view(make_object_list=True, ordering='name')),
url(r'^dates/books/(?P<year>\d{4})/sortedbypageandnamedec/$',
views.BookYearArchive.as_view(make_object_list=True, ordering=('pages', '-name'))),
url(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/reverse/$',
views.BookYearArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/$',
views.BookSigningYearArchive.as_view()),
# MonthArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/paginated/$',
views.BookMonthArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/no_month/$',
views.BookMonthArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookSigningMonthArchive.as_view()),
# WeekArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/paginated/$',
views.BookWeekArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookSigningWeekArchive.as_view()),
# DayArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty_and_future/$',
views.BookDayArchive.as_view(allow_empty=True, allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/paginated/$',
views.BookDayArchive.as_view(paginate_by=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookSigningDayArchive.as_view()),
# TodayArchiveView
url(r'^dates/books/today/$',
views.BookTodayArchive.as_view()),
url(r'^dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
url(r'^dates/booksignings/today/$',
views.BookSigningTodayArchive.as_view()),
# DateDetailView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/nopk/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
url(
r'^dates/books/get_object_custom_queryset/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/'
r'(?P<pk>[0-9]+)/$',
views.BookDetailGetObjectCustomQueryset.as_view(),
),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookSigningDetail.as_view()),
# Useful for testing redirects
url(r'^accounts/login/$', auth_views.login)
]
| bsd-3-clause |
ecavalcanti/TodoList | plugins/ti.alloy/plugin.py | 229 | 4668 | import os, sys, subprocess, hashlib
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def compile(config):
paths = {}
binaries = ["alloy","node"]
for binary in binaries:
try:
# see if the environment variable is defined
paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
except KeyError as ex:
# next try PATH, and then our guess paths
if sys.platform == "darwin" or sys.platform.startswith('linux'):
userPath = os.environ["HOME"]
guessPaths = [
"/usr/local/bin/"+binary,
"/opt/local/bin/"+binary,
userPath+"/local/bin/"+binary,
"/opt/bin/"+binary,
"/usr/bin/"+binary,
"/usr/local/share/npm/bin/"+binary
]
try:
binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
except:
print "[WARN] Couldn't find %s on your PATH:" % binary
print "[WARN] %s" % os.environ["PATH"]
print "[WARN]"
print "[WARN] Checking for %s in a few default locations:" % binary
for p in guessPaths:
sys.stdout.write("[WARN] %s -> " % p)
if os.path.exists(p):
binaryPath = p
print "FOUND"
break
else:
print "not found"
binaryPath = None
if binaryPath is None:
print "[ERROR] Couldn't find %s" % binary
sys.exit(1)
else:
paths[binary] = binaryPath
# no guesses on windows, just use the PATH
elif sys.platform == "win32":
paths["alloy"] = "alloy.cmd"
f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
if os.path.exists(f):
print "[INFO] alloy app found at %s" % f
rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
devicefamily = 'none'
simtype = 'none'
version = '0'
deploytype = 'development'
if config['platform']==u'ios':
version = config['iphone_version']
devicefamily = config['devicefamily']
deploytype = config['deploytype']
if config['platform']==u'android':
builder = config['android_builder']
version = builder.tool_api_level
deploytype = config['deploy_type']
if config['platform']==u'mobileweb':
builder = config['mobileweb_builder']
deploytype = config['deploytype']
cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
if sys.platform == "win32":
cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
else:
cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
print "[INFO] Executing Alloy compile:"
print "[INFO] %s" % " ".join(cmd)
try:
print check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if hasattr(ex, 'output'):
print ex.output
print "[ERROR] Alloy compile failed"
retcode = 1
if hasattr(ex, 'returncode'):
retcode = ex.returncode
sys.exit(retcode)
except EnvironmentError as ex:
print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
sys.exit(2)
| apache-2.0 |
malishevg/edugraph | common/djangoapps/util/tests/test_submit_feedback.py | 46 | 14588 | """Tests for the Zendesk"""
from django.contrib.auth.models import AnonymousUser
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from student.tests.factories import UserFactory
from util import views
from zendesk import ZendeskError
import json
import mock
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_FEEDBACK_SUBMISSION": True})
@override_settings(ZENDESK_URL="dummy", ZENDESK_USER="dummy", ZENDESK_API_KEY="dummy")
@mock.patch("util.views.dog_stats_api")
@mock.patch("util.views._ZendeskApi", autospec=True)
class SubmitFeedbackTest(TestCase):
def setUp(self):
"""Set up data for the test case"""
self._request_factory = RequestFactory()
self._anon_user = AnonymousUser()
self._auth_user = UserFactory.create(
email="test@edx.org",
username="test",
profile__name="Test User"
)
# This contains issue_type and course_id to ensure that tags are submitted correctly
self._anon_fields = {
"email": "test@edx.org",
"name": "Test User",
"subject": "a subject",
"details": "some details",
"issue_type": "test_issue",
"course_id": "test_course"
}
# This does not contain issue_type nor course_id to ensure that they are optional
self._auth_fields = {"subject": "a subject", "details": "some details"}
def _build_and_run_request(self, user, fields):
"""
Generate a request and invoke the view, returning the response.
The request will be a POST request from the given `user`, with the given
`fields` in the POST body.
"""
req = self._request_factory.post(
"/submit_feedback",
data=fields,
HTTP_REFERER="test_referer",
HTTP_USER_AGENT="test_user_agent",
REMOTE_ADDR="1.2.3.4",
SERVER_NAME="test_server"
)
req.user = user
return views.submit_feedback(req)
def _assert_bad_request(self, response, field, zendesk_mock_class, datadog_mock):
"""
Assert that the given `response` contains correct failure data.
It should have a 400 status code, and its content should be a JSON
object containing the specified `field` and an `error`.
"""
self.assertEqual(response.status_code, 400)
resp_json = json.loads(response.content)
self.assertTrue("field" in resp_json)
self.assertEqual(resp_json["field"], field)
self.assertTrue("error" in resp_json)
# There should be absolutely no interaction with Zendesk
self.assertFalse(zendesk_mock_class.return_value.mock_calls)
self.assertFalse(datadog_mock.mock_calls)
def _test_bad_request_omit_field(self, user, fields, omit_field, zendesk_mock_class, datadog_mock):
"""
Invoke the view with a request missing a field and assert correctness.
The request will be a POST request from the given `user`, with POST
fields taken from `fields` minus the entry specified by `omit_field`.
The response should have a 400 (bad request) status code and specify
the invalid field and an error message, and the Zendesk API should not
have been invoked.
"""
filtered_fields = {k: v for (k, v) in fields.items() if k != omit_field}
resp = self._build_and_run_request(user, filtered_fields)
self._assert_bad_request(resp, omit_field, zendesk_mock_class, datadog_mock)
def _test_bad_request_empty_field(self, user, fields, empty_field, zendesk_mock_class, datadog_mock):
"""
Invoke the view with an empty field and assert correctness.
The request will be a POST request from the given `user`, with POST
fields taken from `fields`, replacing the entry specified by
`empty_field` with the empty string. The response should have a 400
(bad request) status code and specify the invalid field and an error
message, and the Zendesk API should not have been invoked.
"""
altered_fields = fields.copy()
altered_fields[empty_field] = ""
resp = self._build_and_run_request(user, altered_fields)
self._assert_bad_request(resp, empty_field, zendesk_mock_class, datadog_mock)
def _test_success(self, user, fields):
"""
Generate a request, invoke the view, and assert success.
The request will be a POST request from the given `user`, with the given
`fields` in the POST body. The response should have a 200 (success)
status code.
"""
resp = self._build_and_run_request(user, fields)
self.assertEqual(resp.status_code, 200)
def _assert_datadog_called(self, datadog_mock, with_tags):
expected_datadog_calls = [
mock.call.increment(
views.DATADOG_FEEDBACK_METRIC,
tags=(["course_id:test_course", "issue_type:test_issue"] if with_tags else [])
)
]
self.assertEqual(datadog_mock.mock_calls, expected_datadog_calls)
def test_bad_request_anon_user_no_name(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user not specifying `name`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "name", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "name", zendesk_mock_class, datadog_mock)
def test_bad_request_anon_user_no_email(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user not specifying `email`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "email", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "email", zendesk_mock_class, datadog_mock)
def test_bad_request_anon_user_invalid_email(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user specifying an invalid `email`."""
fields = self._anon_fields.copy()
fields["email"] = "This is not a valid email address!"
resp = self._build_and_run_request(self._anon_user, fields)
self._assert_bad_request(resp, "email", zendesk_mock_class, datadog_mock)
def test_bad_request_anon_user_no_subject(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user not specifying `subject`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "subject", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "subject", zendesk_mock_class, datadog_mock)
def test_bad_request_anon_user_no_details(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user not specifying `details`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "details", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "details", zendesk_mock_class, datadog_mock)
def test_valid_request_anon_user(self, zendesk_mock_class, datadog_mock):
"""
Test a valid request from an anonymous user.
The response should have a 200 (success) status code, and a ticket with
the given information should have been submitted via the Zendesk API.
"""
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.create_ticket.return_value = 42
self._test_success(self._anon_user, self._anon_fields)
expected_zendesk_calls = [
mock.call.create_ticket(
{
"ticket": {
"requester": {"name": "Test User", "email": "test@edx.org"},
"subject": "a subject",
"comment": {"body": "some details"},
"tags": ["test_course", "test_issue", "LMS"]
}
}
),
mock.call.update_ticket(
42,
{
"ticket": {
"comment": {
"public": False,
"body":
"Additional information:\n\n"
"Client IP: 1.2.3.4\n"
"Host: test_server\n"
"Page: test_referer\n"
"Browser: test_user_agent"
}
}
}
)
]
self.assertEqual(zendesk_mock_instance.mock_calls, expected_zendesk_calls)
self._assert_datadog_called(datadog_mock, with_tags=True)
def test_bad_request_auth_user_no_subject(self, zendesk_mock_class, datadog_mock):
"""Test a request from an authenticated user not specifying `subject`."""
self._test_bad_request_omit_field(self._auth_user, self._auth_fields, "subject", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._auth_user, self._auth_fields, "subject", zendesk_mock_class, datadog_mock)
def test_bad_request_auth_user_no_details(self, zendesk_mock_class, datadog_mock):
"""Test a request from an authenticated user not specifying `details`."""
self._test_bad_request_omit_field(self._auth_user, self._auth_fields, "details", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._auth_user, self._auth_fields, "details", zendesk_mock_class, datadog_mock)
def test_valid_request_auth_user(self, zendesk_mock_class, datadog_mock):
"""
Test a valid request from an authenticated user.
The response should have a 200 (success) status code, and a ticket with
the given information should have been submitted via the Zendesk API.
"""
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.create_ticket.return_value = 42
self._test_success(self._auth_user, self._auth_fields)
expected_zendesk_calls = [
mock.call.create_ticket(
{
"ticket": {
"requester": {"name": "Test User", "email": "test@edx.org"},
"subject": "a subject",
"comment": {"body": "some details"},
"tags": ["LMS"]
}
}
),
mock.call.update_ticket(
42,
{
"ticket": {
"comment": {
"public": False,
"body":
"Additional information:\n\n"
"username: test\n"
"Client IP: 1.2.3.4\n"
"Host: test_server\n"
"Page: test_referer\n"
"Browser: test_user_agent"
}
}
}
)
]
self.assertEqual(zendesk_mock_instance.mock_calls, expected_zendesk_calls)
self._assert_datadog_called(datadog_mock, with_tags=False)
def test_get_request(self, zendesk_mock_class, datadog_mock):
"""Test that a GET results in a 405 even with all required fields"""
req = self._request_factory.get("/submit_feedback", data=self._anon_fields)
req.user = self._anon_user
resp = views.submit_feedback(req)
self.assertEqual(resp.status_code, 405)
self.assertIn("Allow", resp)
self.assertEqual(resp["Allow"], "POST")
# There should be absolutely no interaction with Zendesk
self.assertFalse(zendesk_mock_class.mock_calls)
self.assertFalse(datadog_mock.mock_calls)
def test_zendesk_error_on_create(self, zendesk_mock_class, datadog_mock):
"""
Test Zendesk returning an error on ticket creation.
We should return a 500 error with no body
"""
err = ZendeskError(msg="", error_code=404)
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.create_ticket.side_effect = err
resp = self._build_and_run_request(self._anon_user, self._anon_fields)
self.assertEqual(resp.status_code, 500)
self.assertFalse(resp.content)
self._assert_datadog_called(datadog_mock, with_tags=True)
def test_zendesk_error_on_update(self, zendesk_mock_class, datadog_mock):
"""
Test for Zendesk returning an error on ticket update.
If Zendesk returns any error on ticket update, we return a 200 to the
browser because the update contains additional information that is not
necessary for the user to have submitted their feedback.
"""
err = ZendeskError(msg="", error_code=500)
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.update_ticket.side_effect = err
resp = self._build_and_run_request(self._anon_user, self._anon_fields)
self.assertEqual(resp.status_code, 200)
self._assert_datadog_called(datadog_mock, with_tags=True)
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_FEEDBACK_SUBMISSION": False})
def test_not_enabled(self, zendesk_mock_class, datadog_mock):
"""
Test for Zendesk submission not enabled in `settings`.
We should raise Http404.
"""
with self.assertRaises(Http404):
self._build_and_run_request(self._anon_user, self._anon_fields)
def test_zendesk_not_configured(self, zendesk_mock_class, datadog_mock):
"""
Test for Zendesk not fully configured in `settings`.
For each required configuration parameter, test that setting it to
`None` causes an otherwise valid request to return a 500 error.
"""
def test_case(missing_config):
with mock.patch(missing_config, None):
with self.assertRaises(Exception):
self._build_and_run_request(self._anon_user, self._anon_fields)
test_case("django.conf.settings.ZENDESK_URL")
test_case("django.conf.settings.ZENDESK_USER")
test_case("django.conf.settings.ZENDESK_API_KEY")
| agpl-3.0 |
jbassen/edx-platform | openedx/core/djangoapps/call_stack_manager/core.py | 40 | 5250 | """
Get call stacks of Model Class
in three cases-
1. QuerySet API
2. save()
3. delete()
classes:
CallStackManager - stores all stacks in global dictionary and logs
CallStackMixin - used for Model save(), and delete() method
Functions:
capture_call_stack - global function used to store call stack
Decorators:
    donottrack - mainly for places where we already know the calls. This decorator lets us skip tracking in specified cases
How to use-
    1. Import the following in the file where the class to be tracked resides
from openedx.core.djangoapps.call_stack_manager import CallStackManager, CallStackMixin
    2. Override the default manager by writing the following in any model class you want to track-
objects = CallStackManager()
3. For tracking Save and Delete events-
Use mixin called "CallStackMixin"
For ex.
class StudentModule(CallStackMixin, models.Model):
    4. The decorator is parameterized, taking class name(s) as arguments
        How to use -
        1. Import the following
            from openedx.core.djangoapps.call_stack_manager import donottrack
"""
import logging
import traceback
import re
import collections
from django.db.models import Manager
log = logging.getLogger(__name__)
# list of regular expressions acting as filters
REGULAR_EXPS = [re.compile(x) for x in ['^.*python2.7.*$', '^.*<exec_function>.*$', '^.*exec_code_object.*$',
'^.*edxapp/src.*$', '^.*call_stack_manager.*$']]
# Variable which decides whether to track calls in the function or not. Do it by default.
TRACK_FLAG = True
# List keeping track of Model classes not be tracked for special cases
# usually cases where we know that the function is calling Model classes.
HALT_TRACKING = []
# Module Level variables
# dictionary which stores call stacks.
# { "ModelClasses" : [ListOfFrames]}
# Frames - ('FilePath','LineNumber','Context')
# ex. {"<class 'courseware.models.StudentModule'>" : [[(file,line number,context),(---,---,---)],
# [(file,line number,context),(---,---,---)]]}
STACK_BOOK = collections.defaultdict(list)
def capture_call_stack(current_model):
""" logs customised call stacks in global dictionary `STACK_BOOK`, and logs it.
Args:
current_model - Name of the model class
"""
# holds temporary callstack
# frame[0][6:-1] -> File name along with path
# frame[1][6:] -> Line Number
# frame[2][3:] -> Context
temp_call_stack = [(frame[0][6:-1],
frame[1][6:],
frame[2][3:])
for frame in [stack.replace("\n", "").strip().split(',') for stack in traceback.format_stack()]
if not any(reg.match(frame[0]) for reg in REGULAR_EXPS)]
# avoid duplication.
if temp_call_stack not in STACK_BOOK[current_model] and TRACK_FLAG \
and not issubclass(current_model, tuple(HALT_TRACKING)):
STACK_BOOK[current_model].append(temp_call_stack)
log.info("logging new call stack for %s:\n %s", current_model, temp_call_stack)
class CallStackMixin(object):
""" A mixin class for getting call stacks when Save() and Delete() methods are called
"""
def save(self, *args, **kwargs):
"""
Logs before save and overrides respective model API save()
"""
capture_call_stack(type(self))
return super(CallStackMixin, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
"""
Logs before delete and overrides respective model API delete()
"""
capture_call_stack(type(self))
return super(CallStackMixin, self).delete(*args, **kwargs)
class CallStackManager(Manager):
""" A Manager class which overrides the default Manager class for getting call stacks
"""
def get_query_set(self):
"""overriding the default queryset API method
"""
capture_call_stack(self.model)
return super(CallStackManager, self).get_query_set()
def donottrack(*classes_not_to_be_tracked):
"""function decorator which deals with toggling call stack
Args:
classes_not_to_be_tracked: model classes where tracking is undesirable
Returns:
wrapped function
"""
def real_donottrack(function):
"""takes function to be decorated and returns wrapped function
Args:
function - wrapped function i.e. real_donottrack
"""
def wrapper(*args, **kwargs):
""" wrapper function for decorated function
Returns:
wrapper function i.e. wrapper
"""
if len(classes_not_to_be_tracked) == 0:
global TRACK_FLAG # pylint: disable=W0603
current_flag = TRACK_FLAG
TRACK_FLAG = False
function(*args, **kwargs)
TRACK_FLAG = current_flag
else:
global HALT_TRACKING # pylint: disable=W0603
current_halt_track = HALT_TRACKING
HALT_TRACKING = classes_not_to_be_tracked
function(*args, **kwargs)
HALT_TRACKING = current_halt_track
return wrapper
return real_donottrack
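# Hedged usage sketch (illustrative only): how a model opts in to call stack
# tracking and how a caller suppresses it. ``ExampleModel`` is hypothetical
# and not part of this module; the sketch is kept in comments because
# defining a Django model requires a configured application.
#
#     from django.db import models
#
#     class ExampleModel(CallStackMixin, models.Model):
#         objects = CallStackManager()
#
#     @donottrack(ExampleModel)
#     def bulk_cleanup():
#         # QuerySet calls made in here are not recorded in STACK_BOOK.
#         ExampleModel.objects.filter(pk__lt=100).delete()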
| agpl-3.0 |
gauribhoite/personfinder | env/site-packages/docutils/writers/docutils_xml.py | 108 | 6981 | # $Id: docutils_xml.py 7497 2012-08-16 15:17:29Z milde $
# Author: David Goodger, Paul Tremblay, Guenter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""
Simple document tree Writer, writes Docutils XML according to
http://docutils.sourceforge.net/docs/ref/docutils.dtd.
"""
__docformat__ = 'reStructuredText'
import sys
# Work around broken PyXML and obsolete python stdlib behaviour. (The stdlib
# replaces its own xml module with PyXML if the latter is installed. However,
# PyXML is no longer maintained and partially incompatible/buggy.) Reverse
# the order in which xml module and submodules are searched to import stdlib
# modules if they exist and PyXML modules if they do not exist in the stdlib.
#
# See http://sourceforge.net/tracker/index.php?func=detail&aid=3552403&group_id=38414&atid=422030
# and http://lists.fedoraproject.org/pipermail/python-devel/2012-July/000406.html
import xml
if "_xmlplus" in xml.__path__[0]: # PyXML sub-module
xml.__path__.reverse() # If both are available, prefer stdlib over PyXML
import xml.sax.saxutils
from StringIO import StringIO
import docutils
from docutils import frontend, writers, nodes
class RawXmlError(docutils.ApplicationError): pass
class Writer(writers.Writer):
supported = ('xml',)
"""Formats this writer supports."""
settings_spec = (
'"Docutils XML" Writer Options',
None,
(('Generate XML with newlines before and after tags.',
['--newlines'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Generate XML with indents and newlines.',
['--indents'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Omit the DOCTYPE declaration.',
['--no-doctype'],
{'dest': 'doctype_declaration', 'default': 1,
'action': 'store_false', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
config_section = 'docutils_xml writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = XMLTranslator
def translate(self):
self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = ''.join(visitor.output)
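# Hedged usage sketch (illustrative only, not part of the docutils API):
# render a small reStructuredText string through this writer, which a
# standard docutils install registers under the alias 'xml'.
def _example_publish_xml():
    from docutils.core import publish_string
    return publish_string(u'Hello, *docutils*!', writer_name='xml',
                          settings_overrides={'indents': True})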
class XMLTranslator(nodes.GenericNodeVisitor):
xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
# TODO: add stylesheet options similar to HTML and LaTeX writers?
#xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
doctype = (
'<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n'
xmlparser = xml.sax.make_parser()
"""SAX parser instance to check/exctract raw XML."""
xmlparser.setFeature(
"http://xml.org/sax/features/external-general-entities", True)
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
# Reporter
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
self.settings = settings = document.settings
self.indent = self.newline = ''
if settings.newlines:
self.newline = '\n'
if settings.indents:
self.newline = '\n'
self.indent = ' '
self.level = 0 # indentation level
self.in_simple = 0 # level of nesting inside mixed-content elements
# Output
self.output = []
if settings.xml_declaration:
self.output.append(
self.xml_declaration % settings.output_encoding)
if settings.doctype_declaration:
self.output.append(self.doctype)
self.output.append(self.generator % docutils.__version__)
# initialize XML parser
        self.the_handle = TestXml()
self.xmlparser.setContentHandler(self.the_handle)
# generic visit and depart methods
# --------------------------------
def default_visit(self, node):
"""Default node visit method."""
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.starttag(xml.sax.saxutils.quoteattr))
self.level += 1
if isinstance(node, nodes.TextElement):
self.in_simple += 1
if not self.in_simple:
self.output.append(self.newline)
def default_departure(self, node):
"""Default node depart method."""
self.level -= 1
if not self.in_simple:
self.output.append(self.indent*self.level)
self.output.append(node.endtag())
if isinstance(node, nodes.TextElement):
self.in_simple -= 1
if not self.in_simple:
self.output.append(self.newline)
# specific visit and depart methods
# ---------------------------------
def visit_Text(self, node):
text = xml.sax.saxutils.escape(node.astext())
self.output.append(text)
def depart_Text(self, node):
pass
def visit_raw(self, node):
if 'xml' not in node.get('format', '').split():
# skip other raw content?
# raise nodes.SkipNode
self.default_visit(node)
return
# wrap in <raw> element
self.default_visit(node) # or not?
xml_string = node.astext()
self.output.append(xml_string)
self.default_departure(node) # or not?
# Check validity of raw XML:
if isinstance(xml_string, unicode) and sys.version_info < (3,):
xml_string = xml_string.encode('utf8')
try:
self.xmlparser.parse(StringIO(xml_string))
except xml.sax._exceptions.SAXParseException, error:
col_num = self.the_handle.locator.getColumnNumber()
line_num = self.the_handle.locator.getLineNumber()
srcline = node.line
if not isinstance(node.parent, nodes.TextElement):
srcline += 2 # directive content start line
msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % (
col_num, line_num, node.astext())
self.warn(msg, source=node.source, line=srcline+line_num-1)
raise nodes.SkipNode # content already processed
class TestXml(xml.sax.ContentHandler):
def setDocumentLocator(self, locator):
self.locator = locator
| apache-2.0 |
Pablo126/SSBW | Tarea4/tarea4/lib/python3.5/site-packages/pkg_resources/_vendor/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
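# Hedged sketch (illustrative only): re-raise a captured exception with its
# original traceback, using the (type, value, traceback) triple returned by
# ``sys.exc_info()``.
def _example_reraise():
    try:
        {}['missing']
    except KeyError:
        tp, value, tb = sys.exc_info()
        reraise(tp, value, tb)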
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
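# Hedged sketch (illustrative only): create a class whose metaclass is
# applied identically on Python 2 and 3.
def _example_with_metaclass():
    class Meta(type):
        pass

    class Base(object):
        pass

    class Example(with_metaclass(Meta, Base)):
        pass

    assert type(Example) is Meta and issubclass(Example, Base)
    return Example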
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
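# Hedged sketch (illustrative only): the decorator form, useful when the
# class body should stay untouched.
def _example_add_metaclass():
    class Meta(type):
        pass

    @add_metaclass(Meta)
    class Example(object):
        pass

    assert type(Example) is Meta
    return Example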
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
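# Hedged sketch (illustrative only): define __str__ once, returning text,
# and let the decorator supply __unicode__/__str__ under Python 2.
def _example_python_2_unicode_compatible():
    @python_2_unicode_compatible
    class Greeting(object):
        def __str__(self):
            return u'hello'

    return str(Greeting())  # text on Python 3, UTF-8 bytes on Python 2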
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-3.0 |
mahak/keystone | keystone/limit/backends/base.py | 2 | 5766 | # Copyright 2017 SUSE Linux Gmbh
# Copyright 2017 Huawei
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import keystone.conf
from keystone import exception
CONF = keystone.conf.CONF
class UnifiedLimitDriverBase(object, metaclass=abc.ABCMeta):
def _get_list_limit(self):
return CONF.unified_limit.list_limit or CONF.list_limit
@abc.abstractmethod
def create_registered_limits(self, registered_limits):
"""Create new registered limits.
:param registered_limits: a list of dictionaries representing limits to
create.
:returns: all the newly created registered limits.
:raises keystone.exception.Conflict: If a duplicate registered limit
exists.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_registered_limit(self, registered_limit_id, registered_limit):
"""Update existing registered limits.
:param registered_limit_id: the id of the registered limit.
:param registered_limit: a dict containing the registered limit
attributes to update.
:returns: the updated registered limit.
:raises keystone.exception.RegisteredLimitNotFound: If registered limit
doesn't exist.
:raises keystone.exception.Conflict: If update to a duplicate
registered limit.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_registered_limits(self, hints):
"""List all registered limits.
:param hints: contains the list of filters yet to be satisfied.
Any filters satisfied here will be removed so that
the caller will know if any filters remain.
        :returns: a list of dictionaries or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_registered_limit(self, registered_limit_id):
"""Get a registered limit.
:param registered_limit_id: the registered limit id to get.
:returns: a dictionary representing a registered limit reference.
:raises keystone.exception.RegisteredLimitNotFound: If registered limit
doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_registered_limit(self, registered_limit_id):
"""Delete an existing registered limit.
:param registered_limit_id: the registered limit id to delete.
:raises keystone.exception.RegisteredLimitNotFound: If registered limit
doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def create_limits(self, limits):
"""Create new limits.
:param limits: a list of dictionaries representing limits to create.
:returns: all the newly created limits.
:raises keystone.exception.Conflict: If a duplicate limit exists.
:raises keystone.exception.NoLimitReference: If no reference registered
limit exists.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_limit(self, limit_id, limit):
"""Update existing limits.
:param limit_id: the id of the limit.
:param limit: a dict containing the limit attributes to update.
:returns: the updated limit.
:raises keystone.exception.LimitNotFound: If limit doesn't
exist.
:raises keystone.exception.Conflict: If update to a duplicate limit.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_limits(self, hints):
"""List all limits.
:param hints: contains the list of filters yet to be satisfied.
Any filters satisfied here will be removed so that
the caller will know if any filters remain.
:returns: a list of dictionaries or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_limit(self, limit_id):
"""Get a limit.
:param limit_id: the limit id to get.
:returns: a dictionary representing a limit reference.
:raises keystone.exception.LimitNotFound: If limit doesn't
exist.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_limit(self, limit_id):
"""Delete an existing limit.
:param limit_id: the limit id to delete.
:raises keystone.exception.LimitNotFound: If limit doesn't
exist.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_limits_for_project(self, project_id):
"""Delete the existing limits which belong to the specified project.
:param project_id: the limits' project id.
:returns: a dictionary representing the deleted limits id. Used for
cache invalidating.
"""
raise exception.NotImplemented() # pragma: no cover
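class _ExampleInMemoryDriver(UnifiedLimitDriverBase):
    """Hedged sketch (illustrative only, not a real keystone driver): a
    minimal in-memory implementation of two of the abstract methods above.
    The remaining abstract methods are omitted, so this class cannot be
    instantiated as-is; the exception keyword used below is an assumption.
    """

    _limits = {}

    def get_limit(self, limit_id):
        try:
            return self._limits[limit_id]
        except KeyError:
            raise exception.LimitNotFound(id=limit_id)

    def list_limits(self, hints):
        # A slice end of None (no configured list limit) keeps the whole list.
        return list(self._limits.values())[:self._get_list_limit()]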
| apache-2.0 |
sharbison3/python-docs-samples | bigtable/autoscaler/autoscaler_test.py | 1 | 2682 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit and system tests for autoscaler.py"""
import os
import time
from google.cloud import bigtable
from mock import patch
from autoscaler import get_cpu_load
from autoscaler import main
from autoscaler import scale_bigtable
# tests assume instance and cluster have the same ID
BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER']
# System tests to verify API calls succeed
def test_get_cpu_load():
assert get_cpu_load() > 0.0
def test_scale_bigtable():
bigtable_client = bigtable.Client(admin=True)
instance = bigtable_client.instance(BIGTABLE_INSTANCE)
instance.reload()
cluster = instance.cluster(BIGTABLE_INSTANCE)
cluster.reload()
original_node_count = cluster.serve_nodes
scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True)
time.sleep(3)
cluster.reload()
new_node_count = cluster.serve_nodes
assert (new_node_count == (original_node_count + 2))
scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
time.sleep(3)
cluster.reload()
final_node_count = cluster.serve_nodes
assert final_node_count == original_node_count
# Unit test for logic
@patch('time.sleep')
@patch('autoscaler.get_cpu_load')
@patch('autoscaler.scale_bigtable')
def test_main(scale_bigtable, get_cpu_load, sleep):
SHORT_SLEEP = 5
LONG_SLEEP = 10
get_cpu_load.return_value = 0.5
main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP,
LONG_SLEEP)
scale_bigtable.assert_not_called()
scale_bigtable.reset_mock()
get_cpu_load.return_value = 0.7
main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP,
LONG_SLEEP)
scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
BIGTABLE_INSTANCE, True)
scale_bigtable.reset_mock()
get_cpu_load.return_value = 0.2
main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP,
LONG_SLEEP)
scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
BIGTABLE_INSTANCE, False)
scale_bigtable.reset_mock()
| apache-2.0 |
nelmiux/CarnotKE | jyhton/lib-python/2.7/compiler/syntax.py | 369 | 1444 | """Check for errors in the AST.
The Python parser does not catch all syntax errors. Others, like
assignments with invalid targets, are caught in the code generation
phase.
The compiler package catches some errors in the transformer module.
But it seems clearer to write checkers that use the AST to detect
errors.
"""
from compiler import ast, walk
def check(tree, multi=None):
v = SyntaxErrorChecker(multi)
walk(tree, v)
return v.errors
class SyntaxErrorChecker:
"""A visitor to find syntax errors in the AST."""
def __init__(self, multi=None):
"""Create new visitor object.
If optional argument multi is not None, then print messages
for each error rather than raising a SyntaxError for the
first.
"""
self.multi = multi
self.errors = 0
def error(self, node, msg):
self.errors = self.errors + 1
if self.multi is not None:
print "%s:%s: %s" % (node.filename, node.lineno, msg)
else:
raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)
def visitAssign(self, node):
# the transformer module handles many of these
pass
## for target in node.nodes:
## if isinstance(target, ast.AssList):
## if target.lineno is None:
## target.lineno = node.lineno
## self.error(target, "can't assign to list comprehension")
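# Hedged sketch (illustrative only, Python 2): parse a snippet, attach a
# filename for error reporting, and run the checker over the tree.
def _example_check():
    from compiler import parse, misc
    tree = parse('x = 1\n')
    misc.set_filename('<example>', tree)
    return check(tree)  # the error count; 0 for this snippet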
| apache-2.0 |
infinyte/oppia | extensions/rules/set_of_unicode_string.py | 20 | 2309 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for SetOfUnicodeString objects."""
__author__ = 'Sean Lip'
from extensions.rules import base
class Equals(base.SetOfUnicodeStringRule):
description = 'is equal to {{x|SetOfUnicodeString}}'
is_generic = False
def _evaluate(self, subject):
return set(subject) == set(self.x)
class IsSubsetOf(base.SetOfUnicodeStringRule):
description = 'is a proper subset of {{x|SetOfUnicodeString}}'
is_generic = False
def _evaluate(self, subject):
return set(subject) < set(self.x)
class IsSupersetOf(base.SetOfUnicodeStringRule):
description = 'is a proper superset of {{x|SetOfUnicodeString}}'
is_generic = True
def _evaluate(self, subject):
return set(subject) > set(self.x)
class HasElementsIn(base.SetOfUnicodeStringRule):
description = 'has elements in common with {{x|SetOfUnicodeString}}'
is_generic = True
def _evaluate(self, subject):
return bool(set(subject).intersection(set(self.x)))
class HasElementsNotIn(base.SetOfUnicodeStringRule):
description = 'has elements not in {{x|SetOfUnicodeString}}'
is_generic = True
def _evaluate(self, subject):
return bool(set(subject) - set(self.x))
class OmitsElementsIn(base.SetOfUnicodeStringRule):
description = 'omits some elements of {{x|SetOfUnicodeString}}'
is_generic = False
def _evaluate(self, subject):
return bool(set(self.x) - set(subject))
class IsDisjointFrom(base.SetOfUnicodeStringRule):
description = 'has no elements in common with {{x|SetOfUnicodeString}}'
is_generic = True
def _evaluate(self, subject):
return not bool(set(subject).intersection(set(self.x)))
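# Hedged sketch (illustrative only): the plain-Python set logic the rules
# above are built on, shown without instantiating any rule class.
def _example_set_semantics():
    subject = [u'a', u'b']
    x = [u'a', u'b', u'c']
    assert set(subject) < set(x)                    # IsSubsetOf
    assert bool(set(subject).intersection(set(x)))  # HasElementsIn
    assert bool(set(x) - set(subject))              # OmitsElementsIn
    assert not (set(subject) - set(x))              # HasElementsNotIn is False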
| apache-2.0 |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/unidecode/x0a1.py | 253 | 4469 | data = (
'dit', # 0x00
'dix', # 0x01
'di', # 0x02
'dip', # 0x03
'diex', # 0x04
'die', # 0x05
'diep', # 0x06
'dat', # 0x07
'dax', # 0x08
'da', # 0x09
'dap', # 0x0a
'duox', # 0x0b
'duo', # 0x0c
'dot', # 0x0d
'dox', # 0x0e
'do', # 0x0f
'dop', # 0x10
'dex', # 0x11
'de', # 0x12
'dep', # 0x13
'dut', # 0x14
'dux', # 0x15
'du', # 0x16
'dup', # 0x17
'durx', # 0x18
'dur', # 0x19
'tit', # 0x1a
'tix', # 0x1b
'ti', # 0x1c
'tip', # 0x1d
'tiex', # 0x1e
'tie', # 0x1f
'tiep', # 0x20
'tat', # 0x21
'tax', # 0x22
'ta', # 0x23
'tap', # 0x24
'tuot', # 0x25
'tuox', # 0x26
'tuo', # 0x27
'tuop', # 0x28
'tot', # 0x29
'tox', # 0x2a
'to', # 0x2b
'top', # 0x2c
'tex', # 0x2d
'te', # 0x2e
'tep', # 0x2f
'tut', # 0x30
'tux', # 0x31
'tu', # 0x32
'tup', # 0x33
'turx', # 0x34
'tur', # 0x35
'ddit', # 0x36
'ddix', # 0x37
'ddi', # 0x38
'ddip', # 0x39
'ddiex', # 0x3a
'ddie', # 0x3b
'ddiep', # 0x3c
'ddat', # 0x3d
'ddax', # 0x3e
'dda', # 0x3f
'ddap', # 0x40
'dduox', # 0x41
'dduo', # 0x42
'dduop', # 0x43
'ddot', # 0x44
'ddox', # 0x45
'ddo', # 0x46
'ddop', # 0x47
'ddex', # 0x48
'dde', # 0x49
'ddep', # 0x4a
'ddut', # 0x4b
'ddux', # 0x4c
'ddu', # 0x4d
'ddup', # 0x4e
'ddurx', # 0x4f
'ddur', # 0x50
'ndit', # 0x51
'ndix', # 0x52
'ndi', # 0x53
'ndip', # 0x54
'ndiex', # 0x55
'ndie', # 0x56
'ndat', # 0x57
'ndax', # 0x58
'nda', # 0x59
'ndap', # 0x5a
'ndot', # 0x5b
'ndox', # 0x5c
'ndo', # 0x5d
'ndop', # 0x5e
'ndex', # 0x5f
'nde', # 0x60
'ndep', # 0x61
'ndut', # 0x62
'ndux', # 0x63
'ndu', # 0x64
'ndup', # 0x65
'ndurx', # 0x66
'ndur', # 0x67
'hnit', # 0x68
'hnix', # 0x69
'hni', # 0x6a
'hnip', # 0x6b
'hniet', # 0x6c
'hniex', # 0x6d
'hnie', # 0x6e
'hniep', # 0x6f
'hnat', # 0x70
'hnax', # 0x71
'hna', # 0x72
'hnap', # 0x73
'hnuox', # 0x74
'hnuo', # 0x75
'hnot', # 0x76
'hnox', # 0x77
'hnop', # 0x78
'hnex', # 0x79
'hne', # 0x7a
'hnep', # 0x7b
'hnut', # 0x7c
'nit', # 0x7d
'nix', # 0x7e
'ni', # 0x7f
'nip', # 0x80
'niex', # 0x81
'nie', # 0x82
'niep', # 0x83
'nax', # 0x84
'na', # 0x85
'nap', # 0x86
'nuox', # 0x87
'nuo', # 0x88
'nuop', # 0x89
'not', # 0x8a
'nox', # 0x8b
'no', # 0x8c
'nop', # 0x8d
'nex', # 0x8e
'ne', # 0x8f
'nep', # 0x90
'nut', # 0x91
'nux', # 0x92
'nu', # 0x93
'nup', # 0x94
'nurx', # 0x95
'nur', # 0x96
'hlit', # 0x97
'hlix', # 0x98
'hli', # 0x99
'hlip', # 0x9a
'hliex', # 0x9b
'hlie', # 0x9c
'hliep', # 0x9d
'hlat', # 0x9e
'hlax', # 0x9f
'hla', # 0xa0
'hlap', # 0xa1
'hluox', # 0xa2
'hluo', # 0xa3
'hluop', # 0xa4
'hlox', # 0xa5
'hlo', # 0xa6
'hlop', # 0xa7
'hlex', # 0xa8
'hle', # 0xa9
'hlep', # 0xaa
'hlut', # 0xab
'hlux', # 0xac
'hlu', # 0xad
'hlup', # 0xae
'hlurx', # 0xaf
'hlur', # 0xb0
'hlyt', # 0xb1
'hlyx', # 0xb2
'hly', # 0xb3
'hlyp', # 0xb4
'hlyrx', # 0xb5
'hlyr', # 0xb6
'lit', # 0xb7
'lix', # 0xb8
'li', # 0xb9
'lip', # 0xba
'liet', # 0xbb
'liex', # 0xbc
'lie', # 0xbd
'liep', # 0xbe
'lat', # 0xbf
'lax', # 0xc0
'la', # 0xc1
'lap', # 0xc2
'luot', # 0xc3
'luox', # 0xc4
'luo', # 0xc5
'luop', # 0xc6
'lot', # 0xc7
'lox', # 0xc8
'lo', # 0xc9
'lop', # 0xca
'lex', # 0xcb
'le', # 0xcc
'lep', # 0xcd
'lut', # 0xce
'lux', # 0xcf
'lu', # 0xd0
'lup', # 0xd1
'lurx', # 0xd2
'lur', # 0xd3
'lyt', # 0xd4
'lyx', # 0xd5
'ly', # 0xd6
'lyp', # 0xd7
'lyrx', # 0xd8
'lyr', # 0xd9
'git', # 0xda
'gix', # 0xdb
'gi', # 0xdc
'gip', # 0xdd
'giet', # 0xde
'giex', # 0xdf
'gie', # 0xe0
'giep', # 0xe1
'gat', # 0xe2
'gax', # 0xe3
'ga', # 0xe4
'gap', # 0xe5
'guot', # 0xe6
'guox', # 0xe7
'guo', # 0xe8
'guop', # 0xe9
'got', # 0xea
'gox', # 0xeb
'go', # 0xec
'gop', # 0xed
'get', # 0xee
'gex', # 0xef
'ge', # 0xf0
'gep', # 0xf1
'gut', # 0xf2
'gux', # 0xf3
'gu', # 0xf4
'gup', # 0xf5
'gurx', # 0xf6
'gur', # 0xf7
'kit', # 0xf8
'kix', # 0xf9
'ki', # 0xfa
'kip', # 0xfb
'kiex', # 0xfc
'kie', # 0xfd
'kiep', # 0xfe
'kat', # 0xff
)
| apache-2.0 |
pjg101/SickRage | lib/babelfish/exceptions.py | 88 | 2332 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
class Error(Exception):
"""Base class for all exceptions in babelfish"""
pass
class LanguageError(Error, AttributeError):
"""Base class for all language exceptions in babelfish"""
pass
class LanguageConvertError(LanguageError):
"""Exception raised by converters when :meth:`~babelfish.converters.LanguageConverter.convert` fails
:param string alpha3: alpha3 code that failed conversion
:param country: country code that failed conversion, if any
:type country: string or None
:param script: script code that failed conversion, if any
:type script: string or None
"""
def __init__(self, alpha3, country=None, script=None):
self.alpha3 = alpha3
self.country = country
self.script = script
def __str__(self):
s = self.alpha3
if self.country is not None:
s += '-' + self.country
if self.script is not None:
s += '-' + self.script
return s
class LanguageReverseError(LanguageError):
"""Exception raised by converters when :meth:`~babelfish.converters.LanguageReverseConverter.reverse` fails
:param string code: code that failed reverse conversion
"""
def __init__(self, code):
self.code = code
def __str__(self):
return repr(self.code)
class CountryError(Error, AttributeError):
"""Base class for all country exceptions in babelfish"""
pass
class CountryConvertError(CountryError):
"""Exception raised by converters when :meth:`~babelfish.converters.CountryConverter.convert` fails
:param string alpha2: alpha2 code that failed conversion
"""
def __init__(self, alpha2):
self.alpha2 = alpha2
def __str__(self):
return self.alpha2
class CountryReverseError(CountryError):
"""Exception raised by converters when :meth:`~babelfish.converters.CountryReverseConverter.reverse` fails
:param string code: code that failed reverse conversion
"""
def __init__(self, code):
self.code = code
def __str__(self):
return repr(self.code)
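# Hedged sketch (illustrative only): converters raise these exceptions, and
# callers can catch the shared base classes.
def _example_error_handling():
    try:
        raise LanguageConvertError('abc', country='US')
    except LanguageError as exc:
        assert str(exc) == 'abc-US'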
| gpl-3.0 |
sidartaoliveira/ansible | lib/ansible/modules/cloud/pubnub/pubnub_blocks.py | 42 | 24312 | #!/usr/bin/python
#
# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
# Frameworks
# Copyright (C) 2016 PubNub Inc.
# http://www.pubnub.com/
# http://www.pubnub.com/terms
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pubnub_blocks
version_added: '2.2'
short_description: PubNub blocks management module.
description:
- "This module allows Ansible to interface with the PubNub BLOCKS
infrastructure by providing the following operations: create / remove,
start / stop and rename for blocks and create / modify / remove for event
handlers"
author:
- PubNub <support@pubnub.com> (@pubnub)
- Sergey Mamontov <sergey@pubnub.com> (@parfeon)
requirements:
- "python >= 2.7"
- "pubnub_blocks_client >= 1.0"
options:
email:
description:
- Email from account for which new session should be started.
- "Not required if C(cache) contains result of previous module call (in
same play)."
required: false
password:
description:
- Password which match to account to which specified C(email) belong.
- "Not required if C(cache) contains result of previous module call (in
same play)."
required: false
cache:
description: >
      When a single play uses the blocks management module several times, it
      is preferred to enable 'caching' by making the previous module call
      share its gathered artifacts and passing them to this parameter.
required: false
default: {}
account:
description:
- "Name of PubNub account for from which C(application) will be used to
manage blocks."
- "User\'s account will be used if value not set or empty."
required: false
version_added: '2.4'
application:
description:
- "Name of target PubNub application for which blocks configuration on
specific C(keyset) will be done."
required: true
keyset:
description:
- Name of application's keys set which is bound to managed blocks.
required: true
state:
description:
- "Intended block state after event handlers creation / update process
will be completed."
required: false
default: 'started'
choices: ['started', 'stopped', 'present', 'absent']
name:
description:
- Name of managed block which will be later visible on admin.pubnub.com.
required: true
description:
description:
- "Short block description which will be later visible on
admin.pubnub.com. Used only if block doesn\'t exists and won\'t change
description for existing block."
required: false
default: 'New block'
event_handlers:
description:
- "List of event handlers which should be updated for specified block
C(name)."
- "Each entry for new event handler should contain: C(name), C(src),
      C(channels), C(event). C(name) is used as the event handler name, which
      can be used later to make changes to it."
- C(src) is full path to file with event handler code.
- "C(channels) is name of channel from which event handler is waiting
for events."
- "C(event) is type of event which is able to trigger event handler:
I(js-before-publish), I(js-after-publish), I(js-after-presence)."
- "Each entry for existing handlers should contain C(name) (so target
handler can be identified). Rest parameters (C(src), C(channels) and
C(event)) can be added if changes required for them."
- "It is possible to rename event handler by adding C(changes) key to
event handler payload and pass dictionary, which will contain single key
C(name), where new name should be passed."
- "To remove particular event handler it is possible to set C(state) for
it to C(absent) and it will be removed."
required: false
default: []
changes:
description:
- "List of fields which should be changed by block itself (doesn't
affect any event handlers)."
- "Possible options for change is: C(name)."
required: false
default: {}
validate_certs:
description:
- "This key allow to try skip certificates check when performing REST API
calls. Sometimes host may have issues with certificates on it and this
will cause problems to call PubNub REST API."
- If check should be ignored C(False) should be passed to this parameter.
required: false
default: true
'''
EXAMPLES = '''
# Event handler create example.
- name: Create single event handler
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
event_handlers:
-
src: '{{ path_to_handler_source }}'
name: '{{ handler_name }}'
event: 'js-before-publish'
channels: '{{ handler_channel }}'
# Change event handler trigger event type.
- name: Change event handler 'event'
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
event_handlers:
-
name: '{{ handler_name }}'
event: 'js-after-publish'
# Stop block and event handlers.
- name: Stopping block
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
    state: stopped
# Multiple module calls with cached result passing
- name: Create '{{ block_name }}' block
register: module_cache
pubnub_blocks:
email: '{{ email }}'
password: '{{ password }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
event_handlers:
-
src: '{{ path_to_handler_1_source }}'
name: '{{ event_handler_1_name }}'
channels: '{{ event_handler_1_channel }}'
event: 'js-before-publish'
- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: present
event_handlers:
-
src: '{{ path_to_handler_2_source }}'
name: '{{ event_handler_2_name }}'
channels: '{{ event_handler_2_channel }}'
event: 'js-before-publish'
- name: Start '{{ block_name }}' block
register: module_cache
pubnub_blocks:
cache: '{{ module_cache }}'
application: '{{ app_name }}'
keyset: '{{ keyset_name }}'
name: '{{ block_name }}'
state: started
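# Remove event handler from block. An illustrative sketch based on the
# documented C(state: absent) behaviour; all '{{ ... }}' values are
# placeholders.
- name: Remove '{{ event_handler_1_name }}' handler from '{{ block_name }}'
  register: module_cache
  pubnub_blocks:
    cache: '{{ module_cache }}'
    application: '{{ app_name }}'
    keyset: '{{ keyset_name }}'
    name: '{{ block_name }}'
    event_handlers:
      -
        name: '{{ event_handler_1_name }}'
        state: absent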
'''
RETURN = '''
module_cache:
description: "Cached account information. In case if with single play module
used few times it is better to pass cached data to next module calls to speed
up process."
type: dict
returned: always
'''
import copy
import os
# Import module snippets
from ansible.module_utils.basic import AnsibleModule
# noinspection PyProtectedMember
from ansible.module_utils._text import to_text
try:
# Import PubNub BLOCKS client.
from pubnub_blocks_client import User, Account, Owner, Application, Keyset
from pubnub_blocks_client import Block, EventHandler
import pubnub_blocks_client.exceptions as exceptions
HAS_PUBNUB_BLOCKS_CLIENT = True
except ImportError:
HAS_PUBNUB_BLOCKS_CLIENT = False
User = None
Account = None
Owner = None
Application = None
Keyset = None
Block = None
EventHandler = None
exceptions = None
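# NOTE: the names above are deliberately stubbed out so this file can still
# be imported without the client library; main() reports the missing
# dependency via fail_json().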
def pubnub_user(module):
"""Create and configure user model if it possible.
:type module: AnsibleModule
    :param module: Reference to the module which contains launch
        information and status-report methods.
:rtype: User
    :return: Reference to an initialized and ready-to-use user, or 'None'
        if not all required information has been passed to the module.
"""
user = None
params = module.params
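    # Prefer cached artifacts from a previous module call (a registered
    # result) over a fresh email/password login.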
if params.get('cache') and params['cache'].get('module_cache'):
cache = params['cache']['module_cache']
user = User()
user.restore(cache=copy.deepcopy(cache['pnm_user']))
elif params.get('email') and params.get('password'):
user = User(email=params.get('email'), password=params.get('password'))
else:
        err_msg = 'It looks like no account credentials have been passed ' \
                  'or the \'cache\' field doesn\'t contain the result of ' \
                  'a previous module call.'
module.fail_json(msg='Missing account credentials.',
description=err_msg, changed=False)
return user
def pubnub_account(module, user):
"""Create and configure account if it is possible.
:type module: AnsibleModule
    :param module: Reference to the module which contains launch
        information and status-report methods.
:type user: User
    :param user: Reference to the authorized user whose account should be
        used during block manipulation.
:rtype: Account
    :return: Reference to an initialized and ready-to-use account, or 'None'
        if not all required information has been passed to the module.
"""
params = module.params
if params.get('account'):
account_name = params.get('account')
account = user.account(name=params.get('account'))
if account is None:
            err_frmt = 'It looks like there is no \'{0}\' account for the ' \
                       'authorized user. Please make sure the correct name ' \
                       'has been passed during module configuration.'
module.fail_json(msg='Missing account.',
description=err_frmt.format(account_name),
changed=False)
else:
account = user.accounts()[0]
return account
def pubnub_application(module, account):
"""Retrieve reference on target application from account model.
NOTE: In case if account authorization will fail or there is no
application with specified name, module will exit with error.
:type module: AnsibleModule
    :param module: Reference to the module which contains launch
        information and status-report methods.
:type account: Account
    :param account: Reference to the PubNub account model from which the
        application reference should be fetched.
:rtype: Application
    :return: Reference to an initialized and ready-to-use application model.
"""
application = None
params = module.params
try:
application = account.application(params['application'])
except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
exc_msg = _failure_title_from_exception(exc)
exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
module.fail_json(msg=exc_msg, description=exc_descr,
changed=account.changed,
module_cache=dict(account))
if application is None:
        err_fmt = 'There is no \'{0}\' application for {1}. Make sure the ' \
                  'correct application name has been passed. If the application ' \
                  'doesn\'t exist you can create it on admin.pubnub.com.'
email = account.owner.email
module.fail_json(msg=err_fmt.format(params['application'], email),
changed=account.changed, module_cache=dict(account))
return application
def pubnub_keyset(module, account, application):
"""Retrieve reference on target keyset from application model.
NOTE: In case if there is no keyset with specified name, module will
exit with error.
:type module: AnsibleModule
    :param module: Reference to the module which contains launch
        information and status-report methods.
:type account: Account
    :param account: Reference to the PubNub account model which will be
        used to export cached data in case of error.
:type application: Application
    :param application: Reference to the PubNub application model from
        which the keyset reference should be fetched.
:rtype: Keyset
    :return: Reference to an initialized and ready-to-use keyset model.
"""
params = module.params
keyset = application.keyset(params['keyset'])
if keyset is None:
        err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. Make ' \
                  'sure the correct keyset name has been passed. If the keyset ' \
                  'doesn\'t exist you can create it on admin.pubnub.com.'
module.fail_json(msg=err_fmt.format(params['keyset'],
application.name),
changed=account.changed, module_cache=dict(account))
return keyset
def pubnub_block(module, account, keyset):
"""Retrieve reference on target keyset from application model.
NOTE: In case if there is no block with specified name and module
configured to start/stop it, module will exit with error.
:type module: AnsibleModule
    :param module: Reference to the module which contains launch
        information and status-report methods.
:type account: Account
    :param account: Reference to the PubNub account model which will be
        used to export cached data in case of error.
:type keyset: Keyset
    :param keyset: Reference to the keyset model from which the block
        reference should be fetched.
:rtype: Block
    :return: Reference to an initialized and ready-to-use block model.
"""
block = None
params = module.params
try:
block = keyset.block(params['name'])
except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
exc_msg = _failure_title_from_exception(exc)
exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
module.fail_json(msg=exc_msg, description=exc_descr,
changed=account.changed, module_cache=dict(account))
    # Report an error if the block doesn't exist but was requested to be
    # started/stopped.
if block is None and params['state'] in ['started', 'stopped']:
block_name = params.get('name')
module.fail_json(msg="'{0}' block doesn't exists.".format(block_name),
changed=account.changed, module_cache=dict(account))
if block is None and params['state'] == 'present':
block = Block(name=params.get('name'),
description=params.get('description'))
keyset.add_block(block)
if block:
# Update block information if required.
if params.get('changes') and params['changes'].get('name'):
block.name = params['changes']['name']
if params.get('description'):
block.description = params.get('description')
return block
def pubnub_event_handler(block, data):
"""Retrieve reference on target event handler from application model.
:type block: Block
    :param block: Reference to the block model from which the event
        handler reference should be fetched.
:type data: dict
    :param data: Dictionary which contains information about the event
        handler and whether it should be created or not.
:rtype: EventHandler
    :return: Reference to an initialized and ready-to-use event handler
        model. 'None' will be returned if there is no handler with the
        specified name and no request to create it.
"""
event_handler = block.event_handler(data['name'])
# Prepare payload for event handler update.
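    # A 'changes' entry may carry a replacement name for a rename request;
    # pop it so it is not treated as a regular event handler field. The new
    # name, when present, takes precedence below.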
changed_name = (data.pop('changes').get('name')
if 'changes' in data else None)
    name = changed_name or data.get('name')
channels = data.get('channels')
event = data.get('event')
code = _content_of_file_at_path(data.get('src'))
state = data.get('state') or 'present'
# Create event handler if required.
if event_handler is None and state == 'present':
event_handler = EventHandler(name=name, channels=channels, event=event,
code=code)
block.add_event_handler(event_handler)
# Update event handler if required.
if event_handler is not None and state == 'present':
if name is not None:
event_handler.name = name
if channels is not None:
event_handler.channels = channels
if event is not None:
event_handler.event = event
if code is not None:
event_handler.code = code
return event_handler
def _failure_title_from_exception(exception):
"""Compose human-readable title for module error title.
Title will be based on status codes if they has been provided.
:type exception: exceptions.GeneralPubNubError
    :param exception: Reference to the exception for which the title
        should be composed.
:rtype: str
    :return: Error title which should be shown on module failure.
"""
title = 'General REST API access error.'
if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
title = 'Authorization error: missing credentials.'
elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
title = 'Authorization error: wrong credentials.'
elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
title = 'API access error: insufficient access rights.'
elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
title = 'API access error: time token expired.'
elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
        title = 'Block creation did fail: block with same name already exists.'
elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
        title = 'Unable to fetch list of blocks for keyset.'
elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
title = 'Block creation did fail.'
elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
title = 'Block update did fail.'
elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
title = 'Block removal did fail.'
elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
title = 'Block start/stop did fail.'
elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
title = 'Event handler creation did fail: missing fields.'
elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
        title = 'Event handler creation did fail: handler already exists.'
elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
title = 'Event handler creation did fail.'
elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
title = 'Event handler update did fail.'
elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
title = 'Event handler removal did fail.'
return title
def _content_of_file_at_path(path):
"""Read file content.
    Try to read the content of the file at the specified path.
:type path: str
    :param path: Full path to the file which should be read.
    :rtype: str
    :return: File content or 'None'.
"""
content = None
if path and os.path.exists(path):
with open(path, mode="rt") as opened_file:
b_content = opened_file.read()
try:
content = to_text(b_content, errors='surrogate_or_strict')
except UnicodeError:
pass
return content
def main():
fields = dict(
email=dict(default='', required=False, type='str'),
password=dict(default='', required=False, type='str', no_log=True),
account=dict(default='', required=False, type='str'),
application=dict(required=True, type='str'),
keyset=dict(required=True, type='str'),
state=dict(default='present', type='str',
choices=['started', 'stopped', 'present', 'absent']),
name=dict(required=True, type='str'), description=dict(type='str'),
event_handlers=dict(default=list(), type='list'),
changes=dict(default=dict(), type='dict'),
cache=dict(default=dict(), type='dict'),
validate_certs=dict(default=True, type='bool'))
module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
if not HAS_PUBNUB_BLOCKS_CLIENT:
module.fail_json(msg='pubnub_blocks_client required for this module.')
params = module.params
# Authorize user.
user = pubnub_user(module)
# Initialize PubNub account instance.
account = pubnub_account(module, user=user)
# Try fetch application with which module should work.
application = pubnub_application(module, account=account)
# Try fetch keyset with which module should work.
keyset = pubnub_keyset(module, account=account, application=application)
# Try fetch block with which module should work.
block = pubnub_block(module, account=account, keyset=keyset)
is_new_block = block is not None and block.uid == -1
# Check whether block should be removed or not.
if block is not None and params['state'] == 'absent':
keyset.remove_block(block)
block = None
if block is not None:
# Update block information if required.
if params.get('changes') and params['changes'].get('name'):
block.name = params['changes']['name']
# Process event changes to event handlers.
for event_handler_data in params.get('event_handlers') or list():
state = event_handler_data.get('state') or 'present'
event_handler = pubnub_event_handler(data=event_handler_data,
block=block)
if state == 'absent' and event_handler:
block.delete_event_handler(event_handler)
# Update block operation state if required.
if block and not is_new_block:
if params['state'] == 'started':
block.start()
elif params['state'] == 'stopped':
block.stop()
# Save current account state.
if not module.check_mode:
try:
account.save()
except (exceptions.APIAccessError, exceptions.KeysetError,
exceptions.BlockError, exceptions.EventHandlerError,
exceptions.GeneralPubNubError) as exc:
module_cache = dict(account)
module_cache.update(dict(pnm_user=dict(user)))
exc_msg = _failure_title_from_exception(exc)
exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
module.fail_json(msg=exc_msg, description=exc_descr,
changed=account.changed,
module_cache=module_cache)
# Report module execution results.
module_cache = dict(account)
module_cache.update(dict(pnm_user=dict(user)))
changed_will_change = account.changed or account.will_change
module.exit_json(changed=changed_will_change, module_cache=module_cache)
if __name__ == '__main__':
main()
| gpl-3.0 |
havard024/prego | venv/lib/python2.7/site-packages/django/contrib/localflavor/nl/forms.py | 100 | 2867 | """
NL-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.contrib.localflavor.nl.nl_provinces import PROVINCE_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
pc_re = re.compile(r'^\d{4}[A-Z]{2}$')
sofi_re = re.compile(r'^\d{9}$')
numeric_re = re.compile(r'^\d+$')
class NLZipCodeField(Field):
"""
A Dutch postal code field.
"""
default_error_messages = {
'invalid': _('Enter a valid postal code'),
}
def clean(self, value):
super(NLZipCodeField, self).clean(value)
if value in EMPTY_VALUES:
return ''
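        # Normalize input such as '1234 ab' to '1234AB' before validation;
        # the cleaned value is returned re-formatted as '1234 AB'.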
value = value.strip().upper().replace(' ', '')
if not pc_re.search(value):
raise ValidationError(self.error_messages['invalid'])
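        # Dutch postal codes start at 1000; the 0000-0999 range is never
        # issued.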
if int(value[:4]) < 1000:
raise ValidationError(self.error_messages['invalid'])
return '%s %s' % (value[:4], value[4:])
class NLProvinceSelect(Select):
"""
A Select widget that uses a list of provinces of the Netherlands as its
choices.
"""
def __init__(self, attrs=None):
super(NLProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class NLPhoneNumberField(Field):
"""
A Dutch telephone number field.
"""
default_error_messages = {
'invalid': _('Enter a valid phone number'),
}
def clean(self, value):
super(NLPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
        phone_nr = re.sub(r'[\-\s\(\)]', '', smart_text(value))
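        # Accept a 10-digit domestic number, or the international '+31'
        # prefix followed by 9 digits (12 characters in total).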
if len(phone_nr) == 10 and numeric_re.search(phone_nr):
return value
if phone_nr[:3] == '+31' and len(phone_nr) == 12 and \
numeric_re.search(phone_nr[3:]):
return value
raise ValidationError(self.error_messages['invalid'])
class NLSoFiNumberField(Field):
"""
A Dutch social security number (SoFi/BSN) field.
http://nl.wikipedia.org/wiki/Sofinummer
"""
default_error_messages = {
'invalid': _('Enter a valid SoFi number'),
}
def clean(self, value):
super(NLSoFiNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
if not sofi_re.search(value):
raise ValidationError(self.error_messages['invalid'])
if int(value) == 0:
raise ValidationError(self.error_messages['invalid'])
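        # 'Elfproef' (eleven test): the weighted sum of the first eight
        # digits (weights 9 down to 2) minus the last digit must be
        # divisible by 11.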
checksum = 0
for i in range(9, 1, -1):
checksum += int(value[9-i]) * i
checksum -= int(value[-1])
if checksum % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
return value
| mit |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/GL/NV/gpu_program5.py | 2 | 5971 | '''OpenGL extension NV.gpu_program5
This module customises the behaviour of the
OpenGL.raw.GL.NV.gpu_program5 to provide a more
Python-friendly API
Overview (from the spec)
This specification documents the common instruction set and basic
functionality provided by NVIDIA's 5th generation of assembly instruction
sets supporting programmable graphics pipeline stages.
The instruction set builds upon the basic framework provided by the
ARB_vertex_program and ARB_fragment_program extensions to expose
considerably more capable hardware. In addition to new capabilities for
vertex and fragment programs, this extension provides new functionality
for geometry programs as originally described in the NV_geometry_program4
specification, and serves as the basis for the new tessellation control
and evaluation programs described in the NV_tessellation_program5
extension.
Programs using the functionality provided by this extension should begin
with the program headers "!!NVvp5.0" (vertex programs), "!!NVtcp5.0"
(tessellation control programs), "!!NVtep5.0" (tessellation evaluation
programs), "!!NVgp5.0" (geometry programs), and "!!NVfp5.0" (fragment
programs).
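For illustration only, a minimal fragment program written against this
instruction set might look like the following sketch (assembled from the
program headers above and the published NV_gpu_program4/5 grammar; it is
not an excerpt from the specification):

    !!NVfp5.0
    # Pass the interpolated primary color through unchanged.
    TEMP color;
    MOV color, fragment.color;
    MOV result.color, color;
    END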
This extension provides a variety of new features, including:
* support for 64-bit integer operations;
* the ability to dynamically index into an array of texture units or
program parameter buffers;
* extending texel offset support to allow loading texel offsets from
regular integer operands computed at run-time, instead of requiring
that the offsets be constants encoded in texture instructions;
* extending TXG (texture gather) support to return the 2x2 footprint
from any component of the texture image instead of always returning
the first (x) component;
* extending TXG to support shadow comparisons in conjunction with a
depth texture, via the SHADOW* targets;
* further extending texture gather support to provide a new opcode
(TXGO) that applies a separate texel offset vector to each of the four
samples returned by the instruction;
* bit manipulation instructions, including ones to find the position of
the most or least significant set bit, bitfield insertion and
extraction, and bit reversal;
* a general data conversion instruction (CVT) supporting conversion
between any two data types supported by this extension; and
* new instructions to compute the composite of a set of boolean
      conditions for a group of shader threads.
This extension also provides some new capabilities for individual program
types, including:
* support for instanced geometry programs, where a geometry program may
be run multiple times for each primitive;
* support for emitting vertices in a geometry program where each vertex
emitted may be directed at a specified vertex stream and captured
using the ARB_transform_feedback3 extension;
* support for interpolating an attribute at a programmable offset
relative to the pixel center (IPAO), at a programmable sample number
(IPAS), or at the fragment's centroid location (IPAC) in a fragment
program;
* support for reading a mask of covered samples in a fragment program;
* support for reading a point sprite coordinate directly in a fragment
program, without overriding a texture coordinate;
* support for reading patch primitives and per-patch attributes
(introduced by ARB_tessellation_shader) in a geometry program; and
* support for multiple output vectors for a single color output in a
fragment program (as used by ARB_blend_func_extended).
This extension also provides optional support for 64-bit-per-component
variables and 64-bit floating-point arithmetic. These features are
supported if and only if "NV_gpu_program_fp64" is found in the extension
string.
This extension incorporates the memory access operations from the
NV_shader_buffer_load and NV_parameter_buffer_object2 extensions,
originally built as add-ons to NV_gpu_program4. It also provides the
following new capabilities:
* support for the features without requiring a separate OPTION keyword;
* support for indexing into an array of constant buffers using the LDC
opcode added by NV_parameter_buffer_object2;
* support for storing into buffer objects at a specified GPU address
      using the STORE opcode, and allowing applications to create READ_WRITE
and WRITE_ONLY mappings when making a buffer object resident using the
API mechanisms in the NV_shader_buffer_store extension;
* storage instruction modifiers to allow loading and storing 64-bit
component values;
* support for atomic memory transactions using the ATOM opcode, where
the instruction atomically reads the memory pointed to by a pointer,
performs a specified computation, stores the results of that
computation, and returns the original value read;
* support for memory barrier transactions using the MEMBAR opcode, which
ensures that all memory stores issued prior to the opcode complete
prior to any subsequent memory transactions; and
* a fragment program option to specify that depth and stencil tests are
performed prior to fragment program execution.
Additionally, the assembly program languages supported by this extension
include support for reading, writing, and performing atomic memory
operations on texture image data using the opcodes and mechanisms
documented in the "Dependencies on NV_gpu_program5" section of the
EXT_shader_image_load_store extension.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/gpu_program5.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.gpu_program5 import *
### END AUTOGENERATED SECTION | bsd-2-clause |