repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
endlessm/chromium-browser
third_party/catapult/third_party/gsutil/gslib/vendored/boto/tests/integration/s3/test_bucket.py
14
12530
# -*- coding: utf-8 -*- # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
""" Some unit tests for the S3 Bucket """ from mock import patch, Mock import unittest import time from boto.exception import S3ResponseError from boto.s3.connection import S3Connection from boto.s3.bucketlogging import BucketLogging from boto.s3.lifecycle import Lifecycle from boto.s3.lifecycle import Transition from boto.s3.lifecycle import Expiration from boto.s3.lifecycle import Rule from boto.s3.acl import Grant from boto.s3.tagging import Tags, TagSet from boto.s3.website import RedirectLocation from boto.compat import unquote_str class S3BucketTest (unittest.TestCase): s3 = True def setUp(self): self.conn = S3Connection() self.bucket_name = 'bucket-%d' % int(time.time()) self.bucket = self.conn.create_bucket(self.bucket_name) def tearDown(self): for key in self.bucket: key.delete() self.bucket.delete() def test_next_marker(self): expected = ["a/", "b", "c"] for key_name in expected: key = self.bucket.new_key(key_name) key.set_contents_from_string(key_name) # Normal list of first 2 keys will have # no NextMarker set, so we use last key to iterate # last element will be "b" so no issue. rs = self.bucket.get_all_keys(max_keys=2) for element in rs: pass self.assertEqual(element.name, "b") self.assertEqual(rs.next_marker, None) # list using delimiter of first 2 keys will have # a NextMarker set (when truncated). As prefixes # are grouped together at the end, we get "a/" as # last element, but luckily we have next_marker. rs = self.bucket.get_all_keys(max_keys=2, delimiter="/") for element in rs: pass self.assertEqual(element.name, "a/") self.assertEqual(rs.next_marker, "b") # ensure bucket.list() still works by just # popping elements off the front of expected. 
rs = self.bucket.list() for element in rs: self.assertEqual(element.name, expected.pop(0)) self.assertEqual(expected, []) def test_list_with_url_encoding(self): expected = [u"α", u"β", u"γ"] for key_name in expected: key = self.bucket.new_key(key_name) key.set_contents_from_string(key_name) # ensure bucket.list() still works by just # popping elements off the front of expected. orig_getall = self.bucket._get_all getall = lambda *a, **k: orig_getall(*a, max_keys=2, **k) with patch.object(self.bucket, '_get_all', getall): rs = self.bucket.list(encoding_type="url") for element in rs: name = unquote_str(element.name) self.assertEqual(name, expected.pop(0)) self.assertEqual(expected, []) def test_logging(self): # use self.bucket as the target bucket so that teardown # will delete any log files that make it into the bucket # automatically and all we have to do is delete the # source bucket. sb_name = "src-" + self.bucket_name sb = self.conn.create_bucket(sb_name) # grant log write perms to target bucket using canned-acl self.bucket.set_acl("log-delivery-write") target_bucket = self.bucket_name target_prefix = u"jp/ログ/" # Check existing status is disabled bls = sb.get_logging_status() self.assertEqual(bls.target, None) # Create a logging status and grant auth users READ PERM authuri = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" authr = Grant(permission="READ", type="Group", uri=authuri) sb.enable_logging(target_bucket, target_prefix=target_prefix, grants=[authr]) # Check the status and confirm its set. 
bls = sb.get_logging_status() self.assertEqual(bls.target, target_bucket) self.assertEqual(bls.prefix, target_prefix) self.assertEqual(len(bls.grants), 1) self.assertEqual(bls.grants[0].type, "Group") self.assertEqual(bls.grants[0].uri, authuri) # finally delete the src bucket sb.delete() def test_tagging(self): tagging = """ <Tagging> <TagSet> <Tag> <Key>tagkey</Key> <Value>tagvalue</Value> </Tag> </TagSet> </Tagging> """ self.bucket.set_xml_tags(tagging) response = self.bucket.get_tags() self.assertEqual(response[0][0].key, 'tagkey') self.assertEqual(response[0][0].value, 'tagvalue') self.bucket.delete_tags() try: self.bucket.get_tags() except S3ResponseError as e: self.assertEqual(e.code, 'NoSuchTagSet') except Exception as e: self.fail("Wrong exception raised (expected S3ResponseError): %s" % e) else: self.fail("Expected S3ResponseError, but no exception raised.") def test_tagging_from_objects(self): """Create tags from python objects rather than raw xml.""" t = Tags() tag_set = TagSet() tag_set.add_tag('akey', 'avalue') tag_set.add_tag('anotherkey', 'anothervalue') t.add_tag_set(tag_set) self.bucket.set_tags(t) response = self.bucket.get_tags() tags = sorted(response[0], key=lambda tag: tag.key) self.assertEqual(tags[0].key, 'akey') self.assertEqual(tags[0].value, 'avalue') self.assertEqual(tags[1].key, 'anotherkey') self.assertEqual(tags[1].value, 'anothervalue') def test_website_configuration(self): response = self.bucket.configure_website('index.html') self.assertTrue(response) config = self.bucket.get_website_configuration() self.assertEqual(config, {'WebsiteConfiguration': {'IndexDocument': {'Suffix': 'index.html'}}}) config2, xml = self.bucket.get_website_configuration_with_xml() self.assertEqual(config, config2) self.assertTrue('<Suffix>index.html</Suffix>' in xml, xml) def test_website_redirect_all_requests(self): response = self.bucket.configure_website( redirect_all_requests_to=RedirectLocation('example.com')) config = 
self.bucket.get_website_configuration() self.assertEqual(config, { 'WebsiteConfiguration': { 'RedirectAllRequestsTo': { 'HostName': 'example.com'}}}) # Can configure the protocol as well. response = self.bucket.configure_website( redirect_all_requests_to=RedirectLocation('example.com', 'https')) config = self.bucket.get_website_configuration() self.assertEqual(config, { 'WebsiteConfiguration': {'RedirectAllRequestsTo': { 'HostName': 'example.com', 'Protocol': 'https', }}} ) def test_lifecycle(self): lifecycle = Lifecycle() lifecycle.add_rule('myid', '', 'Enabled', 30) self.assertTrue(self.bucket.configure_lifecycle(lifecycle)) response = self.bucket.get_lifecycle_config() self.assertEqual(len(response), 1) actual_lifecycle = response[0] self.assertEqual(actual_lifecycle.id, 'myid') self.assertEqual(actual_lifecycle.prefix, '') self.assertEqual(actual_lifecycle.status, 'Enabled') self.assertEqual(actual_lifecycle.transition, []) def test_lifecycle_with_glacier_transition(self): lifecycle = Lifecycle() transition = Transition(days=30, storage_class='GLACIER') rule = Rule('myid', prefix='', status='Enabled', expiration=None, transition=transition) lifecycle.append(rule) self.assertTrue(self.bucket.configure_lifecycle(lifecycle)) response = self.bucket.get_lifecycle_config() transition = response[0].transition self.assertEqual(transition.days, 30) self.assertEqual(transition.storage_class, 'GLACIER') self.assertEqual(transition.date, None) def test_lifecycle_multi(self): date = '2022-10-12T00:00:00.000Z' sc = 'GLACIER' lifecycle = Lifecycle() lifecycle.add_rule("1", "1/", "Enabled", 1) lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2)) lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date)) lifecycle.add_rule("4", "4/", "Enabled", None, Transition(days=4, storage_class=sc)) lifecycle.add_rule("5", "5/", "Enabled", None, Transition(date=date, storage_class=sc)) # set the lifecycle self.bucket.configure_lifecycle(lifecycle) # read the lifecycle back 
readlifecycle = self.bucket.get_lifecycle_config(); for rule in readlifecycle: if rule.id == "1": self.assertEqual(rule.prefix, "1/") self.assertEqual(rule.expiration.days, 1) elif rule.id == "2": self.assertEqual(rule.prefix, "2/") self.assertEqual(rule.expiration.days, 2) elif rule.id == "3": self.assertEqual(rule.prefix, "3/") self.assertEqual(rule.expiration.date, date) elif rule.id == "4": self.assertEqual(rule.prefix, "4/") self.assertEqual(rule.transition.days, 4) self.assertEqual(rule.transition.storage_class, sc) elif rule.id == "5": self.assertEqual(rule.prefix, "5/") self.assertEqual(rule.transition.date, date) self.assertEqual(rule.transition.storage_class, sc) else: self.fail("unexpected id %s" % rule.id) def test_lifecycle_jp(self): # test lifecycle with Japanese prefix name = "Japanese files" prefix = "日本語/" days = 30 lifecycle = Lifecycle() lifecycle.add_rule(name, prefix, "Enabled", days) # set the lifecycle self.bucket.configure_lifecycle(lifecycle) # read the lifecycle back readlifecycle = self.bucket.get_lifecycle_config(); for rule in readlifecycle: self.assertEqual(rule.id, name) self.assertEqual(rule.expiration.days, days) #Note: Boto seems correct? AWS seems broken? #self.assertEqual(rule.prefix, prefix) def test_lifecycle_with_defaults(self): lifecycle = Lifecycle() lifecycle.add_rule(expiration=30) self.assertTrue(self.bucket.configure_lifecycle(lifecycle)) response = self.bucket.get_lifecycle_config() self.assertEqual(len(response), 1) actual_lifecycle = response[0] self.assertNotEqual(len(actual_lifecycle.id), 0) self.assertEqual(actual_lifecycle.prefix, '') def test_lifecycle_rule_xml(self): # create a rule directly with id, prefix defaults rule = Rule(status='Enabled', expiration=30) s = rule.to_xml() # Confirm no ID is set in the rule. self.assertEqual(s.find("<ID>"), -1) # Confirm Prefix is '' and not set to 'None' self.assertNotEqual(s.find("<Prefix></Prefix>"), -1)
bsd-3-clause
mcgoddard/widgetr
env/Lib/site-packages/pip/_vendor/distlib/database.py
203
49199
# -*- coding: utf-8 -*- # # Copyright (C) 2012-2014 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """PEP 376 implementation.""" from __future__ import unicode_literals import base64 import codecs import contextlib import hashlib import logging import os import posixpath import sys import zipimport from . import DistlibException, resources from .compat import StringIO from .version import get_scheme, UnsupportedVersionError from .metadata import Metadata, METADATA_FILENAME from .util import (parse_requirement, cached_property, parse_name_and_version, read_exports, write_exports, CSVReader, CSVWriter) __all__ = ['Distribution', 'BaseInstalledDistribution', 'InstalledDistribution', 'EggInfoDistribution', 'DistributionPath'] logger = logging.getLogger(__name__) EXPORTS_FILENAME = 'pydist-exports.json' COMMANDS_FILENAME = 'pydist-commands.json' DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', 'RESOURCES', EXPORTS_FILENAME, 'SHARED') DISTINFO_EXT = '.dist-info' class _Cache(object): """ A simple cache mapping names and .dist-info paths to distributions """ def __init__(self): """ Initialise an instance. There is normally one for each DistributionPath. """ self.name = {} self.path = {} self.generated = False def clear(self): """ Clear the cache, setting it to its initial state. """ self.name.clear() self.path.clear() self.generated = False def add(self, dist): """ Add a distribution to the cache. :param dist: The distribution to add. """ if dist.path not in self.path: self.path[dist.path] = dist self.name.setdefault(dist.key, []).append(dist) class DistributionPath(object): """ Represents a set of distributions installed on a path (typically sys.path). """ def __init__(self, path=None, include_egg=False): """ Create an instance from a path, optionally including legacy (distutils/ setuptools/distribute) distributions. :param path: The path to use, as a list of directories. If not specified, sys.path is used. 
:param include_egg: If True, this instance will look for and return legacy distributions as well as those based on PEP 376. """ if path is None: path = sys.path self.path = path self._include_dist = True self._include_egg = include_egg self._cache = _Cache() self._cache_egg = _Cache() self._cache_enabled = True self._scheme = get_scheme('default') def _get_cache_enabled(self): return self._cache_enabled def _set_cache_enabled(self, value): self._cache_enabled = value cache_enabled = property(_get_cache_enabled, _set_cache_enabled) def clear_cache(self): """ Clears the internal cache. """ self._cache.clear() self._cache_egg.clear() def _yield_distributions(self): """ Yield .dist-info and/or .egg(-info) distributions. """ # We need to check if we've seen some resources already, because on # some Linux systems (e.g. some Debian/Ubuntu variants) there are # symlinks which alias other files in the environment. seen = set() for path in self.path: finder = resources.finder_for_path(path) if finder is None: continue r = finder.find('') if not r or not r.is_container: continue rset = sorted(r.resources) for entry in rset: r = finder.find(entry) if not r or r.path in seen: continue if self._include_dist and entry.endswith(DISTINFO_EXT): metadata_path = posixpath.join(entry, METADATA_FILENAME) pydist = finder.find(metadata_path) if not pydist: continue metadata = Metadata(fileobj=pydist.as_stream(), scheme='legacy') logger.debug('Found %s', r.path) seen.add(r.path) yield new_dist_class(r.path, metadata=metadata, env=self) elif self._include_egg and entry.endswith(('.egg-info', '.egg')): logger.debug('Found %s', r.path) seen.add(r.path) yield old_dist_class(r.path, self) def _generate_cache(self): """ Scan the path for distributions and populate the cache with those that are found. 
""" gen_dist = not self._cache.generated gen_egg = self._include_egg and not self._cache_egg.generated if gen_dist or gen_egg: for dist in self._yield_distributions(): if isinstance(dist, InstalledDistribution): self._cache.add(dist) else: self._cache_egg.add(dist) if gen_dist: self._cache.generated = True if gen_egg: self._cache_egg.generated = True @classmethod def distinfo_dirname(cls, name, version): """ The *name* and *version* parameters are converted into their filename-escaped form, i.e. any ``'-'`` characters are replaced with ``'_'`` other than the one in ``'dist-info'`` and the one separating the name from the version number. :parameter name: is converted to a standard distribution name by replacing any runs of non- alphanumeric characters with a single ``'-'``. :type name: string :parameter version: is converted to a standard version string. Spaces become dots, and all other non-alphanumeric characters (except dots) become dashes, with runs of multiple dashes condensed to a single dash. :type version: string :returns: directory name :rtype: string""" name = name.replace('-', '_') return '-'.join([name, version]) + DISTINFO_EXT def get_distributions(self): """ Provides an iterator that looks for distributions and returns :class:`InstalledDistribution` or :class:`EggInfoDistribution` instances for each one of them. :rtype: iterator of :class:`InstalledDistribution` and :class:`EggInfoDistribution` instances """ if not self._cache_enabled: for dist in self._yield_distributions(): yield dist else: self._generate_cache() for dist in self._cache.path.values(): yield dist if self._include_egg: for dist in self._cache_egg.path.values(): yield dist def get_distribution(self, name): """ Looks for a named distribution on the path. This function only returns the first result found, as no more than one value is expected. If nothing is found, ``None`` is returned. 
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` or ``None`` """ result = None name = name.lower() if not self._cache_enabled: for dist in self._yield_distributions(): if dist.key == name: result = dist break else: self._generate_cache() if name in self._cache.name: result = self._cache.name[name][0] elif self._include_egg and name in self._cache_egg.name: result = self._cache_egg.name[name][0] return result def provides_distribution(self, name, version=None): """ Iterates over all distributions to find which distributions provide *name*. If a *version* is provided, it will be used to filter the results. This function only returns the first result found, since no more than one values are expected. If the directory is not found, returns ``None``. :parameter version: a version specifier that indicates the version required, conforming to the format in ``PEP-345`` :type name: string :type version: string """ matcher = None if not version is None: try: matcher = self._scheme.matcher('%s (%s)' % (name, version)) except ValueError: raise DistlibException('invalid name or version: %r, %r' % (name, version)) for dist in self.get_distributions(): provided = dist.provides for p in provided: p_name, p_ver = parse_name_and_version(p) if matcher is None: if p_name == name: yield dist break else: if p_name == name and matcher.match(p_ver): yield dist break def get_file_path(self, name, relative_path): """ Return the path to a resource file. """ dist = self.get_distribution(name) if dist is None: raise LookupError('no distribution named %r found' % name) return dist.get_resource_path(relative_path) def get_exported_entries(self, category, name=None): """ Return all of the exported entries in a particular category. :param category: The category to search for entries. :param name: If specified, only entries with that name are returned. 
""" for dist in self.get_distributions(): r = dist.exports if category in r: d = r[category] if name is not None: if name in d: yield d[name] else: for v in d.values(): yield v class Distribution(object): """ A base class for distributions, whether installed or from indexes. Either way, it must have some metadata, so that's all that's needed for construction. """ build_time_dependency = False """ Set to True if it's known to be only a build-time dependency (i.e. not needed after installation). """ requested = False """A boolean that indicates whether the ``REQUESTED`` metadata file is present (in other words, whether the package was installed by user request or it was installed as a dependency).""" def __init__(self, metadata): """ Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution. """ self.metadata = metadata self.name = metadata.name self.key = self.name.lower() # for case-insensitive comparisons self.version = metadata.version self.locator = None self.digest = None self.extras = None # additional features requested self.context = None # environment marker overrides self.download_urls = set() self.digests = {} @property def source_url(self): """ The source archive download URL for this distribution. """ return self.metadata.source_url download_url = source_url # Backward compatibility @property def name_and_version(self): """ A utility property which displays the name and version in parentheses. """ return '%s (%s)' % (self.name, self.version) @property def provides(self): """ A set of distribution names and versions provided by this distribution. :return: A set of "name (version)" strings. 
""" plist = self.metadata.provides s = '%s (%s)' % (self.name, self.version) if s not in plist: plist.append(s) return plist def _get_requirements(self, req_attr): reqts = getattr(self.metadata, req_attr) return set(self.metadata.get_requirements(reqts, extras=self.extras, env=self.context)) @property def run_requires(self): return self._get_requirements('run_requires') @property def meta_requires(self): return self._get_requirements('meta_requires') @property def build_requires(self): return self._get_requirements('build_requires') @property def test_requires(self): return self._get_requirements('test_requires') @property def dev_requires(self): return self._get_requirements('dev_requires') def matches_requirement(self, req): """ Say if this instance matches (fulfills) a requirement. :param req: The requirement to match. :rtype req: str :return: True if it matches, else False. """ # Requirement may contain extras - parse to lose those # from what's passed to the matcher r = parse_requirement(req) scheme = get_scheme(self.metadata.scheme) try: matcher = scheme.matcher(r.requirement) except UnsupportedVersionError: # XXX compat-mode if cannot read the version logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) name = matcher.key # case-insensitive result = False for p in self.provides: p_name, p_ver = parse_name_and_version(p) if p_name != name: continue try: result = matcher.match(p_ver) break except UnsupportedVersionError: pass return result def __repr__(self): """ Return a textual representation of this instance, """ if self.source_url: suffix = ' [%s]' % self.source_url else: suffix = '' return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix) def __eq__(self, other): """ See if this distribution is the same as another. :param other: The distribution to compare with. To be equal to one another. distributions must have the same type, name, version and source_url. 
:return: True if it is the same, else False. """ if type(other) is not type(self): result = False else: result = (self.name == other.name and self.version == other.version and self.source_url == other.source_url) return result def __hash__(self): """ Compute hash in a way which matches the equality test. """ return hash(self.name) + hash(self.version) + hash(self.source_url) class BaseInstalledDistribution(Distribution): """ This is the base class for installed distributions (whether PEP 376 or legacy). """ hasher = None def __init__(self, metadata, path, env=None): """ Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found. """ super(BaseInstalledDistribution, self).__init__(metadata) self.path = path self.dist_path = env def get_hash(self, data, hasher=None): """ Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. 
:rtype: str """ if hasher is None: hasher = self.hasher if hasher is None: hasher = hashlib.md5 prefix = '' else: hasher = getattr(hashlib, hasher) prefix = '%s=' % self.hasher digest = hasher(data).digest() digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') return '%s%s' % (prefix, digest) class InstalledDistribution(BaseInstalledDistribution): """ Created with the *path* of the ``.dist-info`` directory provided to the constructor. It reads the metadata contained in ``pydist.json`` when it is instantiated., or uses a passed in Metadata instance (useful for when dry-run mode is being used). """ hasher = 'sha256' def __init__(self, path, metadata=None, env=None): self.finder = finder = resources.finder_for_path(path) if finder is None: import pdb; pdb.set_trace () if env and env._cache_enabled and path in env._cache.path: metadata = env._cache.path[path].metadata elif metadata is None: r = finder.find(METADATA_FILENAME) # Temporary - for legacy support if r is None: r = finder.find('METADATA') if r is None: raise ValueError('no %s found in %s' % (METADATA_FILENAME, path)) with contextlib.closing(r.as_stream()) as stream: metadata = Metadata(fileobj=stream, scheme='legacy') super(InstalledDistribution, self).__init__(metadata, path, env) if env and env._cache_enabled: env._cache.add(self) try: r = finder.find('REQUESTED') except AttributeError: import pdb; pdb.set_trace () self.requested = r is not None def __repr__(self): return '<InstalledDistribution %r %s at %r>' % ( self.name, self.version, self.path) def __str__(self): return "%s %s" % (self.name, self.version) def _get_records(self): """ Get the list of installed files for the distribution :return: A list of tuples of path, hash and size. Note that hash and size might be ``None`` for some entries. The path is exactly as stored in the file (which is as in PEP 376). 
""" results = [] r = self.get_distinfo_resource('RECORD') with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as record_reader: # Base location is parent dir of .dist-info dir #base_location = os.path.dirname(self.path) #base_location = os.path.abspath(base_location) for row in record_reader: missing = [None for i in range(len(row), 3)] path, checksum, size = row + missing #if not os.path.isabs(path): # path = path.replace('/', os.sep) # path = os.path.join(base_location, path) results.append((path, checksum, size)) return results @cached_property def exports(self): """ Return the information exported by this distribution. :return: A dictionary of exports, mapping an export category to a dict of :class:`ExportEntry` instances describing the individual export entries, and keyed by name. """ result = {} r = self.get_distinfo_resource(EXPORTS_FILENAME) if r: result = self.read_exports() return result def read_exports(self): """ Read exports data from a file in .ini format. :return: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. """ result = {} r = self.get_distinfo_resource(EXPORTS_FILENAME) if r: with contextlib.closing(r.as_stream()) as stream: result = read_exports(stream) return result def write_exports(self, exports): """ Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. """ rf = self.get_distinfo_file(EXPORTS_FILENAME) with open(rf, 'w') as f: write_exports(exports, f) def get_resource_path(self, relative_path): """ NOTE: This API may change in the future. Return the absolute path to a resource file with the given relative path. :param relative_path: The path, relative to .dist-info, of the resource of interest. :return: The absolute path where the resource is to be found. 
""" r = self.get_distinfo_resource('RESOURCES') with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as resources_reader: for relative, destination in resources_reader: if relative == relative_path: return destination raise KeyError('no resource file with relative path %r ' 'is installed' % relative_path) def list_installed_files(self): """ Iterates over the ``RECORD`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: iterator of (path, hash, size) """ for result in self._get_records(): yield result def write_installed_files(self, paths, prefix, dry_run=False): """ Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any existing ``RECORD`` file is silently overwritten. prefix is used to determine when to write absolute paths. """ prefix = os.path.join(prefix, '') base = os.path.dirname(self.path) base_under_prefix = base.startswith(prefix) base = os.path.join(base, '') record_path = self.get_distinfo_file('RECORD') logger.info('creating %s', record_path) if dry_run: return None with CSVWriter(record_path) as writer: for path in paths: if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): # do not put size and hash, as in PEP-376 hash_value = size = '' else: size = '%d' % os.path.getsize(path) with open(path, 'rb') as fp: hash_value = self.get_hash(fp.read()) if path.startswith(base) or (base_under_prefix and path.startswith(prefix)): path = os.path.relpath(path, base) writer.writerow((path, hash_value, size)) # add the RECORD file itself if record_path.startswith(base): record_path = os.path.relpath(record_path, base) writer.writerow((record_path, '', '')) return record_path def check_installed_files(self): """ Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. 
Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. """ mismatches = [] base = os.path.dirname(self.path) record_path = self.get_distinfo_file('RECORD') for path, hash_value, size in self.list_installed_files(): if not os.path.isabs(path): path = os.path.join(base, path) if path == record_path: continue if not os.path.exists(path): mismatches.append((path, 'exists', True, False)) elif os.path.isfile(path): actual_size = str(os.path.getsize(path)) if size and actual_size != size: mismatches.append((path, 'size', size, actual_size)) elif hash_value: if '=' in hash_value: hasher = hash_value.split('=', 1)[0] else: hasher = None with open(path, 'rb') as f: actual_hash = self.get_hash(f.read(), hasher) if actual_hash != hash_value: mismatches.append((path, 'hash', hash_value, actual_hash)) return mismatches @cached_property def shared_locations(self): """ A dictionary of shared locations whose keys are in the set 'prefix', 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. The corresponding value is the absolute path of that category for this distribution, and takes into account any paths selected by the user at installation time (e.g. via command-line arguments). In the case of the 'namespace' key, this would be a list of absolute paths for the roots of namespace packages in this distribution. The first time this property is accessed, the relevant information is read from the SHARED file in the .dist-info directory. 
""" result = {} shared_path = os.path.join(self.path, 'SHARED') if os.path.isfile(shared_path): with codecs.open(shared_path, 'r', encoding='utf-8') as f: lines = f.read().splitlines() for line in lines: key, value = line.split('=', 1) if key == 'namespace': result.setdefault(key, []).append(value) else: result[key] = value return result def write_shared_locations(self, paths, dry_run=False): """ Write shared location information to the SHARED file in .dist-info. :param paths: A dictionary as described in the documentation for :meth:`shared_locations`. :param dry_run: If True, the action is logged but no file is actually written. :return: The path of the file written to. """ shared_path = os.path.join(self.path, 'SHARED') logger.info('creating %s', shared_path) if dry_run: return None lines = [] for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): path = paths[key] if os.path.isdir(paths[key]): lines.append('%s=%s' % (key, path)) for ns in paths.get('namespace', ()): lines.append('namespace=%s' % ns) with codecs.open(shared_path, 'w', encoding='utf-8') as f: f.write('\n'.join(lines)) return shared_path def get_distinfo_resource(self, path): if path not in DIST_FILES: raise DistlibException('invalid path for a dist-info file: ' '%r at %r' % (path, self.path)) finder = resources.finder_for_path(self.path) if finder is None: raise DistlibException('Unable to get a finder for %s' % self.path) return finder.find(path) def get_distinfo_file(self, path): """ Returns a path located under the ``.dist-info`` directory. Returns a string representing the path. :parameter path: a ``'/'``-separated path relative to the ``.dist-info`` directory or an absolute path; If *path* is an absolute path and doesn't start with the ``.dist-info`` directory path, a :class:`DistlibException` is raised :type path: str :rtype: str """ # Check if it is an absolute path # XXX use relpath, add tests if path.find(os.sep) >= 0: # it's an absolute path? 
distinfo_dirname, path = path.split(os.sep)[-2:] if distinfo_dirname != self.path.split(os.sep)[-1]: raise DistlibException( 'dist-info file %r does not belong to the %r %s ' 'distribution' % (path, self.name, self.version)) # The file must be relative if path not in DIST_FILES: raise DistlibException('invalid path for a dist-info file: ' '%r at %r' % (path, self.path)) return os.path.join(self.path, path) def list_distinfo_files(self): """ Iterates over the ``RECORD`` entries and returns paths for each line if the path is pointing to a file located in the ``.dist-info`` directory or one of its subdirectories. :returns: iterator of paths """ base = os.path.dirname(self.path) for path, checksum, size in self._get_records(): # XXX add separator or use real relpath algo if not os.path.isabs(path): path = os.path.join(base, path) if path.startswith(self.path): yield path def __eq__(self, other): return (isinstance(other, InstalledDistribution) and self.path == other.path) # See http://docs.python.org/reference/datamodel#object.__hash__ __hash__ = object.__hash__ class EggInfoDistribution(BaseInstalledDistribution): """Created with the *path* of the ``.egg-info`` directory or file provided to the constructor. 
It reads the metadata contained in the file itself, or if the given path happens to be a directory, the metadata is read from the file ``PKG-INFO`` under that directory.""" requested = True # as we have no way of knowing, assume it was shared_locations = {} def __init__(self, path, env=None): def set_name_and_version(s, n, v): s.name = n s.key = n.lower() # for case-insensitive comparisons s.version = v self.path = path self.dist_path = env if env and env._cache_enabled and path in env._cache_egg.path: metadata = env._cache_egg.path[path].metadata set_name_and_version(self, metadata.name, metadata.version) else: metadata = self._get_metadata(path) # Need to be set before caching set_name_and_version(self, metadata.name, metadata.version) if env and env._cache_enabled: env._cache_egg.add(self) super(EggInfoDistribution, self).__init__(metadata, path, env) def _get_metadata(self, path): requires = None def parse_requires_data(data): """Create a list of dependencies from a requires.txt file. *data*: the contents of a setuptools-produced requires.txt file. """ reqs = [] lines = data.splitlines() for line in lines: line = line.strip() if line.startswith('['): logger.warning('Unexpected line: quitting requirement scan: %r', line) break r = parse_requirement(line) if not r: logger.warning('Not recognised as a requirement: %r', line) continue if r.extras: logger.warning('extra requirements in requires.txt are ' 'not supported') if not r.constraints: reqs.append(r.name) else: cons = ', '.join('%s%s' % c for c in r.constraints) reqs.append('%s (%s)' % (r.name, cons)) return reqs def parse_requires_path(req_path): """Create a list of dependencies from a requires.txt file. *req_path*: the path to a setuptools-produced requires.txt file. 
""" reqs = [] try: with codecs.open(req_path, 'r', 'utf-8') as fp: reqs = parse_requires_data(fp.read()) except IOError: pass return reqs if path.endswith('.egg'): if os.path.isdir(path): meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO') metadata = Metadata(path=meta_path, scheme='legacy') req_path = os.path.join(path, 'EGG-INFO', 'requires.txt') requires = parse_requires_path(req_path) else: # FIXME handle the case where zipfile is not available zipf = zipimport.zipimporter(path) fileobj = StringIO( zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) metadata = Metadata(fileobj=fileobj, scheme='legacy') try: data = zipf.get_data('EGG-INFO/requires.txt') requires = parse_requires_data(data.decode('utf-8')) except IOError: requires = None elif path.endswith('.egg-info'): if os.path.isdir(path): req_path = os.path.join(path, 'requires.txt') requires = parse_requires_path(req_path) path = os.path.join(path, 'PKG-INFO') metadata = Metadata(path=path, scheme='legacy') else: raise DistlibException('path must end with .egg-info or .egg, ' 'got %r' % path) if requires: metadata.add_requirements(requires) return metadata def __repr__(self): return '<EggInfoDistribution %r %s at %r>' % ( self.name, self.version, self.path) def __str__(self): return "%s %s" % (self.name, self.version) def check_installed_files(self): """ Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. 
""" mismatches = [] record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): for path, _, _ in self.list_installed_files(): if path == record_path: continue if not os.path.exists(path): mismatches.append((path, 'exists', True, False)) return mismatches def list_installed_files(self): """ Iterates over the ``installed-files.txt`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: a list of (path, hash, size) """ def _md5(path): f = open(path, 'rb') try: content = f.read() finally: f.close() return hashlib.md5(content).hexdigest() def _size(path): return os.stat(path).st_size record_path = os.path.join(self.path, 'installed-files.txt') result = [] if os.path.exists(record_path): with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() p = os.path.normpath(os.path.join(self.path, line)) # "./" is present as a marker between installed files # and installation metadata files if not os.path.exists(p): logger.warning('Non-existent file: %s', p) if p.endswith(('.pyc', '.pyo')): continue #otherwise fall through and fail if not os.path.isdir(p): result.append((p, _md5(p), _size(p))) result.append((record_path, None, None)) return result def list_distinfo_files(self, absolute=False): """ Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories. :parameter absolute: If *absolute* is ``True``, each returned path is transformed into a local absolute path. Otherwise the raw value from ``installed-files.txt`` is returned. 
:type absolute: boolean :returns: iterator of paths """ record_path = os.path.join(self.path, 'installed-files.txt') skip = True with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if line == './': skip = False continue if not skip: p = os.path.normpath(os.path.join(self.path, line)) if p.startswith(self.path): if absolute: yield p else: yield line def __eq__(self, other): return (isinstance(other, EggInfoDistribution) and self.path == other.path) # See http://docs.python.org/reference/datamodel#object.__hash__ __hash__ = object.__hash__ new_dist_class = InstalledDistribution old_dist_class = EggInfoDistribution class DependencyGraph(object): """ Represents a dependency graph between distributions. The dependency relationships are stored in an ``adjacency_list`` that maps distributions to a list of ``(other, label)`` tuples where ``other`` is a distribution and the edge is labeled with ``label`` (i.e. the version specifier, if such was provided). Also, for more efficient traversal, for every distribution ``x``, a list of predecessors is kept in ``reverse_list[x]``. An edge from distribution ``a`` to distribution ``b`` means that ``a`` depends on ``b``. If any missing dependencies are found, they are stored in ``missing``, which is a dictionary that maps distributions to a list of requirements that were not provided by any other distributions. """ def __init__(self): self.adjacency_list = {} self.reverse_list = {} self.missing = {} def add_distribution(self, distribution): """Add the *distribution* to the graph. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` """ self.adjacency_list[distribution] = [] self.reverse_list[distribution] = [] #self.missing[distribution] = [] def add_edge(self, x, y, label=None): """Add an edge from distribution *x* to distribution *y* with the given *label*. 
:type x: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type y: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type label: ``str`` or ``None`` """ self.adjacency_list[x].append((y, label)) # multiple edges are allowed, so be careful if x not in self.reverse_list[y]: self.reverse_list[y].append(x) def add_missing(self, distribution, requirement): """ Add a missing *requirement* for the given *distribution*. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type requirement: ``str`` """ logger.debug('%s missing %r', distribution, requirement) self.missing.setdefault(distribution, []).append(requirement) def _repr_dist(self, dist): return '%s %s' % (dist.name, dist.version) def repr_node(self, dist, level=1): """Prints only a subgraph""" output = [self._repr_dist(dist)] for other, label in self.adjacency_list[dist]: dist = self._repr_dist(other) if label is not None: dist = '%s [%s]' % (dist, label) output.append(' ' * level + str(dist)) suboutput = self.repr_node(other, level + 1) subs = suboutput.split('\n') output.extend(subs[1:]) return '\n'.join(output) def to_dot(self, f, skip_disconnected=True): """Writes a DOT output for the graph to the provided file *f*. If *skip_disconnected* is set to ``True``, then all distributions that are not dependent on any other distribution are skipped. 
:type f: has to support ``file``-like operations :type skip_disconnected: ``bool`` """ disconnected = [] f.write("digraph dependencies {\n") for dist, adjs in self.adjacency_list.items(): if len(adjs) == 0 and not skip_disconnected: disconnected.append(dist) for other, label in adjs: if not label is None: f.write('"%s" -> "%s" [label="%s"]\n' % (dist.name, other.name, label)) else: f.write('"%s" -> "%s"\n' % (dist.name, other.name)) if not skip_disconnected and len(disconnected) > 0: f.write('subgraph disconnected {\n') f.write('label = "Disconnected"\n') f.write('bgcolor = red\n') for dist in disconnected: f.write('"%s"' % dist.name) f.write('\n') f.write('}\n') f.write('}\n') def topological_sort(self): """ Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle. """ result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_remove: # What's left in alist (if anything) is a cycle. break # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys()) def __repr__(self): """Representation of the graph""" output = [] for dist, adjs in self.adjacency_list.items(): output.append(self.repr_node(dist)) return '\n'.join(output) def make_graph(dists, scheme='default'): """Makes a dependency graph from the given distributions. 
:parameter dists: a list of distributions :type dists: list of :class:`distutils2.database.InstalledDistribution` and :class:`distutils2.database.EggInfoDistribution` instances :rtype: a :class:`DependencyGraph` instance """ scheme = get_scheme(scheme) graph = DependencyGraph() provided = {} # maps names to lists of (version, dist) tuples # first, build the graph and find out what's provided for dist in dists: graph.add_distribution(dist) for p in dist.provides: name, version = parse_name_and_version(p) logger.debug('Add to provided: %s, %s, %s', name, version, dist) provided.setdefault(name, []).append((version, dist)) # now make the edges for dist in dists: requires = (dist.run_requires | dist.meta_requires | dist.build_requires | dist.dev_requires) for req in requires: try: matcher = scheme.matcher(req) except UnsupportedVersionError: # XXX compat-mode if cannot read the version logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) name = matcher.key # case-insensitive matched = False if name in provided: for version, provider in provided[name]: try: match = matcher.match(version) except UnsupportedVersionError: match = False if match: graph.add_edge(dist, provider, req) matched = True break if not matched: graph.add_missing(dist, req) return graph def get_dependent_dists(dists, dist): """Recursively generate a list of distributions from *dists* that are dependent on *dist*. 
:param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested """ if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) dep = [dist] # dependent distributions todo = graph.reverse_list[dist] # list of nodes we should inspect while todo: d = todo.pop() dep.append(d) for succ in graph.reverse_list[d]: if succ not in dep: todo.append(succ) dep.pop(0) # remove dist from dep, was there to prevent infinite loops return dep def get_required_dists(dists, dist): """Recursively generate a list of distributions from *dists* that are required by *dist*. :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested """ if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) req = [] # required distributions todo = graph.adjacency_list[dist] # list of nodes we should inspect while todo: d = todo.pop()[0] req.append(d) for pred in graph.adjacency_list[d]: if pred not in req: todo.append(pred) return req def make_dist(name, version, **kwargs): """ A convenience method for making a dist given just a name and version. """ summary = kwargs.pop('summary', 'Placeholder for summary') md = Metadata(**kwargs) md.name = name md.version = version md.summary = summary or 'Plaeholder for summary' return Distribution(md)
mit
DaveGeneral/python-data-mining-platform
example/naive_bayes_only_test.py
8
1130
""" this is a example shows load saved model and test new samples notice in GlobalInfo.Init and other initialize functions the second parameter - LoadFromFile is true, means load trained result """ import sys, os sys.path.append(os.path.join(os.getcwd(), '../')) from pymining.math.matrix import Matrix from pymining.math.text2matrix import Text2Matrix from pymining.nlp.segmenter import Segmenter from pymining.common.global_info import GlobalInfo from pymining.common.configuration import Configuration from pymining.preprocessor.chisquare_filter import ChiSquareFilter from pymining.classifier.naive_bayes import NaiveBayes if __name__ == "__main__": config = Configuration.FromFile("conf/test.xml") GlobalInfo.Init(config, "__global__", True) txt2mat = Text2Matrix(config, "__matrix__", True) chiFilter = ChiSquareFilter(config, "__filter__", True) nbModel = NaiveBayes(config, "naive_bayes", True) [testx, testy] = txt2mat.CreatePredictMatrix("data/test.txt") [testx, testy] = chiFilter.MatrixFilter(testx, testy) [resultY, precision] = nbModel.Test(testx, testy) print precision
bsd-3-clause
eleonrk/SickRage
sickbeard/logger.py
2
20839
# coding=utf-8 # Author: Nic Wolfe <nic@wolfeden.ca> # URL: https://sickchill.github.io # Git: https://github.com/SickChill/SickChill.git # # This file is part of SickChill. # # SickChill is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickChill is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickChill. If not, see <http://www.gnu.org/licenses/>. """ Custom Logger for SickChill """ from __future__ import print_function, unicode_literals import io import locale import logging import logging.handlers import os import platform import re import sys import threading import traceback from logging import NullHandler import six from github import InputFileContent from github.GithubException import RateLimitExceededException, TwoFactorException # noinspection PyUnresolvedReferences from requests.compat import quote import sickbeard from sickbeard import classes from sickchill.helper.common import dateTimeFormat from sickchill.helper.encoding import ek, ss from sickchill.helper.exceptions import ex # pylint: disable=line-too-long # log levels ERROR = logging.ERROR WARNING = logging.WARNING INFO = logging.INFO DEBUG = logging.DEBUG DB = 5 LOGGING_LEVELS = { 'ERROR': ERROR, 'WARNING': WARNING, 'INFO': INFO, 'DEBUG': DEBUG, 'DB': DB, } censored_items = {} # pylint: disable=invalid-name class CensoredFormatter(logging.Formatter, object): """ Censor information such as API keys, user names, and passwords from the Log """ def __init__(self, fmt=None, datefmt=None, encoding='utf-8'): super(CensoredFormatter, self).__init__(fmt, datefmt) 
self.encoding = encoding def format(self, record): """ Strips censored items from string :param record: to censor """ msg = super(CensoredFormatter, self).format(record) if not isinstance(msg, six.text_type): msg = msg.decode(self.encoding, 'replace') # Convert to unicode # set of censored items censored = {item for _, item in six.iteritems(censored_items) if item} # set of censored items and urlencoded counterparts censored = censored | {quote(item) for item in censored} # convert set items to unicode and typecast to list censored = list({ item.decode(self.encoding, 'replace') if not isinstance(item, six.text_type) else item for item in censored }) # sort the list in order of descending length so that entire item is censored # e.g. password and password_1 both get censored instead of getting ********_1 censored.sort(key=len, reverse=True) for item in censored: msg = msg.replace(item, len(item) * '*') # Needed because Newznab apikey isn't stored as key=value in a section. msg = re.sub(r'([&?]r|[&?]apikey|[&?]api_key)(?:=|%3D)[^&]*([&\w]?)', r'\1=**********\2', msg, re.I) return msg class Logger(object): # pylint: disable=too-many-instance-attributes """ Logger to create log entries """ def __init__(self): self.logger = logging.getLogger('sickchill') self.loggers = [ logging.getLogger('sickchill'), logging.getLogger('tornado.general'), logging.getLogger('tornado.application'), # logging.getLogger('subliminal'), # logging.getLogger('tornado.access'), # logging.getLogger('tvdb_api'), # logging.getLogger("requests.packages.urllib3") ] self.console_logging = False self.file_logging = False self.debug_logging = False self.database_logging = False self.log_file = None self.submitter_running = False def init_logging(self, console_logging=False, file_logging=False, debug_logging=False, database_logging=False): """ Initialize logging :param console_logging: True if logging to console :param file_logging: True if logging to file :param debug_logging: True if debug logging is 
enabled :param database_logging: True if logging database access """ self.log_file = self.log_file or ek(os.path.join, sickbeard.LOG_DIR, 'sickchill.log') global log_file log_file = self.log_file self.debug_logging = debug_logging self.console_logging = console_logging self.file_logging = file_logging self.database_logging = database_logging logging.addLevelName(DB, 'DB') # add a new logging level DB logging.getLogger().addHandler(NullHandler()) # nullify root logger # set custom root logger for logger in self.loggers: if logger is not self.logger: logger.root = self.logger logger.parent = self.logger log_level = DB if self.database_logging else DEBUG if self.debug_logging else INFO # set minimum logging level allowed for loggers for logger in self.loggers: logger.setLevel(log_level) logging.getLogger("tornado.general").setLevel('ERROR') # console log handler if self.console_logging: console = logging.StreamHandler() console.setFormatter(CensoredFormatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S')) console.setLevel(log_level) for logger in self.loggers: logger.addHandler(console) # rotating log file handler if self.file_logging: rfh = logging.handlers.RotatingFileHandler( self.log_file, maxBytes=int(sickbeard.LOG_SIZE * 1048576), backupCount=sickbeard.LOG_NR, encoding='utf-8' ) rfh.setFormatter(CensoredFormatter('%(asctime)s %(levelname)-8s %(message)s', dateTimeFormat)) rfh.setLevel(log_level) for logger in self.loggers: logger.addHandler(rfh) def set_level(self): self.debug_logging = sickbeard.DEBUG self.database_logging = sickbeard.DBDEBUG level = DB if self.database_logging else DEBUG if self.debug_logging else INFO for logger in self.loggers: logger.setLevel(level) for handler in logger.handlers: handler.setLevel(level) @staticmethod def shutdown(): """ Shut down the logger """ logging.shutdown() def log(self, msg, level=INFO, *args, **kwargs): """ Create log entry :param msg: to log :param level: of log, e.g. DEBUG, INFO, etc. 
:param args: to pass to logger :param kwargs: to pass to logger """ cur_thread = threading.currentThread().getName() cur_hash = '' if level == ERROR and sickbeard.CUR_COMMIT_HASH and len(sickbeard.CUR_COMMIT_HASH) > 6: cur_hash = '[{0}] '.format( sickbeard.CUR_COMMIT_HASH[:7] ) message = '{thread} :: {hash}{message}'.format( thread=cur_thread, hash=cur_hash, message=msg) # Change the SSL error to a warning with a link to information about how to fix it. # Check for 'error [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:590)' ssl_errors = [ r'error \[Errno \d+\] _ssl.c:\d+: error:\d+\s*:SSL routines:SSL23_GET_SERVER_HELLO:tlsv1 alert internal error', r'error \[SSL: SSLV3_ALERT_HANDSHAKE_FAILURE\] sslv3 alert handshake failure \(_ssl\.c:\d+\)', ] for ssl_error in ssl_errors: check = re.sub(ssl_error, 'See: http://git.io/vuU5V', message) if check != message: message = check level = WARNING if level == ERROR: classes.ErrorViewer.add(classes.UIError(message)) elif level == WARNING: classes.WarningViewer.add(classes.UIError(message)) try: if level == ERROR: self.logger.exception(message, *args, **kwargs) else: self.logger.log(level, message, *args, **kwargs) except Exception: if msg and msg.strip(): # Otherwise creates empty messages in log... print(msg.strip()) def log_error_and_exit(self, error_msg, *args, **kwargs): self.log(error_msg, ERROR, *args, **kwargs) if not self.console_logging: sys.exit(error_msg.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace')) else: sys.exit(1) def submit_errors(self): # pylint: disable=too-many-branches,too-many-locals submitter_result = '' issue_id = None gh_credentials = (sickbeard.GIT_AUTH_TYPE == 0 and sickbeard.GIT_USERNAME and sickbeard.GIT_PASSWORD) \ or (sickbeard.GIT_AUTH_TYPE == 1 and sickbeard.GIT_TOKEN) if not all((gh_credentials, sickbeard.DEBUG, sickbeard.gh, classes.ErrorViewer.errors)): submitter_result = 'Please set your GitHub token or username and password in the config and enable debug. 
Unable to submit issue ticket to GitHub!' return submitter_result, issue_id try: from sickbeard.versionChecker import CheckVersion checkversion = CheckVersion() checkversion.check_for_new_version() commits_behind = checkversion.updater.get_num_commits_behind() except Exception: # pylint: disable=broad-except submitter_result = 'Could not check if your SickChill is updated, unable to submit issue ticket to GitHub!' return submitter_result, issue_id if commits_behind is None or commits_behind > 0: submitter_result = 'Please update SickChill, unable to submit issue ticket to GitHub with an outdated version!' return submitter_result, issue_id if self.submitter_running: submitter_result = 'Issue submitter is running, please wait for it to complete' return submitter_result, issue_id self.submitter_running = True try: # read log file __log_data = None if ek(os.path.isfile, self.log_file): with io.open(self.log_file, encoding='utf-8') as log_f: __log_data = log_f.readlines() for i in range(1, int(sickbeard.LOG_NR)): f_name = '{0}.{1:d}'.format(self.log_file, i) if ek(os.path.isfile, f_name) and (len(__log_data) <= 500): with io.open(f_name, encoding='utf-8') as log_f: __log_data += log_f.readlines() __log_data = list(reversed(__log_data)) # parse and submit errors to issue tracker for cur_error in sorted(classes.ErrorViewer.errors, key=lambda error: error.time, reverse=True)[:500]: try: title_error = ss(str(cur_error.title)) if not title_error or title_error == 'None': title_error = re.match(r'^[A-Za-z0-9\-\[\] :]+::\s(?:\[[\w]{7}\])\s*(.*)$', ss(cur_error.message)).group(1) if len(title_error) > 1000: title_error = title_error[0:1000] except Exception as err_msg: # pylint: disable=broad-except self.log('Unable to get error title : {0}'.format(ex(err_msg)), ERROR) title_error = 'UNKNOWN' gist = None regex = r'^(?P<time>{time})\s+(?P<level>[A-Z]+)\s+[A-Za-z0-9\-\[\] :]+::.*$'.format(time=re.escape(cur_error.time)) for i, data in enumerate(__log_data): match = 
re.match(regex, data) if match: level = match.group('level') if LOGGING_LEVELS[level] == ERROR: paste_data = ''.join(__log_data[i:i + 50]) if paste_data: gist = sickbeard.gh.get_user().create_gist(False, {'sickchill.log': InputFileContent(paste_data)}) break else: gist = 'No ERROR found' try: locale_name = locale.getdefaultlocale()[1] except Exception: # pylint: disable=broad-except locale_name = 'unknown' if gist and gist != 'No ERROR found': log_link = 'Link to Log: {0}'.format(gist.html_url) else: log_link = 'No Log available with ERRORS:' msg = [ '### INFO', 'Python Version: **{0}**'.format(sys.version[:120].replace('\n', '')), 'Operating System: **{0}**'.format(platform.platform()), 'Locale: {0}'.format(locale_name), 'Branch: **{0}**'.format(sickbeard.BRANCH), 'Commit: SickChill/SickChill@{0}'.format(sickbeard.CUR_COMMIT_HASH), log_link, '### ERROR', '```', cur_error.message, '```', '---', '_STAFF NOTIFIED_: @SickChill/owners @SickChill/moderators', ] message = '\n'.join(msg) title_error = '[APP SUBMITTED]: {0}'.format(title_error) repo = sickbeard.gh.get_organization(sickbeard.GIT_ORG).get_repo(sickbeard.GIT_REPO) reports = repo.get_issues(state='all') def is_ascii_error(title): # [APP SUBMITTED]: 'ascii' codec can't encode characters in position 00-00: ordinal not in range(128) # [APP SUBMITTED]: 'charmap' codec can't decode byte 0x00 in position 00: character maps to <undefined> return re.search(r'.* codec can\'t .*code .* in position .*:', title) is not None def is_malformed_error(title): # [APP SUBMITTED]: not well-formed (invalid token): line 0, column 0 return re.search(r'.* not well-formed \(invalid token\): line .* column .*', title) is not None ascii_error = is_ascii_error(title_error) malformed_error = is_malformed_error(title_error) issue_found = False for report in reports: if title_error.rsplit(' :: ')[-1] in report.title or \ (malformed_error and is_malformed_error(report.title)) or \ (ascii_error and is_ascii_error(report.title)): issue_id = 
report.number if not report.raw_data['locked']: if report.create_comment(message): submitter_result = 'Commented on existing issue #{0} successfully!'.format(issue_id) else: submitter_result = 'Failed to comment on found issue #{0}!'.format(issue_id) else: submitter_result = 'Issue #{0} is locked, check GitHub to find info about the error.'.format(issue_id) issue_found = True break if not issue_found: issue = repo.create_issue(title_error, message) if issue: issue_id = issue.number submitter_result = 'Your issue ticket #{0} was submitted successfully!'.format(issue_id) else: submitter_result = 'Failed to create a new issue!' if issue_id and cur_error in classes.ErrorViewer.errors: # clear error from error list classes.ErrorViewer.errors.remove(cur_error) except RateLimitExceededException: submitter_result = 'Your Github user has exceeded its API rate limit, please try again later' issue_id = None except TwoFactorException: submitter_result = ('Your Github account requires Two-Factor Authentication, ' 'please change your auth method in the config') issue_id = None except Exception: # pylint: disable=broad-except self.log(traceback.format_exc(), ERROR) submitter_result = 'Exception generated in issue submitter, please check the log' issue_id = None finally: self.submitter_running = False return submitter_result, issue_id # pylint: disable=too-few-public-methods class Wrapper(object): instance = Logger() def __init__(self, wrapped): self.wrapped = wrapped def __getattr__(self, name): try: return getattr(self.wrapped, name) except AttributeError: return getattr(self.instance, name) _globals = sys.modules[__name__] = Wrapper(sys.modules[__name__]) # pylint: disable=invalid-name def init_logging(*args, **kwargs): return Wrapper.instance.init_logging(*args, **kwargs) def log(*args, **kwargs): return Wrapper.instance.log(*args, **kwargs) def log_error_and_exit(*args, **kwargs): return Wrapper.instance.log_error_and_exit(*args, **kwargs) def set_level(*args, **kwargs): 
return Wrapper.instance.set_level(*args, **kwargs) def shutdown(): return Wrapper.instance.shutdown() def submit_errors(*args, **kwargs): return Wrapper.instance.submit_errors(*args, **kwargs) log_file = None LOG_FILTERS = { '<NONE>': _('&lt;No Filter&gt;'), 'DAILYSEARCHER': _('Daily Searcher'), 'BACKLOG': _('Backlog'), 'SHOWUPDATER': _('Show Updater'), 'CHECKVERSION': _('Check Version'), 'SHOWQUEUE': _('Show Queue'), 'SEARCHQUEUE': _('Search Queue (All)'), 'SEARCHQUEUE-DAILY-SEARCH': _('Search Queue (Daily Searcher)'), 'SEARCHQUEUE-BACKLOG': _('Search Queue (Backlog)'), 'SEARCHQUEUE-MANUAL': _('Search Queue (Manual)'), 'SEARCHQUEUE-RETRY': _('Search Queue (Retry/Failed)'), 'SEARCHQUEUE-RSS': _('Search Queue (RSS)'), 'FINDPROPERS': _('Find Propers'), 'POSTPROCESSOR': _('Postprocessor'), 'FINDSUBTITLES': _('Find Subtitles'), 'TRAKTCHECKER': _('Trakt Checker'), 'EVENT': _('Event'), 'ERROR': _('Error'), 'TORNADO': _('Tornado'), 'Thread': _('Thread'), 'MAIN': _('Main'), } def log_data(min_level, log_filter, log_search, max_lines): regex = r"^(\d\d\d\d)\-(\d\d)\-(\d\d)\s*(\d\d)\:(\d\d):(\d\d)\s*([A-Z]+)\s*(.+?)\s*\:\:\s*(.*)$" if log_filter not in LOG_FILTERS: log_filter = '<NONE>' final_data = [] log_files = [] if ek(os.path.isfile, Wrapper.instance.log_file): log_files.append(Wrapper.instance.log_file) for i in range(1, int(sickbeard.LOG_NR)): name = Wrapper.instance.log_file + "." 
+ str(i) if not ek(os.path.isfile, name): break log_files.append(name) else: return final_data data = [] for _log_file in log_files: if len(data) < max_lines: with io.open(_log_file, 'r', encoding='utf-8') as f: data += [line.strip() + '\n' for line in reversed(f.readlines()) if line.strip()] else: break found_lines = 0 for x in data: match = re.match(regex, x) if match: level = match.group(7) log_name = match.group(8) if not sickbeard.DEBUG and level == 'DEBUG': continue if not sickbeard.DBDEBUG and level == 'DB': continue if level not in LOGGING_LEVELS: final_data.append('AA ' + x) found_lines += 1 elif log_search and log_search.lower() in x.lower(): final_data.append(x) found_lines += 1 elif not log_search and LOGGING_LEVELS[level] >= int(min_level) and (log_filter == '<NONE>' or log_name.startswith(log_filter)): final_data.append(x) found_lines += 1 else: final_data.append('AA ' + x) found_lines += 1 if found_lines >= max_lines: break return final_data
gpl-3.0
ReconCell/smacha
smacha_ros/test/executive_smach_tutorials_diff_test.py
1
3665
#!/usr/bin/env python

import sys
import argparse
import os

import unittest2 as unittest

from ruamel import yaml

from smacha.util import Tester

import rospy
import rospkg
import rostest

ROS_TEMPLATES_DIR = '../src/smacha_ros/templates'
TEMPLATES_DIR = 'smacha_templates/executive_smach_tutorials'
WRITE_OUTPUT_FILES = False
OUTPUT_PY_DIR = '/tmp/smacha/executive_smach_tutorials/smacha_generated_py'
OUTPUT_YML_DIR = '/tmp/smacha/executive_smach_tutorials/smacha_generated_scripts'
CONF_FILE = 'executive_smach_tutorials_config.yml'
DEBUG_LEVEL = 1
CONF_DICT = {}


class TestGenerate(Tester):
    """Tester class for general unit testing of various SMACHA tool
    functionalities.

    The tests run by this class are performed by generating code using SMACHA
    scripts and templates and comparing the generated output code to the
    expected code from hand-written code samples.

    This includes testing both SMACHA YAML scripts generated by, e.g. the
    :func:`smacha.parser.contain` and :func:`smacha.parser.extract` methods,
    and Python code generated by the :func:`smacha.generator.run` method.
    """

    def __init__(self, *args, **kwargs):
        # Set Tester member variables
        self.set_write_output_files(WRITE_OUTPUT_FILES)
        self.set_output_py_dir(OUTPUT_PY_DIR)
        self.set_output_yml_dir(OUTPUT_YML_DIR)
        self.set_debug_level(DEBUG_LEVEL)

        # Store the base path
        self._base_path = os.path.dirname(os.path.abspath(__file__))

        # Call the parent constructor
        super(TestGenerate, self).__init__(
            *args,
            script_dirs=[os.path.join(self._base_path, 'smacha_scripts/smacha_test_examples')],
            template_dirs=[
                os.path.join(self._base_path, ROS_TEMPLATES_DIR),
                os.path.join(self._base_path, TEMPLATES_DIR)
            ],
            **kwargs)

    def test_generate(self):
        """Test generating against baseline files"""
        for test_case in CONF_DICT['TEST_GENERATE']:
            with self.subTest(test_case=test_case):
                # FIX (py2/py3): dict.values() is a non-indexable view on
                # Python 3; materialise it before taking the first element.
                test_params = list(test_case.values())[0]
                script_file = test_params['script']
                baseline = test_params['baseline']
                with open(os.path.join(self._base_path, 'executive_smach_tutorials/smach_tutorials/examples/{}'.format(baseline))) as original_file:
                    generated_code = self._strip_uuids(self._generate(os.path.join(self._base_path, 'smacha_scripts/executive_smach_tutorials/{}'.format(script_file))))
                    original_code = original_file.read()
                    self.assertTrue(self._compare(generated_code, original_code, file_a='generated', file_b='original'))


if __name__=="__main__":
    # Read the configuration file before parsing arguments,
    try:
        base_path = os.path.dirname(os.path.abspath(__file__))
        conf_file_loc = os.path.join(base_path, CONF_FILE)
        # FIX: context manager closes the handle (the original leaked it),
        # and safe_load avoids constructing arbitrary objects from YAML tags.
        with open(conf_file_loc) as f:
            CONF_DICT = yaml.safe_load(f)
    except Exception as e:
        print('Failed to read the configuration file. See error:\n{}'.format(e))
        exit()

    # FIX (py2/py3): dict.has_key() was removed in Python 3; `in` works in both.
    if 'WRITE_OUTPUT_FILES' in CONF_DICT:
        WRITE_OUTPUT_FILES = CONF_DICT['WRITE_OUTPUT_FILES']
    if 'OUTPUT_PY_DIR' in CONF_DICT:
        OUTPUT_PY_DIR = CONF_DICT['OUTPUT_PY_DIR']
    if 'OUTPUT_YML_DIR' in CONF_DICT:
        OUTPUT_YML_DIR = CONF_DICT['OUTPUT_YML_DIR']
    if 'DEBUG_LEVEL' in CONF_DICT:
        DEBUG_LEVEL = CONF_DICT['DEBUG_LEVEL']

    rospy.init_node('test_smacha_ros_generate', log_level=rospy.DEBUG)

    rostest.rosrun('smacha_ros', 'test_smacha_ros_generate', TestGenerate)
bsd-3-clause
louiskun/flaskGIT
venv/lib/python2.7/site-packages/setuptools/command/install.py
496
4685
from distutils.errors import DistutilsArgError
import inspect
import glob
import warnings
import platform

import distutils.command.install as orig
import setuptools

# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for
# now. See https://bitbucket.org/pypa/setuptools/issue/199/
_install = orig.install


class install(orig.install):
    """Use easy_install to install the package, w/dependencies"""

    # Extra CLI options on top of distutils' own `install` command.
    user_options = orig.install.user_options + [
        ('old-and-unmanageable', None, "Try not to use this!"),
        ('single-version-externally-managed', None,
         "used by system package builders to create 'flat' eggs"),
    ]
    boolean_options = orig.install.boolean_options + [
        'old-and-unmanageable', 'single-version-externally-managed',
    ]
    # Sub-commands setuptools adds to the distutils install pipeline; the
    # predicate (second element) decides whether each one runs.
    new_commands = [
        ('install_egg_info', lambda self: True),
        ('install_scripts', lambda self: True),
    ]
    # Name -> predicate lookup, used below to strip duplicates from the
    # inherited sub_commands list.
    _nc = dict(new_commands)

    def initialize_options(self):
        orig.install.initialize_options(self)
        self.old_and_unmanageable = None
        self.single_version_externally_managed = None

    def finalize_options(self):
        orig.install.finalize_options(self)
        if self.root:
            # --root implies the "flat" system-packaging mode.
            self.single_version_externally_managed = True
        elif self.single_version_externally_managed:
            if not self.root and not self.record:
                raise DistutilsArgError(
                    "You must specify --record or --root when building system"
                    " packages"
                )

    def handle_extra_path(self):
        if self.root or self.single_version_externally_managed:
            # explicit backward-compatibility mode, allow extra_path to work
            return orig.install.handle_extra_path(self)

        # Ignore extra_path when installing an egg (or being run by another
        # command without --root or --single-version-externally-managed
        self.path_file = None
        self.extra_dirs = ''

    def run(self):
        # Explicit request for old-style install?  Just do it
        if self.old_and_unmanageable or self.single_version_externally_managed:
            return orig.install.run(self)

        if not self._called_from_setup(inspect.currentframe()):
            # Run in backward-compatibility mode to support bdist_* commands.
            orig.install.run(self)
        else:
            self.do_egg_install()

    @staticmethod
    def _called_from_setup(run_frame):
        """
        Attempt to detect whether run() was called from setup() or by another
        command.  If called by setup(), the parent caller will be the
        'run_command' method in 'distutils.dist', and *its* caller will be
        the 'run_commands' method.  If called any other way, the
        immediate caller *might* be 'run_command', but it won't have been
        called by 'run_commands'. Return True in that case or if a call stack
        is unavailable. Return False otherwise.
        """
        if run_frame is None:
            # No frame introspection available: assume setup() to stay safe.
            msg = "Call stack not available. bdist_* commands may fail."
            warnings.warn(msg)
            if platform.python_implementation() == 'IronPython':
                msg = "For best results, pass -X:Frames to enable call stack."
                warnings.warn(msg)
            return True
        # Two frames up from run(): the caller of our caller.
        res = inspect.getouterframes(run_frame)[2]
        caller, = res[:1]
        info = inspect.getframeinfo(caller)
        caller_module = caller.f_globals.get('__name__', '')
        return (
            caller_module == 'distutils.dist'
            and info.function == 'run_commands'
        )

    def do_egg_install(self):
        # Build an egg with bdist_egg, then hand it to easy_install.
        easy_install = self.distribution.get_command_class('easy_install')

        cmd = easy_install(
            self.distribution, args="x", root=self.root, record=self.record,
        )
        cmd.ensure_finalized()  # finalize before bdist_egg munges install cmd
        cmd.always_copy_from = '.'  # make sure local-dir eggs get installed

        # pick up setup-dir .egg files only: no .egg-info
        cmd.package_index.scan(glob.glob('*.egg'))

        self.run_command('bdist_egg')
        args = [self.distribution.get_command_obj('bdist_egg').egg_output]

        if setuptools.bootstrap_install_from:
            # Bootstrap self-installation of setuptools
            args.insert(0, setuptools.bootstrap_install_from)

        cmd.args = args
        cmd.run()
        setuptools.bootstrap_install_from = None


# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = (
    [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
    install.new_commands
)
mit
simone/django-gb
tests/str/tests.py
16
1252
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime from unittest import skipIf from django.test import TestCase from django.utils import six from .models import Article, InternationalArticle class SimpleTests(TestCase): @skipIf(six.PY3, "tests a __str__ method returning unicode under Python 2") def test_basic(self): a = Article.objects.create( headline=b'Area man programs in Python', pub_date=datetime.datetime(2005, 7, 28) ) self.assertEqual(str(a), str('Area man programs in Python')) self.assertEqual(repr(a), str('<Article: Area man programs in Python>')) def test_international(self): a = InternationalArticle.objects.create( headline='Girl wins €12.500 in lottery', pub_date=datetime.datetime(2005, 7, 28) ) if six.PY3: self.assertEqual(str(a), 'Girl wins €12.500 in lottery') else: # On Python 2, the default str() output will be the UTF-8 encoded # output of __unicode__() -- or __str__() when the # python_2_unicode_compatible decorator is used. self.assertEqual(str(a), b'Girl wins \xe2\x82\xac12.500 in lottery')
bsd-3-clause
TNT-Samuel/Coding-Projects
DNS Server/Source/Lib/site-packages/dnslib/test_decode.py
5
9624
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Test dnslib packet encoding/decoding Reads test files from dnslib/test (by default) containing dump of DNS exchange (packet dump & parse output) and test round-trip parsing - specifically: - Parse packet data and zone format data and compare - Repack parsed packet data and compare with original This should test the 'parse', 'fromZone' and 'pack' methods of the associated record types. The original parsed output is created using dnslib by default so systematic encode/decode errors will not be found. By default the test data is checked against 'dig' to ensure that it is correct when generated using the --new option. By default the module runs in 'unittest' mode (and supports unittest --verbose/--failfast options) The module can also be run in interactive mode (--interactive) and inspect failed tests (--debug) New test data files can be automatically created using the: --new <domain> <type> option. The data is checked against dig output and an error raised if this does not match. This is effectively the same as running: python -m dnslib.client --query --hex --dig <domain> <type> It is possible to manually generate test data files using dnslib.client even if the dig data doesn't match (this is usually due to an unsupported RDATA type which dnslib will output in hex rather then parsing contents). The roundtrip tests will still work in this case (the unknown RDATA is handled as an opaque blob). 
In some cases the tests will fail as a result of the zone file parser being more fragile than the packet parser (especially with broken data) Note - unittests are dynamically generated from the test directory contents (matched against the --glob parameter) """ from __future__ import print_function from dnslib.dns import DNSRecord from dnslib.digparser import DigParser import argparse,binascii,code,glob,os,os.path,sys,unittest try: from subprocess import getoutput except ImportError: from commands import getoutput try: input = raw_input except NameError: pass class TestContainer(unittest.TestCase): pass def new_test(domain,qtype,address="8.8.8.8",port=53,nodig=False): tcp = False q = DNSRecord.question(domain,qtype) a_pkt = q.send(address,port) a = DNSRecord.parse(a_pkt) if a.header.tc: tcp = True a_pkt = q.send(address,port,tcp) a = DNSRecord.parse(a_pkt) if not nodig: dig = getoutput("dig +qr -p %d %s %s @%s" % ( port, domain, qtype, address)) dig_reply = list(iter(DigParser(dig))) # DiG might have retried in TCP mode so get last q/a q_dig = dig_reply[-2] a_dig = dig_reply[-1] if q != q_dig or a != a_dig: if q != q_dig: print(";;; ERROR: Diff Question differs") for (d1,d2) in q.diff(q_dig): if d1: print(";; - %s" % d1) if d2: print(";; + %s" % d2) if a != a_dig: print(";;; ERROR: Diff Response differs") for (d1,d2) in a.diff(a_dig): if d1: print(";; - %s" % d1) if d2: print(";; + %s" % d2) return print("Writing test file: %s-%s" % (domain,qtype)) with open("%s-%s" % (domain,qtype),"w") as f: print(";; Sending:",file=f) print(";; QUERY:",binascii.hexlify(q.pack()).decode(),file=f) print(q,file=f) print(file=f) print(";; Got answer:",file=f) print(";; RESPONSE:",binascii.hexlify(a_pkt).decode(),file=f) print(a,file=f) print(file=f) def check_decode(f,debug=False): errors = [] # Parse the q/a records with open(f) as x: q,r = DigParser(x) # Grab the hex data with open(f,'rb') as x: for l in x.readlines(): if l.startswith(b';; QUERY:'): qdata = 
binascii.unhexlify(l.split()[-1]) elif l.startswith(b';; RESPONSE:'): rdata = binascii.unhexlify(l.split()[-1]) # Parse the hex data qparse = DNSRecord.parse(qdata) rparse = DNSRecord.parse(rdata) # Check records generated from DiG input matches # records parsed from packet data if q != qparse: errors.append(('Question',q.diff(qparse))) if r != rparse: errors.append(('Reply',r.diff(rparse))) # Repack the data qpack = qparse.pack() rpack = rparse.pack() # Check if repacked question data matches original # We occasionally get issues where original packet did not # compress all labels - in this case we reparse packed # record, repack this and compare with the packed data if qpack != qdata: if len(qpack) < len(qdata): # Shorter - possibly compression difference if DNSRecord.parse(qpack).pack() != qpack: errors.append(('Question Pack',(qdata,qpack))) else: errors.append(('Question Pack',(qdata,qpack))) if rpack != rdata: if len(rpack) < len(rdata): if DNSRecord.parse(rpack).pack() != rpack: errors.append(('Reply Pack',(rdata,rpack))) else: errors.append(('Reply Pack',(rdata,rpack))) if debug: if errors: print("ERROR\n") print_errors(errors) print() if input(">>> Inspect [y/n]? 
").lower().startswith('y'): code.interact(local=locals()) print() else: print("OK") return errors def print_errors(errors): for err,err_data in errors: if err == 'Question': print("Question error:") for (d1,d2) in err_data: if d1: print(";; - %s" % d1) if d2: print(";; + %s" % d2) elif err == 'Reply': print("Reply error:") for (d1,d2) in err_data: if d1: print(";; - %s" % d1) if d2: print(";; + %s" % d2) elif err == 'Question Pack': print("Question pack error") print("QDATA:",binascii.hexlify(err_data[0])) print(DNSRecord.parse(err_data[0])) print("QPACK:",binascii.hexlify(err_data[1])) print(DNSRecord.parse(err_data[1])) elif err == 'Reply Pack': print("Response pack error") print("RDATA:",binascii.hexlify(err_data[0])) print(DNSRecord.parse(err_data[0])) print("RPACK:",binascii.hexlify(err_data[1])) print(DNSRecord.parse(err_data[1])) def test_generator(f): def test(self): self.assertEqual(check_decode(f),[]) return test if __name__ == '__main__': testdir = os.path.join(os.path.dirname(__file__),"test") p = argparse.ArgumentParser(description="Test Decode") p.add_argument("--new","-n",nargs=2, metavar="<domain/type>", help="Create new test case (args: <domain> <type>)") p.add_argument("--nodig",action='store_true',default=False, help="Don't test new data against DiG") p.add_argument("--unittest",action='store_true',default=True, help="Run unit tests") p.add_argument("--verbose",action='store_true',default=False, help="Verbose unit test output") p.add_argument("--failfast",action='store_true',default=False, help="Stop unit tests on first failure") p.add_argument("--interactive",action='store_true',default=False, help="Run in interactive mode") p.add_argument("--debug",action='store_true',default=False, help="Debug errors (interactive mode)") p.add_argument("--glob","-g",default="*", help="Glob pattern") p.add_argument("--testdir","-t",default=testdir, help="Test dir (%s)" % testdir) p.add_argument("--testfile","-f",default=None, help="Test single file") args = 
p.parse_args() if args.testfile: e = check_decode(args.testfile,args.debug) if not args.debug: if e: print("ERROR\n") print_errors(e) print() else: print("OK") else: os.chdir(args.testdir) if args.new: new_test(*args.new,nodig=args.nodig) elif args.interactive: for f in glob.iglob(args.glob): if os.path.isfile(f): print("-- %s: " % f,end='') e = check_decode(f,args.debug) if not args.debug: if e: print("ERROR\n") print_errors(e) print() else: print("OK") elif args.unittest: for f in glob.iglob(args.glob): if os.path.isfile(f): test_name = 'test_%s' % f test = test_generator(f) setattr(TestContainer,test_name,test) unittest.main(argv=[__name__], verbosity=2 if args.verbose else 1, failfast=args.failfast)
gpl-3.0
ncsulug/ncsulug-website
lug_markup/markup.py
1
3965
# -*- coding: utf-8 -*-
from creoleparser.core import Parser
from creoleparser.dialects import create_dialect, creole11_base, parse_args
from creoleparser.elements import PreBlock, Table as BaseTable
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from genshi import builder, Markup
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.styles.autumn import AutumnStyle
from pygments.util import ClassNotFound

from lug_people.models import MemberProfile

# Lazily-built singletons (the parser and the Pygments formatter) live here.
global_cache = {}

# Inline wiki markup -> HTML tag pairs.
# This includes Markdown-style backticks for inline code.
INLINE_MARKUP = [
    ('**', 'strong'),
    ('//', 'em'),
    (',,', 'sub'),
    ('^^', 'sup'),
    ('__', 'u'),
    ('##', 'code'),
    ('`', 'code'),
    ('--', 'del'),
]


def build_interwikis():
    """Build the base-URL, space-char and CSS-class maps for the interwiki
    prefixes declared in ``settings.INTERWIKIS``."""
    from django.conf import settings
    bases, spaces, classes = {}, {}, {}
    for name, (base, space) in getattr(settings, 'INTERWIKIS', {}).items():
        bases[name] = base
        spaces[name] = space
        # BUG FIX: bind `name` at definition time via a default argument.
        # The original `lambda page: name + '-link'` closed over the loop
        # variable, so every interwiki ended up with the *last* prefix's
        # CSS class.
        classes[name] = lambda page, name=name: name + '-link'
    return bases, spaces, classes


def wiki_link_path(link):
    """Resolve an internal wiki link to a URL path.

    Links beginning with ``~`` point at a member profile; everything else
    is treated as a wiki page title.
    """
    if link.startswith("~"):
        # User profile
        return reverse('profile', args=[MemberProfile.make_username(link[1:])])
    else:
        # The only disallowed character is /.
        # We turn it into - because it's most commonly used in dates.
        return reverse('wiki_view', kwargs={'title': link.replace('/', '-')})


def wiki_link_class(link):
    """CSS class for an internal link: user-link for profiles, wiki-link
    for ordinary pages."""
    if link.startswith("~"):
        return 'user-link'
    else:
        return 'wiki-link'


def get_pygments_formatter():
    """Return the shared Pygments HTML formatter, building it on first use."""
    if 'formatter' not in global_cache:
        global_cache['formatter'] = HtmlFormatter(style=AutumnStyle)
    return global_cache['formatter']


class Table(BaseTable):
    """Creole table element that emits a configurable CSS class."""

    def __init__(self, classes):
        self.classes = classes
        super(Table, self).__init__('table', '|')

    def _build(self, mo, element_store, environ):
        return super(Table, self)._build(mo, element_store, environ) \
            (class_=self.classes)


class CodeBlock(PreBlock):
    """``{{{ ... }}}`` block with optional ``#!lexer`` first line for
    Pygments syntax highlighting."""

    # Code borrowed from Flask Website:
    # https://github.com/mitsuhiko/flask/blob/website/flask_website/utils.py

    def __init__(self):
        super(CodeBlock, self).__init__('pre', ['{{{', '}}}'])

    def _build(self, mo, element_store, environ):
        lines = self.regexp2.sub(r'\1', mo.group(1)).splitlines()
        if lines and lines[0].startswith('#!'):
            # First line names the lexer; an unknown name falls through to a
            # plain <pre> block.
            try:
                lexer = get_lexer_by_name(lines.pop(0)[2:].strip())
            except ClassNotFound:
                pass
            else:
                return Markup(highlight(u'\n'.join(lines), lexer,
                                        get_pygments_formatter()))
        return builder.tag.pre(u'\n'.join(lines))


def create_lug_dialect():
    """Build the customised Creole 1.1 dialect used for LUG wiki markup."""
    iw_bases, iw_spaces, iw_classes = build_interwikis()
    dialect = create_dialect(creole11_base,
        # Markup customizations
        simple_markup = INLINE_MARKUP,
        indent_style = '',
        indent_class = 'quote',
        no_wiki_monospace = False,
        # Internal links
        wiki_links_base_url = "",
        wiki_links_path_func = wiki_link_path,
        wiki_links_class_func = wiki_link_class,
        # Everyone else's links
        external_links_class = 'external-link',
        interwiki_links_base_urls = iw_bases,
        interwiki_links_class_funcs = iw_classes,
        interwiki_links_space_chars = iw_spaces
    )
    dialect.pre = CodeBlock()
    dialect.table = Table('table table-striped table-bordered')
    return dialect


def get_parser():
    """Return the shared Creole-to-HTML parser, building it on first use."""
    if 'parser' not in global_cache:
        global_cache['parser'] = Parser(dialect=create_lug_dialect(),
                                        method='html')
    return global_cache['parser']


def render_markup(text):
    """Render wiki markup to HTML, marked safe for Django templates."""
    return mark_safe(get_parser().render(text))
gpl-3.0
behzadnouri/numpy
numpy/core/setup_common.py
16
15285
from __future__ import division, absolute_import, print_function # Code common to build tools import sys import warnings import copy import binascii from numpy.distutils.misc_util import mingw32 #------------------- # Versioning support #------------------- # How to change C_API_VERSION ? # - increase C_API_VERSION value # - record the hash for the new C API with the script cversions.py # and add the hash to cversions.txt # The hash values are used to remind developers when the C API number was not # updated - generates a MismatchCAPIWarning warning which is turned into an # exception for released version. # Binary compatibility version number. This number is increased whenever the # C-API is changed such that binary compatibility is broken, i.e. whenever a # recompile of extension modules is needed. C_ABI_VERSION = 0x01000009 # Minor API version. This number is increased whenever a change is made to the # C-API -- whether it breaks binary compatibility or not. Some changes, such # as adding a function pointer to the end of the function table, can be made # without breaking binary compatibility. In this case, only the C_API_VERSION # (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is # broken, both C_API_VERSION and C_ABI_VERSION should be increased. # # 0x00000008 - 1.7.x # 0x00000009 - 1.8.x # 0x00000009 - 1.9.x # 0x0000000a - 1.10.x # 0x0000000a - 1.11.x # 0x0000000a - 1.12.x # 0x0000000b - 1.13.x C_API_VERSION = 0x0000000b class MismatchCAPIWarning(Warning): pass def is_released(config): """Return True if a released version of numpy is detected.""" from distutils.version import LooseVersion v = config.get_version('../version.py') if v is None: raise ValueError("Could not get version") pv = LooseVersion(vstring=v).version if len(pv) > 3: return False return True def get_api_versions(apiversion, codegen_dir): """ Return current C API checksum and the recorded checksum. 
Return current C API checksum and the recorded checksum for the given version of the C API version. """ # Compute the hash of the current API as defined in the .txt files in # code_generators sys.path.insert(0, codegen_dir) try: m = __import__('genapi') numpy_api = __import__('numpy_api') curapi_hash = m.fullapi_hash(numpy_api.full_api) apis_hash = m.get_versions_hash() finally: del sys.path[0] return curapi_hash, apis_hash[apiversion] def check_api_version(apiversion, codegen_dir): """Emits a MismacthCAPIWarning if the C API version needs updating.""" curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir) # If different hash, it means that the api .txt files in # codegen_dir have been updated without the API version being # updated. Any modification in those .txt files should be reflected # in the api and eventually abi versions. # To compute the checksum of the current API, use # code_generators/cversions.py script if not curapi_hash == api_hash: msg = ("API mismatch detected, the C API version " "numbers have to be updated. Current C api version is %d, " "with checksum %s, but recorded checksum for C API version %d in " "codegen_dir/cversions.txt is %s. If functions were added in the " "C API, you have to update C_API_VERSION in %s." ) warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash, __file__), MismatchCAPIWarning, stacklevel=2) # Mandatory functions: if not found, fail the build MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] # Standard functions which may not be available and for which we have a # replacement implementation. Note that some of these are C99 functions. 
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh", "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow", "copysign", "nextafter", "ftello", "fseeko", "strtoll", "strtoull", "cbrt", "strtold_l", "fallocate", "backtrace"] OPTIONAL_HEADERS = [ # sse headers only enabled automatically on amd64/x32 builds "xmmintrin.h", # SSE "emmintrin.h", # SSE2 "features.h", # for glibc version linux "xlocale.h", # see GH#8367 "dlfcn.h", # dladdr ] # optional gcc compiler builtins and their call arguments and optional a # required header and definition name (HAVE_ prepended) # call arguments are required as the compiler will do strict signature checking OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'), ("__builtin_isinf", '5.'), ("__builtin_isfinite", '5.'), ("__builtin_bswap32", '5u'), ("__builtin_bswap64", '5u'), ("__builtin_expect", '5, 0'), ("__builtin_mul_overflow", '5, 5, (int*)5'), # broken on OSX 10.11, make sure its not optimized away ("volatile int r = __builtin_cpu_supports", '"sse"', "stdio.h", "__BUILTIN_CPU_SUPPORTS"), # MMX only needed for icc, but some clangs don't have it ("_m_from_int64", '0', "emmintrin.h"), ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE ("_mm_prefetch", '(float*)0, _MM_HINT_NTA', "xmmintrin.h"), # SSE ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2 ("__builtin_prefetch", "(float*)0, 0, 3"), # check that the linker can handle avx ("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"', "stdio.h", "LINK_AVX"), ("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"', "stdio.h", "LINK_AVX2"), ] # function attributes # tested via "int %s %s(void *);" % (attribute, name) # function name will be converted to HAVE_<upper-case-name> preprocessor macro OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))', 'attribute_optimize_unroll_loops'), ('__attribute__((optimize("O3")))', 'attribute_optimize_opt_3'), ('__attribute__((nonnull (1)))', 'attribute_nonnull'), ('__attribute__((target ("avx")))', 
'attribute_target_avx'), ('__attribute__((target ("avx2")))', 'attribute_target_avx2'), ] # variable attributes tested via "int %s a" % attribute OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"] # Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h OPTIONAL_STDFUNCS_MAYBE = [ "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign", "ftello", "fseeko" ] # C99 functions: float and long double versions C99_FUNCS = [ "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign", "nextafter", "cbrt" ] C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS] C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS] C99_COMPLEX_TYPES = [ 'complex double', 'complex float', 'complex long double' ] C99_COMPLEX_FUNCS = [ "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan", "catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow", "cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh" ] def fname2def(name): return "HAVE_%s" % name.upper() def sym2def(symbol): define = symbol.replace(' ', '') return define.upper() def type2def(symbol): define = symbol.replace(' ', '_') return define.upper() # Code to detect long double representation taken from MPFR m4 macro def check_long_double_representation(cmd): cmd._check_compiler() body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'} # Disable whole program optimization (the default on vs2015, with python 3.5+) # which generates intermediary object files and prevents checking the # float representation. 
if sys.platform == "win32" and not mingw32(): try: cmd.compiler.compile_options.remove("/GL") except (AttributeError, ValueError): pass # We need to use _compile because we need the object filename src, obj = cmd._compile(body, None, None, 'c') try: ltype = long_double_representation(pyod(obj)) return ltype except ValueError: # try linking to support CC="gcc -flto" or icc -ipo # struct needs to be volatile so it isn't optimized away body = body.replace('struct', 'volatile struct') body += "int main(void) { return 0; }\n" src, obj = cmd._compile(body, None, None, 'c') cmd.temp_files.append("_configtest") cmd.compiler.link_executable([obj], "_configtest") ltype = long_double_representation(pyod("_configtest")) return ltype finally: cmd._clean() LONG_DOUBLE_REPRESENTATION_SRC = r""" /* "before" is 16 bytes to ensure there's no padding between it and "x". * We're not expecting any "long double" bigger than 16 bytes or with * alignment requirements stricter than 16 bytes. */ typedef %(type)s test_type; struct { char before[16]; test_type x; char after[8]; } foo = { { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' }, -123456789.0, { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' } }; """ def pyod(filename): """Python implementation of the od UNIX utility (od -b, more exactly). Parameters ---------- filename : str name of the file to get the dump from. Returns ------- out : seq list of lines of od output Note ---- We only implement enough to get the necessary information for long double representation, this is not intended as a compatible replacement for od. 
""" def _pyod2(): out = [] fid = open(filename, 'rb') try: yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()] for i in range(0, len(yo), 16): line = ['%07d' % int(oct(i))] line.extend(['%03d' % c for c in yo[i:i+16]]) out.append(" ".join(line)) return out finally: fid.close() def _pyod3(): out = [] fid = open(filename, 'rb') try: yo2 = [oct(o)[2:] for o in fid.read()] for i in range(0, len(yo2), 16): line = ['%07d' % int(oct(i)[2:])] line.extend(['%03d' % int(c) for c in yo2[i:i+16]]) out.append(" ".join(line)) return out finally: fid.close() if sys.version_info[0] < 3: return _pyod2() else: return _pyod3() _BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', '001', '043', '105', '147', '211', '253', '315', '357'] _AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] _IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] _IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] _INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353', '031', '300', '000', '000'] _INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353', '031', '300', '000', '000', '000', '000', '000', '000'] _MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171', '242', '240', '000', '000', '000', '000'] _IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000', '000', '000', '000', '000', '000', '000', '000', '000'] _IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1] _DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] + ['000'] * 8) _DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] + ['000'] * 8) def long_double_representation(lines): """Given a binary dump as given by GNU od -b, look for long double representation.""" # Read contains a list of 32 items, each item is a byte (in octal # representation, as a string). 
We 'slide' over the output until read is of # the form before_seq + content + after_sequence, where content is the long double # representation: # - content is 12 bytes: 80 bits Intel representation # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision # - content is 8 bytes: same as double (not implemented yet) read = [''] * 32 saw = None for line in lines: # we skip the first word, as od -b output an index at the beginning of # each line for w in line.split()[1:]: read.pop(0) read.append(w) # If the end of read is equal to the after_sequence, read contains # the long double if read[-8:] == _AFTER_SEQ: saw = copy.copy(read) if read[:12] == _BEFORE_SEQ[4:]: if read[12:-8] == _INTEL_EXTENDED_12B: return 'INTEL_EXTENDED_12_BYTES_LE' if read[12:-8] == _MOTOROLA_EXTENDED_12B: return 'MOTOROLA_EXTENDED_12_BYTES_BE' elif read[:8] == _BEFORE_SEQ[8:]: if read[8:-8] == _INTEL_EXTENDED_16B: return 'INTEL_EXTENDED_16_BYTES_LE' elif read[8:-8] == _IEEE_QUAD_PREC_BE: return 'IEEE_QUAD_BE' elif read[8:-8] == _IEEE_QUAD_PREC_LE: return 'IEEE_QUAD_LE' elif read[8:-8] == _DOUBLE_DOUBLE_BE: return 'DOUBLE_DOUBLE_BE' elif read[8:-8] == _DOUBLE_DOUBLE_LE: return 'DOUBLE_DOUBLE_LE' elif read[:16] == _BEFORE_SEQ: if read[16:-8] == _IEEE_DOUBLE_LE: return 'IEEE_DOUBLE_LE' elif read[16:-8] == _IEEE_DOUBLE_BE: return 'IEEE_DOUBLE_BE' if saw is not None: raise ValueError("Unrecognized format (%s)" % saw) else: # We never detected the after_sequence raise ValueError("Could not lock sequences (%s)" % saw)
bsd-3-clause
chromium/chromium
tools/metrics/histograms/presubmit_bad_message_reasons.py
11
1468
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Check to see if the various BadMessage enums in histograms.xml need to be
updated. This can be called from a chromium PRESUBMIT.py to ensure updates to
bad_message.h also include the generated changes to histograms.xml
"""

import update_histogram_enum


def PrecheckBadMessage(input_api, output_api, histogram_name):
  """Warns when |histogram_name| in histograms.xml is stale.

  Called once per bad_message.h-containing directory: if this change touched
  a |bad_message.h|, the corresponding enum in histograms.xml is checked
  against it.
  """
  # Find the path of the first affected |bad_message.h|, if any.
  source_path = next(
      (changed.LocalPath() for changed in input_api.AffectedFiles()
       if changed.LocalPath().endswith('bad_message.h')),
      '')

  # No |bad_message.h| in this change: histograms.xml needs no update.
  if source_path == '':
    return []

  start_marker = '^enum (class )?BadMessageReason {'
  end_marker = '^BAD_MESSAGE_MAX'
  presubmit_error = update_histogram_enum.CheckPresubmitErrors(
      histogram_enum_name=histogram_name,
      update_script_name='update_bad_message_reasons.py',
      source_enum_path=source_path,
      start_marker=start_marker,
      end_marker=end_marker)
  if not presubmit_error:
    return []
  return [output_api.PresubmitPromptWarning(presubmit_error,
                                            items=[source_path])]
bsd-3-clause
SUSE/azure-storage-python
azure/storage/queue/_error.py
4
1381
#-------------------------------------------------------------------------
# Copyright (c) Microsoft.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys

from .._error import (
    _validate_type_bytes,
)

# Error messages raised by the validators below.
_ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.'
_ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.'
_ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.'


def _validate_message_type_text(param):
    """Raise TypeError unless param is the native text type.

    On Python 3 the text type is str; on Python 2 it is unicode.
    """
    if sys.version_info >= (3,):
        if not isinstance(param, str):
            raise TypeError(_ERROR_MESSAGE_SHOULD_BE_STR)
        return
    # Python 2: require unicode, rejecting byte strings.
    if not isinstance(param, unicode):
        raise TypeError(_ERROR_MESSAGE_SHOULD_BE_UNICODE)


def _validate_message_type_bytes(param):
    """Raise TypeError unless param is a bytes value."""
    _validate_type_bytes('message', param)
apache-2.0
pbirsinger/asp_backup
tests/platform_detector_test.py
1
1791
import unittest

from asp.config import *


def _read_fixture_cpu_info(self):
    """Replacement for PlatformDetector.read_cpu_info that reads the
    checked-in tests/cpuinfo fixture, making results machine-independent."""
    # Close the fixture file deliberately; the original open() leaked it.
    with open("tests/cpuinfo") as fixture:
        return fixture.readlines()


class CompilerDetectorTests(unittest.TestCase):
    """Tests for CompilerDetector.detect()."""

    def test_detect(self):
        # gcc should be present on any supported dev box...
        self.assertTrue(CompilerDetector().detect("gcc"))
        # ...while a garbage name must not resolve to a compiler.
        self.assertFalse(CompilerDetector().detect("lkasdfj"))


class CPUInfoTests(unittest.TestCase):
    """Tests for PlatformDetector CPU-info parsing.

    The identical read_cpu_info monkey-patch was previously duplicated in
    four test methods and never undone; it is now installed once in setUp
    and restored in tearDown so other suites see the real implementation.
    """

    def setUp(self):
        self._original_read_cpu_info = PlatformDetector.read_cpu_info
        PlatformDetector.read_cpu_info = _read_fixture_cpu_info

    def tearDown(self):
        # Undo the monkey-patch regardless of test outcome.
        PlatformDetector.read_cpu_info = self._original_read_cpu_info

    def _get_fixture_info(self):
        # Parse the fixture through the normal PlatformDetector pipeline.
        return PlatformDetector().get_cpu_info()

    def test_num_cores(self):
        info = self._get_fixture_info()
        self.assertEqual(info['numCores'], 8)

    def test_vendor_and_model(self):
        info = self._get_fixture_info()
        self.assertEqual(info['vendorID'], "GenuineIntel")
        self.assertEqual(info['model'], 30)
        self.assertEqual(info['cpuFamily'], 6)

    def test_cache_size(self):
        info = self._get_fixture_info()
        self.assertEqual(info['cacheSize'], 8192)

    def test_capabilities(self):
        info = self._get_fixture_info()
        self.assertEqual(info['capabilities'].count("sse"), 1)

    def test_compilers(self):
        compilers = PlatformDetector().get_compilers()
        self.assertTrue("gcc" in compilers)


if __name__ == '__main__':
    unittest.main()
bsd-3-clause
impowski/servo
tests/wpt/web-platform-tests/tools/py/testing/root/test_xmlgen.py
162
4008
import py from py._xmlgen import unicode, html, raw class ns(py.xml.Namespace): pass def test_escape(): uvalue = py.builtin._totext('\xc4\x85\xc4\x87\n\xe2\x82\xac\n', 'utf-8') class A: def __unicode__(self): return uvalue def __str__(self): x = self.__unicode__() if py.std.sys.version_info[0] < 3: return x.encode('utf-8') return x y = py.xml.escape(uvalue) assert y == uvalue x = py.xml.escape(A()) assert x == uvalue if py.std.sys.version_info[0] < 3: assert isinstance(x, unicode) assert isinstance(y, unicode) y = py.xml.escape(uvalue.encode('utf-8')) assert y == uvalue def test_tag_with_text(): x = ns.hello("world") u = unicode(x) assert u == "<hello>world</hello>" def test_class_identity(): assert ns.hello is ns.hello def test_tag_with_text_and_attributes(): x = ns.some(name="hello", value="world") assert x.attr.name == 'hello' assert x.attr.value == 'world' u = unicode(x) assert u == '<some name="hello" value="world"/>' def test_tag_with_subclassed_attr_simple(): class my(ns.hello): class Attr(ns.hello.Attr): hello="world" x = my() assert x.attr.hello == 'world' assert unicode(x) == '<my hello="world"/>' def test_tag_with_raw_attr(): x = html.object(data=raw('&')) assert unicode(x) == '<object data="&"></object>' def test_tag_nested(): x = ns.hello(ns.world()) unicode(x) # triggers parentifying assert x[0].parent is x u = unicode(x) assert u == '<hello><world/></hello>' def test_list_nested(): x = ns.hello([ns.world()]) #pass in a list here u = unicode(x) assert u == '<hello><world/></hello>' def test_tag_xmlname(): class my(ns.hello): xmlname = 'world' u = unicode(my()) assert u == '<world/>' def test_tag_with_text_entity(): x = ns.hello('world & rest') u = unicode(x) assert u == "<hello>world &amp; rest</hello>" def test_tag_with_text_and_attributes_entity(): x = ns.some(name="hello & world") assert x.attr.name == "hello & world" u = unicode(x) assert u == '<some name="hello &amp; world"/>' def test_raw(): x = ns.some(py.xml.raw("<p>literal</p>")) u = 
unicode(x) assert u == "<some><p>literal</p></some>" def test_html_name_stickyness(): class my(html.p): pass x = my("hello") assert unicode(x) == '<p>hello</p>' def test_stylenames(): class my: class body(html.body): style = html.Style(font_size = "12pt") u = unicode(my.body()) assert u == '<body style="font-size: 12pt"></body>' def test_class_None(): t = html.body(class_=None) u = unicode(t) assert u == '<body></body>' def test_alternating_style(): alternating = ( html.Style(background="white"), html.Style(background="grey"), ) class my(html): class li(html.li): def style(self): i = self.parent.index(self) return alternating[i%2] style = property(style) x = my.ul( my.li("hello"), my.li("world"), my.li("42")) u = unicode(x) assert u == ('<ul><li style="background: white">hello</li>' '<li style="background: grey">world</li>' '<li style="background: white">42</li>' '</ul>') def test_singleton(): h = html.head(html.link(href="foo")) assert unicode(h) == '<head><link href="foo"/></head>' h = html.head(html.script(src="foo")) assert unicode(h) == '<head><script src="foo"></script></head>' def test_inline(): h = html.div(html.span('foo'), html.span('bar')) assert (h.unicode(indent=2) == '<div><span>foo</span><span>bar</span></div>') def test_object_tags(): o = html.object(html.object()) assert o.unicode(indent=0) == '<object><object></object></object>'
mpl-2.0
JCA-Developpement/Odoo
addons/account_asset/wizard/__init__.py
445
1122
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import account_asset_change_duration import wizard_asset_compute # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Boyang--Li/ardupilot
mk/PX4/Tools/genmsg/src/genmsg/srvs.py
216
3017
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of Willow Garage, Inc. nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

"""
ROS Service Description Language Spec
Implements http://ros.org/wiki/srv
"""

import os
import sys

from . names import is_legal_resource_name, is_legal_resource_base_name, package_resource_name, resource_name


class SrvSpec(object):
    """Parsed representation of a .srv service description.

    Holds the request and response message specs, the raw text, and the
    naming metadata (full name, short name, owning package).
    """

    def __init__(self, request, response, text, full_name = '', short_name = '', package = ''):
        # Derive package/short name from the full resource name and fall
        # back to them wherever the caller did not supply explicit values.
        derived_package, derived_short_name = package_resource_name(full_name)
        self.request = request
        self.response = response
        self.text = text
        self.full_name = full_name
        self.short_name = short_name or derived_short_name
        self.package = package or derived_package

    def _identity(self):
        # Every field that participates in equality, gathered as one tuple.
        return (self.request, self.response, self.text,
                self.full_name, self.short_name, self.package)

    def __eq__(self, other):
        if not other or not isinstance(other, SrvSpec):
            return False
        return self._identity() == other._identity()

    def __ne__(self, other):
        # Delegate to __eq__ so the two operators can never disagree.
        return not self.__eq__(other)

    def __repr__(self):
        return "SrvSpec[%s, %s]" % (repr(self.request), repr(self.response))
gpl-3.0
neurord/pysb
pysb/annotation.py
4
1332
from pysb.core import SelfExporter, Component


class Annotation(object):
    """A lightweight annotation mechanism for model elements.

    Based loosely on MIRIAM (http://co.mbine.org/standards/miriam) which is
    in turn based on RDF. An Annotation is equivalent to an RDF triple.

    This is still an experimental feature!

    Parameters
    ----------
    subject
        Element to annotate, typically a Component.
    object_
        Annotation, typically a string containing an identifiers.org URL.
    predicate : string, optional
        Relationship of `object_` to `subject`, typically a string containing
        a biomodels.net qualifier. If not specified, defaults to 'is'.
    """

    def __init__(self, subject, object_, predicate="is"):
        self.subject = subject
        self.object = object_
        self.predicate = predicate
        # When the self-export machinery is active, constructing an
        # Annotation registers it on the default model as a side effect.
        if SelfExporter.do_export:
            SelfExporter.default_model.add_annotation(self)

    def __repr__(self):
        # Components render by name; any other subject is inserted as-is.
        subject_text = (self.subject.name
                        if isinstance(self.subject, Component)
                        else self.subject)
        return "%s(%s, %s, %s)" % (type(self).__name__, subject_text,
                                   repr(self.object), repr(self.predicate))
bsd-2-clause
wri/gfw-api
gfw/cdb.py
1
2802
# Global Forest Watch API
# Copyright (C) 2013 World Resource Institute
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.

"""This module supports executing CartoDB queries."""

import copy
import urllib
import logging

from appengine_config import runtime_config

from google.appengine.api import urlfetch

# CartoDB endpoint, overridable via runtime config:
if runtime_config.get('cdb_endpoint'):
    ENDPOINT = runtime_config.get('cdb_endpoint')
else:
    ENDPOINT = 'https://wri-01.cartodb.com/api/v2/sql'


def _get_api_key():
    """Return CartoDB API key stored in cdb.txt file."""
    return runtime_config.get('cdb_api_key')


def get_format(media_type):
    """Return CartoDB format for supplied GFW custom media type."""
    tokens = media_type.split('.')
    # assumes media_type has either exactly 2 dot-separated tokens (no
    # explicit format, so default applies) or 3+ with the third carrying
    # the format -- TODO confirm against callers.
    if len(tokens) == 2:
        return ''
    else:
        return tokens[2].split('+')[0]


def get_url(query, params, auth=False):
    """Return CartoDB query URL for supplied params.

    The caller's params dict is not modified.
    """
    params = copy.copy(params)
    params['q'] = query
    if auth:
        params['api_key'] = _get_api_key()
    # Only whitelisted parameters are forwarded to CartoDB.
    clean_params = {}
    for key, value in params.iteritems():
        if key in ['api_key', 'format', 'q', 'version']:
            clean_params[key] = value
    url = '%s?%s' % (ENDPOINT, urllib.urlencode(clean_params))
    # TODO: Hack
    if 'version' in clean_params:
        url = url.replace('v2', clean_params['version'])
    if runtime_config.get('IS_DEV'):
        logging.info(url)
    return str(url)


def get_body(query, params, auth=False):
    """Return CartoDB payload body for supplied params.

    The caller's params dict is not modified.
    """
    # Fix: copy before mutating, consistent with get_url(). Previously the
    # caller's dict -- including execute()'s shared mutable default -- was
    # modified in place, leaking 'q' (and 'api_key') between calls.
    params = copy.copy(params)
    params['q'] = query
    if auth:
        params['api_key'] = _get_api_key()
    body = urllib.urlencode(params)
    return body


def execute(query, params={}, auth=False):
    """Executes supplied query on CartoDB and returns response body as JSON.

    Note: the params={} default is now safe because get_body() copies it;
    the default value itself is kept unchanged for interface compatibility.
    """
    rpc = urlfetch.create_rpc(deadline=50)
    payload = get_body(query, params, auth=auth)
    if runtime_config.get('IS_DEV'):
        logging.info(query)
        logging.info(payload)
    urlfetch.make_fetch_call(rpc, ENDPOINT, method='POST', payload=payload)
    return rpc.get_result()
gpl-2.0
nvoron23/scipy
scipy/sparse/coo.py
17
18025
""" A sparse matrix in COOrdinate or 'triplet' format""" from __future__ import division, print_function, absolute_import __docformat__ = "restructuredtext en" __all__ = ['coo_matrix', 'isspmatrix_coo'] from warnings import warn import numpy as np from scipy._lib.six import xrange, zip as izip from ._sparsetools import coo_tocsr, coo_todense, coo_matvec from .base import isspmatrix from .data import _data_matrix, _minmax_mixin from .sputils import (upcast, upcast_char, to_native, isshape, getdtype, isintlike, get_index_dtype, downcast_intp_index) class coo_matrix(_data_matrix, _minmax_mixin): """ A sparse matrix in COOrdinate format. Also known as the 'ijv' or 'triplet' format. This can be instantiated in several ways: coo_matrix(D) with a dense matrix D coo_matrix(S) with another sparse matrix S (equivalent to S.tocoo()) coo_matrix((M, N), [dtype]) to construct an empty matrix with shape (M, N) dtype is optional, defaulting to dtype='d'. coo_matrix((data, (i, j)), [shape=(M, N)]) to construct from three arrays: 1. data[:] the entries of the matrix, in any order 2. i[:] the row indices of the matrix entries 3. j[:] the column indices of the matrix entries Where ``A[i[k], j[k]] = data[k]``. When shape is not specified, it is inferred from the index arrays Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of nonzero elements data COO format data array of the matrix row COO format row index array of the matrix col COO format column index array of the matrix Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. 
Advantages of the COO format - facilitates fast conversion among sparse formats - permits duplicate entries (see example) - very fast conversion to and from CSR/CSC formats Disadvantages of the COO format - does not directly support: + arithmetic operations + slicing Intended Usage - COO is a fast format for constructing sparse matrices - Once a matrix has been constructed, convert to CSR or CSC format for fast arithmetic and matrix vector operations - By default when converting to CSR or CSC format, duplicate (i,j) entries will be summed together. This facilitates efficient construction of finite element matrices and the like. (see example) Examples -------- >>> from scipy.sparse import coo_matrix >>> coo_matrix((3, 4), dtype=np.int8).toarray() array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) >>> row = np.array([0, 3, 1, 0]) >>> col = np.array([0, 3, 1, 2]) >>> data = np.array([4, 5, 7, 9]) >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray() array([[4, 0, 9, 0], [0, 7, 0, 0], [0, 0, 0, 0], [0, 0, 0, 5]]) >>> # example with duplicates >>> row = np.array([0, 0, 1, 3, 1, 0, 0]) >>> col = np.array([0, 2, 1, 3, 1, 0, 0]) >>> data = np.array([1, 1, 1, 1, 1, 1, 1]) >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) """ def __init__(self, arg1, shape=None, dtype=None, copy=False): _data_matrix.__init__(self) if isinstance(arg1, tuple): if isshape(arg1): M, N = arg1 self.shape = (M,N) idx_dtype = get_index_dtype(maxval=max(M, N)) self.row = np.array([], dtype=idx_dtype) self.col = np.array([], dtype=idx_dtype) self.data = np.array([], getdtype(dtype, default=float)) self.has_canonical_format = True else: try: obj, ij = arg1 except: raise TypeError('invalid input format') try: if len(ij) != 2: raise TypeError except TypeError: raise TypeError('invalid input format') self.row = np.array(ij[0], copy=copy) self.col = np.array(ij[1], copy=copy) self.data = np.array(obj, copy=copy) if 
shape is None: if len(self.row) == 0 or len(self.col) == 0: raise ValueError('cannot infer dimensions from zero ' 'sized index arrays') M = self.row.max() + 1 N = self.col.max() + 1 self.shape = (M, N) else: # Use 2 steps to ensure shape has length 2. M, N = shape self.shape = (M, N) idx_dtype = get_index_dtype(maxval=max(self.shape)) self.row = self.row.astype(idx_dtype) self.col = self.col.astype(idx_dtype) self.has_canonical_format = False elif arg1 is None: # Initialize an empty matrix. if not isinstance(shape, tuple) or not isintlike(shape[0]): raise TypeError('dimensions not understood') warn('coo_matrix(None, shape=(M,N)) is deprecated, ' 'use coo_matrix( (M,N) ) instead', DeprecationWarning) idx_dtype = get_index_dtype(maxval=max(M, N)) self.shape = shape self.data = np.array([], getdtype(dtype, default=float)) self.row = np.array([], dtype=idx_dtype) self.col = np.array([], dtype=idx_dtype) self.has_canonical_format = True else: if isspmatrix(arg1): if isspmatrix_coo(arg1) and copy: self.row = arg1.row.copy() self.col = arg1.col.copy() self.data = arg1.data.copy() self.shape = arg1.shape else: coo = arg1.tocoo() self.row = coo.row self.col = coo.col self.data = coo.data self.shape = coo.shape self.has_canonical_format = False else: #dense argument try: M = np.atleast_2d(np.asarray(arg1)) except: raise TypeError('invalid input format') if M.ndim != 2: raise TypeError('expected dimension <= 2 array or matrix') else: self.shape = M.shape self.row, self.col = M.nonzero() self.data = M[self.row, self.col] self.has_canonical_format = True if dtype is not None: self.data = self.data.astype(dtype) self._check() def getnnz(self, axis=None): """Get the count of explicitly-stored values (nonzeros) Parameters ---------- axis : None, 0, or 1 Select between the number of values across the whole matrix, in each column, or in each row. 
""" if axis is None: nnz = len(self.data) if nnz != len(self.row) or nnz != len(self.col): raise ValueError('row, column, and data array must all be the ' 'same length') if self.data.ndim != 1 or self.row.ndim != 1 or \ self.col.ndim != 1: raise ValueError('row, column, and data arrays must be 1-D') return int(nnz) if axis < 0: axis += 2 if axis == 0: return np.bincount(downcast_intp_index(self.col), minlength=self.shape[1]) elif axis == 1: return np.bincount(downcast_intp_index(self.row), minlength=self.shape[0]) else: raise ValueError('axis out of bounds') nnz = property(fget=getnnz) def _check(self): """ Checks data structure for consistency """ nnz = self.nnz # index arrays should have integer data types if self.row.dtype.kind != 'i': warn("row index array has non-integer dtype (%s) " % self.row.dtype.name) if self.col.dtype.kind != 'i': warn("col index array has non-integer dtype (%s) " % self.col.dtype.name) idx_dtype = get_index_dtype(maxval=max(self.shape)) self.row = np.asarray(self.row, dtype=idx_dtype) self.col = np.asarray(self.col, dtype=idx_dtype) self.data = to_native(self.data) if nnz > 0: if self.row.max() >= self.shape[0]: raise ValueError('row index exceeds matrix dimensions') if self.col.max() >= self.shape[1]: raise ValueError('column index exceeds matrix dimensions') if self.row.min() < 0: raise ValueError('negative row index found') if self.col.min() < 0: raise ValueError('negative column index found') def transpose(self, copy=False): M,N = self.shape return coo_matrix((self.data, (self.col, self.row)), shape=(N,M), copy=copy) def toarray(self, order=None, out=None): """See the docstring for `spmatrix.toarray`.""" B = self._process_toarray_args(order, out) fortran = int(B.flags.f_contiguous) if not fortran and not B.flags.c_contiguous: raise ValueError("Output array must be C or F contiguous") M,N = self.shape coo_todense(M, N, self.nnz, self.row, self.col, self.data, B.ravel('A'), fortran) return B def tocsc(self): """Return a copy of this 
matrix in Compressed Sparse Column format Duplicate entries will be summed together. Examples -------- >>> from numpy import array >>> from scipy.sparse import coo_matrix >>> row = array([0, 0, 1, 3, 1, 0, 0]) >>> col = array([0, 2, 1, 3, 1, 0, 0]) >>> data = array([1, 1, 1, 1, 1, 1, 1]) >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc() >>> A.toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) """ from .csc import csc_matrix if self.nnz == 0: return csc_matrix(self.shape, dtype=self.dtype) else: M,N = self.shape idx_dtype = get_index_dtype((self.col, self.row), maxval=max(self.nnz, M)) indptr = np.empty(N + 1, dtype=idx_dtype) indices = np.empty(self.nnz, dtype=idx_dtype) data = np.empty(self.nnz, dtype=upcast(self.dtype)) coo_tocsr(N, M, self.nnz, self.col.astype(idx_dtype), self.row.astype(idx_dtype), self.data, indptr, indices, data) A = csc_matrix((data, indices, indptr), shape=self.shape) A.sum_duplicates() return A def tocsr(self): """Return a copy of this matrix in Compressed Sparse Row format Duplicate entries will be summed together. 
Examples -------- >>> from numpy import array >>> from scipy.sparse import coo_matrix >>> row = array([0, 0, 1, 3, 1, 0, 0]) >>> col = array([0, 2, 1, 3, 1, 0, 0]) >>> data = array([1, 1, 1, 1, 1, 1, 1]) >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr() >>> A.toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) """ from .csr import csr_matrix if self.nnz == 0: return csr_matrix(self.shape, dtype=self.dtype) else: M,N = self.shape idx_dtype = get_index_dtype((self.row, self.col), maxval=max(self.nnz, N)) indptr = np.empty(M + 1, dtype=idx_dtype) indices = np.empty(self.nnz, dtype=idx_dtype) data = np.empty(self.nnz, dtype=upcast(self.dtype)) coo_tocsr(M, N, self.nnz, self.row.astype(idx_dtype), self.col.astype(idx_dtype), self.data, indptr, indices, data) A = csr_matrix((data, indices, indptr), shape=self.shape) A.sum_duplicates() return A def tocoo(self, copy=False): if copy: return self.copy() else: return self def todia(self): from .dia import dia_matrix ks = self.col - self.row # the diagonal for each nonzero diags = np.unique(ks) if len(diags) > 100: #probably undesired, should we do something? #should todia() have a maxdiags parameter? pass #initialize and fill in data array if self.data.size == 0: data = np.zeros((0, 0), dtype=self.dtype) else: data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype) data[np.searchsorted(diags,ks), self.col] = self.data return dia_matrix((data,diags), shape=self.shape) def todok(self): from .dok import dok_matrix self.sum_duplicates() dok = dok_matrix((self.shape), dtype=self.dtype) dok.update(izip(izip(self.row,self.col),self.data)) return dok def diagonal(self): # Could be rewritten without the python loop. # Data entries at the same (row, col) are summed. 
n = min(self.shape) ndata = self.data.shape[0] d = np.zeros(n, dtype=self.dtype) for i in xrange(ndata): r = self.row[i] if r == self.col[i]: d[r] += self.data[i] return d diagonal.__doc__ = _data_matrix.diagonal.__doc__ def _setdiag(self, values, k): M, N = self.shape if values.ndim and not len(values): return idx_dtype = self.row.dtype # Determine which triples to keep and where to put the new ones. full_keep = self.col - self.row != k if k < 0: max_index = min(M+k, N) if values.ndim: max_index = min(max_index, len(values)) keep = np.logical_or(full_keep, self.col >= max_index) new_row = np.arange(-k, -k + max_index, dtype=idx_dtype) new_col = np.arange(max_index, dtype=idx_dtype) else: max_index = min(M, N-k) if values.ndim: max_index = min(max_index, len(values)) keep = np.logical_or(full_keep, self.row >= max_index) new_row = np.arange(max_index, dtype=idx_dtype) new_col = np.arange(k, k + max_index, dtype=idx_dtype) # Define the array of data consisting of the entries to be added. if values.ndim: new_data = values[:max_index] else: new_data = np.empty(max_index, dtype=self.dtype) new_data[:] = values # Update the internal structure. self.row = np.concatenate((self.row[keep], new_row)) self.col = np.concatenate((self.col[keep], new_col)) self.data = np.concatenate((self.data[keep], new_data)) self.has_canonical_format = False # needed by _data_matrix def _with_data(self,data,copy=True): """Returns a matrix with the same sparsity structure as self, but with different data. By default the index arrays (i.e. .row and .col) are copied. 
""" if copy: return coo_matrix((data, (self.row.copy(), self.col.copy())), shape=self.shape, dtype=data.dtype) else: return coo_matrix((data, (self.row, self.col)), shape=self.shape, dtype=data.dtype) def sum_duplicates(self): """Eliminate duplicate matrix entries by adding them together This is an *in place* operation """ if self.has_canonical_format or len(self.data) == 0: return order = np.lexsort((self.row,self.col)) self.row = self.row[order] self.col = self.col[order] self.data = self.data[order] unique_mask = ((self.row[1:] != self.row[:-1]) | (self.col[1:] != self.col[:-1])) unique_mask = np.append(True, unique_mask) self.row = self.row[unique_mask] self.col = self.col[unique_mask] unique_inds, = np.nonzero(unique_mask) self.data = np.add.reduceat(self.data, unique_inds, dtype=self.dtype) self.has_canonical_format = True ########################### # Multiplication handlers # ########################### def _mul_vector(self, other): #output array result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char, other.dtype.char)) coo_matvec(self.nnz, self.row, self.col, self.data, other, result) return result def _mul_multivector(self, other): return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T]) def isspmatrix_coo(x): return isinstance(x, coo_matrix)
bsd-3-clause
stefanbirkner/quaidan
quaidan/command.py
1
1431
"""Update commands that are send to the balancer manager. """ class UpdateMember(object): """A command that updates a single member. """ # pylint: disable = too-few-public-methods # pylint: disable = too-many-instance-attributes def __init__(self, cluster_name, member): """Creates a new update command for a single member. :param cluster_name: The name of the member's cluster. :param member: the member's current status. """ self.cluster_name = cluster_name self.worker_url = member.worker_url self.route = member.route self.route_redirect = member.route_redirect self.load_factor = member.load_factor self.lb_set = member.lb_set self.ignore_errors = member.ignore_errors self.draining_mode = member.draining_mode self.enabled = member.enabled self.hot_standby = member.hot_standby def to_form(self): """Create the form parameters for this command. """ return { 'b': self.cluster_name, 'w': self.worker_url, 'w_wr': self.route, 'w_rr': self.route_redirect, 'w_lf': self.load_factor, 'w_ls': self.lb_set, 'w_status_I': self.ignore_errors, 'w_status_N': self.draining_mode, 'w_status_D': not self.enabled, 'w_status_H': self.hot_standby }
mit
maurofaccenda/ansible
lib/ansible/modules/network/f5/bigip_hostname.py
53
5148
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2016 F5 Networks Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: bigip_hostname short_description: Manage the hostname of a BIG-IP. description: - Manage the hostname of a BIG-IP. version_added: "2.3" options: hostname: description: - Hostname of the BIG-IP host. required: true notes: - Requires the f5-sdk Python package on the host. This is as easy as pip install f5-sdk. 
extends_documentation_fragment: f5 requirements: - f5-sdk author: - Tim Rupp (@caphrim007) ''' EXAMPLES = ''' - name: Set the hostname of the BIG-IP bigip_hostname: hostname: "bigip.localhost.localdomain" password: "admin" server: "bigip.localhost.localdomain" user: "admin" delegate_to: localhost ''' RETURN = ''' hostname: description: The new hostname of the device returned: changed type: string sample: "big-ip01.internal" ''' try: from f5.bigip.contexts import TransactionContextManager from f5.bigip import ManagementRoot from icontrol.session import iControlUnexpectedHTTPError HAS_F5SDK = True except ImportError: HAS_F5SDK = False class BigIpHostnameManager(object): def __init__(self, *args, **kwargs): self.changed_params = dict() self.params = kwargs self.api = None def connect_to_bigip(self, **kwargs): return ManagementRoot(kwargs['server'], kwargs['user'], kwargs['password'], port=kwargs['server_port']) def ensure_hostname_is_present(self): self.changed_params['hostname'] = self.params['hostname'] if self.params['check_mode']: return True tx = self.api.tm.transactions.transaction with TransactionContextManager(tx) as api: r = api.tm.sys.global_settings.load() r.update(hostname=self.params['hostname']) if self.hostname_exists(): return True else: raise F5ModuleError("Failed to set the hostname") def hostname_exists(self): if self.params['hostname'] == self.current_hostname(): return True else: return False def present(self): if self.hostname_exists(): return False else: return self.ensure_hostname_is_present() def current_hostname(self): r = self.api.tm.sys.global_settings.load() return r.hostname def apply_changes(self): result = dict() changed = self.apply_to_running_config() if changed: self.save_running_config() result.update(**self.changed_params) result.update(dict(changed=changed)) return result def apply_to_running_config(self): try: self.api = self.connect_to_bigip(**self.params) return self.present() except iControlUnexpectedHTTPError as e: raise 
F5ModuleError(str(e)) def save_running_config(self): self.api.tm.sys.config.exec_cmd('save') class BigIpHostnameModuleConfig(object): def __init__(self): self.argument_spec = dict() self.meta_args = dict() self.supports_check_mode = True self.initialize_meta_args() self.initialize_argument_spec() def initialize_meta_args(self): args = dict( hostname=dict(required=True) ) self.meta_args = args def initialize_argument_spec(self): self.argument_spec = f5_argument_spec() self.argument_spec.update(self.meta_args) def create(self): return AnsibleModule( argument_spec=self.argument_spec, supports_check_mode=self.supports_check_mode ) def main(): if not HAS_F5SDK: raise F5ModuleError("The python f5-sdk module is required") config = BigIpHostnameModuleConfig() module = config.create() try: obj = BigIpHostnameManager( check_mode=module.check_mode, **module.params ) result = obj.apply_changes() module.exit_json(**result) except F5ModuleError as e: module.fail_json(msg=str(e)) from ansible.module_utils.basic import * from ansible.module_utils.f5_utils import * if __name__ == '__main__': main()
gpl-3.0
testlnord/entity_matching_tool
main.py
1
1049
"""Application entry point: registers the REST resources and the UI routes.

``models`` is imported for its side effect of registering the SQLAlchemy
models with the application; it is not referenced directly in this module.
"""
from entity_matching_tool import app, api, models  # noqa: F401 -- models imported for side effects
from entity_matching_tool.resources import JobList, Jobs, CsvFiles, FieldNames, Entities, Matching, MetricNames, Users, \
    Token, SavingResults, ChangingMetric, AddSource
from flask import render_template

# REST API endpoints, handled by Flask-RESTful resource classes.
api.add_resource(JobList, '/joblist/')
api.add_resource(Jobs, '/jobs/')
api.add_resource(CsvFiles, '/csvfiles/')
api.add_resource(FieldNames, '/fieldnames/')
api.add_resource(Entities, '/entities/')
api.add_resource(Matching, '/matching/')
api.add_resource(MetricNames, '/metrics/')
api.add_resource(Users, '/signup/')
api.add_resource(Token, '/login/')
api.add_resource(SavingResults, '/saving/')
api.add_resource(ChangingMetric, '/changemetric/')
api.add_resource(AddSource, '/files/')


@app.route('/')
@app.route('/signin/')
@app.route('/signup/')
@app.route('/joblist/')
@app.route('/jobs/')
@app.route('/csvfiles/')
@app.route('/fieldnames/')
@app.route('/metrics/')
@app.route('/files/')
def index():
    """Serve the single-page-application shell for every UI route."""
    return render_template('index.html')


if __name__ == '__main__':
    # Fix: the original called app.run() unconditionally at module level,
    # which started Flask's development server as a side effect of merely
    # importing this module (e.g. from tests or a production WSGI server).
    # Running it only under the script guard preserves the command-line
    # behaviour while making the module safely importable.
    app.run(host='0.0.0.0', threaded=True)
mit
dflemin3/ICgen
make_snapshotSType.py
2
9904
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 21 15:11:31 2014

@author: ibackus
@editor: dflemin3
-Note: indentation is 4 spaces in this file, not a tab!

This module initializes an S-type binary system in which the gas disk is
around the primary, not both stars!  Assumes a_bin >> r_disk such that the
disk's velocity is dominated by the influence of the primary.
"""

__version__ = "$Revision: 1 $"
# $Source$

import pynbody
SimArray = pynbody.array.SimArray
import numpy as np
import binaryUtils
import gc
import os

import AddBinary
import isaac
import calc_velocity
import ICgen_utils

import ICglobal_settings
global_settings = ICglobal_settings.global_settings


def snapshot_gen(ICobj):
    """
    Generates a tipsy snapshot from the initial conditions object ICobj.

    Returns snapshot, param

        snapshot: tipsy snapshot
        param: dictionary containing info for a .param file

    Note: Code has been edited (dflemin3) such that now it returns a snapshot
    for a circumbinary disk where initial conditions generated assuming star
    at origin of mass M.  After gas initialized, replaced star at origin with
    binary system who's center of mass lies at the origin and who's mass
    m1 + m2 = M

    NOTE(review): also returns a third value (the director dict) -- see the
    return statement below; confirm callers unpack three values.
    """

    print 'Generating snapshot...'
    # Constants
    G = SimArray(1.0,'G')
    # ------------------------------------
    # Load in things from ICobj
    # ------------------------------------
    print 'Accessing data from ICs'
    settings = ICobj.settings

    # snapshot file name
    snapshotName = settings.filenames.snapshotName
    paramName = settings.filenames.paramName

    # particle positions
    r = ICobj.pos.r
    xyz = ICobj.pos.xyz

    # Number of particles
    nParticles = ICobj.pos.nParticles

    # molecular mass (mean molecular weight, stored in the .param file below)
    m = settings.physical.m

    # star mass (copied so the ICobj settings are not mutated)
    m_star = settings.physical.M.copy()

    # disk mass, converted to the same units as the star mass
    m_disk = ICobj.sigma.m_disk.copy()
    m_disk = isaac.match_units(m_disk, m_star)[0]

    # mass of the gas particles
    m_particles = m_disk / float(nParticles)

    # re-scale the particles (allows making of low-mass disk)
    m_particles *= settings.snapshot.mScale

    # -------------------------------------------------
    # Assign output
    # -------------------------------------------------
    print 'Assigning data to snapshot'

    # Get units all set up
    m_unit = m_star.units
    pos_unit = r.units

    if xyz.units != r.units:
        xyz.convert_units(pos_unit)

    # time units are sqrt(L^3/GM)
    t_unit = np.sqrt((pos_unit**3)*np.power((G*m_unit), -1)).units
    # velocity units are L/t
    v_unit = (pos_unit/t_unit).ratio('km s**-1')
    # Make it a unit, save value for future conversion
    v_unit_vel = v_unit
    #Ensure v_unit_vel is the same as what I assume it is.
    assert(np.fabs(AddBinary.VEL_UNIT-v_unit_vel)<AddBinary.SMALL),"VEL_UNIT not equal to ChaNGa unit! Why??"

    v_unit = pynbody.units.Unit('{0} km s**-1'.format(v_unit))

    # Other settings
    metals = settings.snapshot.metals
    star_metals = metals

    # Generate snapshot
    # Note that empty pos, vel, and mass arrays are created in the snapshot
    # (nParticles gas particles + 1 star particle).
    snapshot = pynbody.new(star=1,gas=nParticles)
    snapshot['vel'].units = v_unit
    snapshot['eps'] = 0.01*SimArray(np.ones(nParticles+1, dtype=np.float32), pos_unit)
    snapshot['metals'] = SimArray(np.zeros(nParticles+1, dtype=np.float32))
    snapshot['rho'] = SimArray(np.zeros(nParticles+1, dtype=np.float32))

    snapshot.gas['pos'] = xyz
    snapshot.gas['temp'] = ICobj.T(r)
    snapshot.gas['mass'] = m_particles
    snapshot.gas['metals'] = metals

    # Single star at the origin, at rest; it is replaced by the binary later.
    snapshot.star['pos'] = SimArray([[ 0.,  0.,  0.]],pos_unit)
    snapshot.star['vel'] = SimArray([[ 0.,  0.,  0.]], v_unit)
    snapshot.star['mass'] = m_star
    snapshot.star['metals'] = SimArray(star_metals)
    # Estimate the star's softening length as the closest particle distance
    eps = r.min()

    # Make param file
    param = isaac.make_param(snapshot, snapshotName)
    param['dMeanMolWeight'] = m

    gc.collect()

    # CALCULATE VELOCITY USING calc_velocity.py.  This also estimates the
    # gravitational softening length eps
    print 'Calculating circular velocity'
    preset = settings.changa_run.preset
    max_particles = global_settings['misc']['max_particles']
    calc_velocity.v_xy(snapshot, param, changa_preset=preset, max_particles=max_particles)

    gc.collect()

    # -------------------------------------------------
    # Estimate time step for changa to use
    # -------------------------------------------------
    # Save param file
    isaac.configsave(param, paramName, 'param')
    # Save snapshot
    snapshot.write(filename=snapshotName, fmt=pynbody.tipsy.TipsySnap)
    # est dDelta
    dDelta = ICgen_utils.est_time_step(paramName, preset)
    param['dDelta'] = dDelta

    # -------------------------------------------------
    # Create director file
    # -------------------------------------------------
    # largest radius to plot
    r_director = float(0.9 * r.max())
    # surface density at the largest plotted radius (lower colour bound)
    sigma_min = float(ICobj.sigma(r_director))
    # maximum of the input surface-density profile (upper colour bound)
    sigma_max = float(ICobj.sigma.input_dict['sigma'].max())
    # Create director dict
    director = isaac.make_director(sigma_min, sigma_max, r_director, filename=param['achOutName'])
    ## Save .director file
    #isaac.configsave(director, directorName, 'director')

    """
    Now that the gas disk is initializes around the primary (M=m1), add in the
    second star as specified by the user.
    """
    #Now that velocities and everything are all initialized for gas particles,
    #create new snapshot to return in which single star particle is replaced
    #by 2, same units as above
    snapshotBinary = pynbody.new(star=2,gas=nParticles)
    snapshotBinary['eps'] = 0.01*SimArray(np.ones(nParticles+2, dtype=np.float32), pos_unit)
    snapshotBinary['metals'] = SimArray(np.zeros(nParticles+2, dtype=np.float32))
    snapshotBinary['vel'].units = v_unit
    snapshotBinary['pos'].units = pos_unit
    snapshotBinary['mass'].units = snapshot['mass'].units
    snapshotBinary['rho'] = SimArray(np.zeros(nParticles+2, dtype=np.float32))

    #Assign gas particles with calculated/given values from above
    snapshotBinary.gas['pos'] = snapshot.gas['pos']
    snapshotBinary.gas['vel'] = snapshot.gas['vel']
    snapshotBinary.gas['temp'] = snapshot.gas['temp']
    snapshotBinary.gas['rho'] = snapshot.gas['rho']
    snapshotBinary.gas['eps'] = snapshot.gas['eps']
    snapshotBinary.gas['mass'] = snapshot.gas['mass']
    snapshotBinary.gas['metals'] = snapshot.gas['metals']

    #Load Binary system obj to initialize system
    binsys = ICobj.settings.physical.binsys
    m_disk = isaac.strip_units(np.sum(snapshotBinary.gas['mass']))
    # Fold the disk mass into the primary so the orbit is computed for the
    # combined (star + disk) mass; the disk mass is subtracted back out when
    # the star particle's mass is assigned below.
    binsys.m1 = binsys.m1 + m_disk
    #Recompute cartesian coords considering primary as m1+m_disk
    binsys.computeCartesian()

    x1,x2,v1,v2 = binsys.generateICs()

    #Assign position, velocity assuming CCW orbit
    snapshotBinary.star[0]['pos'] = SimArray(x1,pos_unit)
    snapshotBinary.star[0]['vel'] = SimArray(v1,v_unit)
    snapshotBinary.star[1]['pos'] = SimArray(x2,pos_unit)
    snapshotBinary.star[1]['vel'] = SimArray(v2,v_unit)

    """
    We have the binary positions about their center of mass, (0,0,0), so
    shift the position, velocity of the gas disk to be around the primary.
    """
    snapshotBinary.gas['pos'] += snapshotBinary.star[0]['pos']
    snapshotBinary.gas['vel'] += snapshotBinary.star[0]['vel']

    #Set stellar masses: Create simArray for mass, convert units to simulation mass units
    snapshotBinary.star[0]['mass'] = SimArray(binsys.m1-m_disk,m_unit)
    snapshotBinary.star[1]['mass'] = SimArray(binsys.m2,m_unit)
    snapshotBinary.star['metals'] = SimArray(star_metals)

    #Now that everything has masses and positions, adjust positions so the
    #system center of mass corresponds to the origin
    # NOTE(review): the recentering below is deliberately disabled (left as a
    # string literal, not executed).
    """
    com = binaryUtils.computeCOM(snapshotBinary.stars,snapshotBinary.gas)
    print com
    snapshotBinary.stars['pos'] -= com
    snapshotBinary.gas['pos'] -= com
    """

    print 'Wrapping up'
    # Now set the star particle's tform to a negative number.  This allows
    # UW ChaNGa treat it as a sink particle.
    snapshotBinary.star['tform'] = -1.0

    #Set sink radius, stellar smoothing length as fraction of distance
    #from primary to inner edge of the disk
    r_sink = eps
    snapshotBinary.star[0]['eps'] = SimArray(r_sink/2.0,pos_unit)
    snapshotBinary.star[1]['eps'] = SimArray(r_sink/2.0,pos_unit)
    param['dSinkBoundOrbitRadius'] = r_sink
    param['dSinkRadius'] = r_sink
    param['dSinkMassMin'] = 0.9 * binsys.m2
    param['bDoSinks'] = 1

    return snapshotBinary, param, director


def make_director(ICobj, res=1200):
    """
    Build a ChaNGa .director dictionary (rendering settings) for ICobj.

    :param ICobj: initial-conditions object; its settings supply the sigma
                  profile, mass scale and snapshot file name.
    :param res: image resolution in pixels (square image of res x res).
    :returns: dict of director key/value pairs.
    """
    director = {}
    director['render'] = 'tsc'
    director['FOV'] = 45.0
    director['clip'] = [0.0001, 500]
    director['up'] = [1, 0, 0]
    director['project'] = 'ortho'
    director['softgassph'] = 'softgassph'
    director['physical'] = 'physical'
    director['size'] = [res, res]

    sig_set = ICobj.settings.sigma
    mScale = ICobj.settings.snapshot.mScale
    snapshot_name = ICobj.settings.filenames.snapshotName
    # Output files share the snapshot's base name (extension stripped).
    f_prefix = os.path.splitext(os.path.basename(snapshot_name))[0]
    director['file'] = f_prefix

    # Extra camera/colour settings are only filled in for MQWS-type
    # surface-density profiles; other kinds get the defaults above.
    if sig_set.kind == 'MQWS':
        rmax = sig_set.rout + 3*sig_set.rin
        zmax = float(rmax)
        director['eye'] = [0, 0, zmax]
        vmin = float(ICobj.rho(0, rmax))
        vmax = float(ICobj.rho.rho_binned[0,:].max())
        vmax *= mScale
        director['logscale'] = [vmin, 10*vmax]
        director['colgas'] = [1, 1, 1]

    return director
mit
gcd0318/python-oauth2
docs/examples/pyramid/base.py
5
2537
import json

from pyramid.response import Response as PyramidResponse

from oauth2.web import Response
from oauth2.error import OAuthInvalidError, \
    ClientNotFoundError, OAuthInvalidNoRedirectError, UnsupportedGrantError, ParameterMissingError
from oauth2.client_authenticator import ClientAuthenticator, request_body
from oauth2.tokengenerator import Uuid4


class Request(object):
    """
    Contains data of the current HTTP request.
    """
    def __init__(self, env):
        self.method = env.method
        self.params = env.json_body
        self.registry = env.registry
        # Fix: the original assigned ``env.registry`` here as well (an
        # apparent copy-paste slip), leaving ``self.headers`` pointing at the
        # Pyramid registry instead of the request headers.
        self.headers = env.headers

    def post_param(self, name):
        """Return the value of the POST/body parameter *name* (None if absent)."""
        return self.params.get(name)


class BaseAuthController(object):
    """
    Wires the python-oauth2 provider machinery into a Pyramid view.

    Subclasses must override :meth:`_get_client_store` and
    :meth:`_get_token_store` to supply concrete stores.
    """
    def __init__(self, request, site_adapter):
        self.request = Request(request)
        self.site_adapter = site_adapter
        self.token_generator = Uuid4()
        self.client_store = self._get_client_store()
        self.access_token_store = self._get_token_store()
        self.client_authenticator = ClientAuthenticator(
            client_store=self.client_store,
            source=request_body
        )
        self.grant_types = []

    @classmethod
    def _get_token_store(cls):
        # Fix: the original evaluated ``NotImplementedError`` without raising
        # it, so an unimplemented subclass silently got a None store.
        raise NotImplementedError

    @classmethod
    def _get_client_store(cls):
        # Fix: same missing ``raise`` as _get_token_store.
        raise NotImplementedError

    def add_grant(self, grant):
        """
        Adds a Grant that the provider should support.

        :param grant: An instance of a class that extends
                      :class:`oauth2.grant.GrantHandlerFactory`
        """
        if hasattr(grant, "expires_in"):
            self.token_generator.expires_in[grant.grant_type] = grant.expires_in

        if hasattr(grant, "refresh_expires_in"):
            self.token_generator.refresh_expires_in = grant.refresh_expires_in

        self.grant_types.append(grant)

    def _determine_grant_type(self, request):
        """Return the first grant handler that recognises *request*.

        :raises UnsupportedGrantError: if no registered grant matches.
        """
        for grant in self.grant_types:
            grant_handler = grant(request, self)
            if grant_handler is not None:
                return grant_handler
        raise UnsupportedGrantError

    def authenticate(self):
        """Run the OAuth2 token flow and return a Pyramid JSON response."""
        response = Response()

        grant_type = self._determine_grant_type(self.request)
        grant_type.read_validate_params(self.request)
        grant_type.process(self.request, response, {})

        return PyramidResponse(body=response.body,
                               status=response.status_code,
                               content_type="application/json")
mit
waseem18/oh-mainline
vendor/packages/Django/tests/regressiontests/middleware_exceptions/tests.py
66
39619
import sys

from django.conf import settings
from django.core.signals import got_request_exception
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.template import Template
from django.test import TestCase


class TestException(Exception):
    # Marker exception raised by the "bad" middleware classes so the tests
    # can tell deliberately provoked errors from genuine failures.
    pass


# A middleware base class that tracks which methods have been called

class TestMiddleware(object):
    """Records which middleware hooks were invoked; every hook is a no-op."""

    def __init__(self):
        # One flag per middleware hook; the test suite asserts on these
        # via assert_middleware_usage().
        self.process_request_called = False
        self.process_view_called = False
        self.process_response_called = False
        self.process_template_response_called = False
        self.process_exception_called = False

    def process_request(self, request):
        self.process_request_called = True

    def process_view(self, request, view_func, view_args, view_kwargs):
        self.process_view_called = True

    def process_template_response(self, request, response):
        # Returns the response unchanged so later middleware still runs.
        self.process_template_response_called = True
        return response

    def process_response(self, request, response):
        self.process_response_called = True
        return response

    def process_exception(self, request, exception):
        self.process_exception_called = True


# Middleware examples that do the right thing

class RequestMiddleware(TestMiddleware):
    """Short-circuits request processing by returning its own response."""
    def process_request(self, request):
        super(RequestMiddleware, self).process_request(request)
        return HttpResponse('Request Middleware')

class ViewMiddleware(TestMiddleware):
    """Short-circuits view processing by returning its own response."""
    def process_view(self, request, view_func, view_args, view_kwargs):
        super(ViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
        return HttpResponse('View Middleware')

class ResponseMiddleware(TestMiddleware):
    """Replaces the outgoing response with its own."""
    def process_response(self, request, response):
        super(ResponseMiddleware, self).process_response(request, response)
        return HttpResponse('Response Middleware')

class TemplateResponseMiddleware(TestMiddleware):
    """Replaces a TemplateResponse with a fresh TemplateResponse."""
    def process_template_response(self, request, response):
        super(TemplateResponseMiddleware, self).process_template_response(request, response)
        return TemplateResponse(request, Template('Template Response Middleware'))
class ExceptionMiddleware(TestMiddleware): def process_exception(self, request, exception): super(ExceptionMiddleware, self).process_exception(request, exception) return HttpResponse('Exception Middleware') # Sample middlewares that raise exceptions class BadRequestMiddleware(TestMiddleware): def process_request(self, request): super(BadRequestMiddleware, self).process_request(request) raise TestException('Test Request Exception') class BadViewMiddleware(TestMiddleware): def process_view(self, request, view_func, view_args, view_kwargs): super(BadViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs) raise TestException('Test View Exception') class BadTemplateResponseMiddleware(TestMiddleware): def process_template_response(self, request, response): super(BadTemplateResponseMiddleware, self).process_template_response(request, response) raise TestException('Test Template Response Exception') class BadResponseMiddleware(TestMiddleware): def process_response(self, request, response): super(BadResponseMiddleware, self).process_response(request, response) raise TestException('Test Response Exception') class BadExceptionMiddleware(TestMiddleware): def process_exception(self, request, exception): super(BadExceptionMiddleware, self).process_exception(request, exception) raise TestException('Test Exception Exception') class BaseMiddlewareExceptionTest(TestCase): urls = 'regressiontests.middleware_exceptions.urls' def setUp(self): self.exceptions = [] got_request_exception.connect(self._on_request_exception) self.client.handler.load_middleware() def tearDown(self): got_request_exception.disconnect(self._on_request_exception) self.exceptions = [] def _on_request_exception(self, sender, request, **kwargs): self.exceptions.append(sys.exc_info()) def _add_middleware(self, middleware): self.client.handler._request_middleware.insert(0, middleware.process_request) self.client.handler._view_middleware.insert(0, middleware.process_view) 
self.client.handler._template_response_middleware.append(middleware.process_template_response) self.client.handler._response_middleware.append(middleware.process_response) self.client.handler._exception_middleware.append(middleware.process_exception) def assert_exceptions_handled(self, url, errors, extra_error=None): try: response = self.client.get(url) except TestException: # Test client intentionally re-raises any exceptions being raised # during request handling. Hence actual testing that exception was # properly handled is done by relying on got_request_exception # signal being sent. pass except Exception as e: if type(extra_error) != type(e): self.fail("Unexpected exception: %s" % e) self.assertEqual(len(self.exceptions), len(errors)) for i, error in enumerate(errors): exception, value, tb = self.exceptions[i] self.assertEqual(value.args, (error, )) def assert_middleware_usage(self, middleware, request, view, template_response, response, exception): self.assertEqual(middleware.process_request_called, request) self.assertEqual(middleware.process_view_called, view) self.assertEqual(middleware.process_template_response_called, template_response) self.assertEqual(middleware.process_response_called, response) self.assertEqual(middleware.process_exception_called, exception) class MiddlewareTests(BaseMiddlewareExceptionTest): def test_process_request_middleware(self): pre_middleware = TestMiddleware() middleware = RequestMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/view/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_middleware(self): 
pre_middleware = TestMiddleware() middleware = ViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/view/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_response_middleware(self): pre_middleware = TestMiddleware() middleware = ResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/view/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, True, False, True, False) def test_process_template_response_middleware(self): pre_middleware = TestMiddleware() middleware = TemplateResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/template_response/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, True, True, False) self.assert_middleware_usage(middleware, True, True, True, True, False) self.assert_middleware_usage(post_middleware, True, True, True, True, False) def test_process_exception_middleware(self): pre_middleware = TestMiddleware() middleware = ExceptionMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) 
self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/view/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, True, False, True, False) def test_process_request_middleware_not_found(self): pre_middleware = TestMiddleware() middleware = RequestMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/not_found/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_middleware_not_found(self): pre_middleware = TestMiddleware() middleware = ViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/not_found/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_template_response_middleware_not_found(self): pre_middleware = TestMiddleware() middleware = TemplateResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) 
self.assert_exceptions_handled('/middleware_exceptions/not_found/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, True) self.assert_middleware_usage(middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_response_middleware_not_found(self): pre_middleware = TestMiddleware() middleware = ResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/not_found/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, True) self.assert_middleware_usage(middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_exception_middleware_not_found(self): pre_middleware = TestMiddleware() middleware = ExceptionMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/not_found/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_request_middleware_exception(self): pre_middleware = TestMiddleware() middleware = RequestMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/error/', []) # Check that the right middleware methods have been invoked 
self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_middleware_exception(self): pre_middleware = TestMiddleware() middleware = ViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/error/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_response_middleware_exception(self): pre_middleware = TestMiddleware() middleware = ResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view'], Exception()) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, True) self.assert_middleware_usage(middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_exception_middleware_exception(self): pre_middleware = TestMiddleware() middleware = ExceptionMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/error/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) 
self.assert_middleware_usage(middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_request_middleware_null_view(self): pre_middleware = TestMiddleware() middleware = RequestMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/null_view/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_middleware_null_view(self): pre_middleware = TestMiddleware() middleware = ViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/null_view/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_response_middleware_null_view(self): pre_middleware = TestMiddleware() middleware = ResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/null_view/', [ "The view regressiontests.middleware_exceptions.views.null_view didn't return an HttpResponse object.", ], ValueError()) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) 
self.assert_middleware_usage(middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, True, False, True, False) def test_process_exception_middleware_null_view(self): pre_middleware = TestMiddleware() middleware = ExceptionMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/null_view/', [ "The view regressiontests.middleware_exceptions.views.null_view didn't return an HttpResponse object." ], ValueError()) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, True, False, True, False) def test_process_request_middleware_permission_denied(self): pre_middleware = TestMiddleware() middleware = RequestMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_middleware_permission_denied(self): pre_middleware = TestMiddleware() middleware = ViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, 
True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_response_middleware_permission_denied(self): pre_middleware = TestMiddleware() middleware = ResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, True) self.assert_middleware_usage(middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_exception_middleware_permission_denied(self): pre_middleware = TestMiddleware() middleware = ExceptionMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_template_response_error(self): middleware = TestMiddleware() self._add_middleware(middleware) self.assert_exceptions_handled('/middleware_exceptions/template_response_error/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(middleware, True, True, True, True, False) class BadMiddlewareTests(BaseMiddlewareExceptionTest): def test_process_request_bad_middleware(self): pre_middleware = TestMiddleware() bad_middleware = BadRequestMiddleware() post_middleware = TestMiddleware() 
self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Request Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(bad_middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_bad_middleware(self): pre_middleware = TestMiddleware() bad_middleware = BadViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test View Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_template_response_bad_middleware(self): pre_middleware = TestMiddleware() bad_middleware = BadTemplateResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/template_response/', ['Test Template Response Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, True, True, False) self.assert_middleware_usage(post_middleware, True, True, True, True, False) def test_process_response_bad_middleware(self): pre_middleware = TestMiddleware() bad_middleware = BadResponseMiddleware() post_middleware = TestMiddleware() 
self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Response Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, False, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, True, False, True, False) def test_process_exception_bad_middleware(self): pre_middleware = TestMiddleware() bad_middleware = BadExceptionMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/view/', []) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, True, False, True, False) def test_process_request_bad_middleware_not_found(self): pre_middleware = TestMiddleware() bad_middleware = BadRequestMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Request Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(bad_middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_bad_middleware_not_found(self): pre_middleware = TestMiddleware() bad_middleware = BadViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) 
self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test View Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_response_bad_middleware_not_found(self): pre_middleware = TestMiddleware() bad_middleware = BadResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Response Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, False, True) self.assert_middleware_usage(bad_middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_exception_bad_middleware_not_found(self): pre_middleware = TestMiddleware() bad_middleware = BadExceptionMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Exception Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_request_bad_middleware_exception(self): pre_middleware = TestMiddleware() bad_middleware = BadRequestMiddleware() post_middleware = TestMiddleware() 
self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Request Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(bad_middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_bad_middleware_exception(self): pre_middleware = TestMiddleware() bad_middleware = BadViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test View Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_response_bad_middleware_exception(self): pre_middleware = TestMiddleware() bad_middleware = BadResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view', 'Test Response Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, False, True) self.assert_middleware_usage(bad_middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_exception_bad_middleware_exception(self): pre_middleware = TestMiddleware() bad_middleware = BadExceptionMiddleware() post_middleware = 
TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Exception Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_request_bad_middleware_null_view(self): pre_middleware = TestMiddleware() bad_middleware = BadRequestMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test Request Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(bad_middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_bad_middleware_null_view(self): pre_middleware = TestMiddleware() bad_middleware = BadViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test View Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_response_bad_middleware_null_view(self): pre_middleware = TestMiddleware() bad_middleware = BadResponseMiddleware() post_middleware = 
TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/null_view/', [ "The view regressiontests.middleware_exceptions.views.null_view didn't return an HttpResponse object.", 'Test Response Exception' ]) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, False, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, True, False, True, False) def test_process_exception_bad_middleware_null_view(self): pre_middleware = TestMiddleware() bad_middleware = BadExceptionMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/null_view/', [ "The view regressiontests.middleware_exceptions.views.null_view didn't return an HttpResponse object." 
], ValueError()) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, True, False, True, False) def test_process_request_bad_middleware_permission_denied(self): pre_middleware = TestMiddleware() bad_middleware = BadRequestMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Request Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, False, False, True, False) self.assert_middleware_usage(bad_middleware, True, False, False, True, False) self.assert_middleware_usage(post_middleware, False, False, False, True, False) def test_process_view_bad_middleware_permission_denied(self): pre_middleware = TestMiddleware() bad_middleware = BadViewMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test View Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, False) self.assert_middleware_usage(post_middleware, True, False, False, True, False) def test_process_response_bad_middleware_permission_denied(self): pre_middleware = TestMiddleware() bad_middleware = BadResponseMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) 
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Response Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, False, True) self.assert_middleware_usage(bad_middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) def test_process_exception_bad_middleware_permission_denied(self): pre_middleware = TestMiddleware() bad_middleware = BadExceptionMiddleware() post_middleware = TestMiddleware() self._add_middleware(post_middleware) self._add_middleware(bad_middleware) self._add_middleware(pre_middleware) self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Exception Exception']) # Check that the right middleware methods have been invoked self.assert_middleware_usage(pre_middleware, True, True, False, True, False) self.assert_middleware_usage(bad_middleware, True, True, False, True, True) self.assert_middleware_usage(post_middleware, True, True, False, True, True) _missing = object() class RootUrlconfTests(TestCase): urls = 'regressiontests.middleware_exceptions.urls' def test_missing_root_urlconf(self): try: original_ROOT_URLCONF = settings.ROOT_URLCONF del settings.ROOT_URLCONF except AttributeError: original_ROOT_URLCONF = _missing self.assertRaises(AttributeError, self.client.get, "/middleware_exceptions/view/" ) if original_ROOT_URLCONF is not _missing: settings.ROOT_URLCONF = original_ROOT_URLCONF
agpl-3.0
pranav01/kernel_mediatek_sprout
tools/perf/python/twatch.py
7370
1334
#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # twatch - Experimental use of the perf python interface # Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com> # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import perf def main(): cpus = perf.cpu_map() threads = perf.thread_map() evsel = perf.evsel(task = 1, comm = 1, mmap = 0, wakeup_events = 1, watermark = 1, sample_id_all = 1, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) evlist.mmap() while True: evlist.poll(timeout = -1) for cpu in cpus: event = evlist.read_on_cpu(cpu) if not event: continue print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu, event.sample_pid, event.sample_tid), print event if __name__ == '__main__': main()
gpl-2.0
Aldriana/ShadowCraft-Engine
tests/objects_tests/procs_tests.py
3
3050
import unittest
from shadowcraft.objects import procs

class TestProcsList(unittest.TestCase):
    """Tests for procs.ProcsList: construction, attribute lookup and the
    stat/damage filtering helpers."""

    def setUp(self):
        # Two known procs so the filtering tests below have a mixed
        # population (per the assertions, exactly one of them is a
        # damage proc).
        self.procsList = procs.ProcsList('darkmoon_card_hurricane','heroic_left_eye_of_rajh')

    def test__init__(self):
        # Unknown proc names must be rejected with InvalidProcException.
        self.assertRaises(procs.InvalidProcException, procs.ProcsList, 'fake_proc')
        # A single-proc list reports exactly one proc when unfiltered.
        self.procsList = procs.ProcsList('darkmoon_card_hurricane')
        self.assertEqual(len(self.procsList.get_all_procs_for_stat(stat=None)), 1)

    def test__getattr__(self):
        # Unknown names raise AttributeError; a proc present in the list is
        # truthy, a valid-but-absent proc is falsy.
        self.assertRaises(AttributeError, self.procsList.__getattr__, 'fake_proc')
        self.assertTrue(self.procsList.darkmoon_card_hurricane)
        self.assertFalse(self.procsList.fluid_death)

    def test_get_all_procs_for_stat(self):
        # stat=None acts as "no filter": both setUp procs are returned;
        # an empty ProcsList yields nothing.
        self.assertEqual(len(self.procsList.get_all_procs_for_stat(stat=None)), 2)
        self.procsList = procs.ProcsList()
        self.assertEqual(len(self.procsList.get_all_procs_for_stat(stat=None)), 0)

    def test_get_all_damage_procs(self):
        # Exactly one of the two setUp procs deals damage.
        self.assertEqual(len(self.procsList.get_all_damage_procs()), 1)
        self.procsList = procs.ProcsList()
        self.assertEqual(len(self.procsList.get_all_damage_procs()), 0)

class TestProc(unittest.TestCase):
    """Tests for a single Proc built from the ProcsList.allowed_procs
    catalogue entry for 'prestors_talisman_of_machination'."""

    def setUp(self):
        # Construct the Proc directly from its catalogue entry so the tests
        # pin the expected data for this item.
        self.proc = procs.Proc(**procs.ProcsList.allowed_procs['prestors_talisman_of_machination'])

    def test__init__(self):
        # Spot-check every field against the catalogue values.
        self.assertEqual(self.proc.stat, 'haste')
        self.assertEqual(self.proc.value, 1926)
        self.assertEqual(self.proc.duration, 15)
        self.assertEqual(self.proc.proc_chance, .1)
        self.assertEqual(self.proc.trigger, 'all_attacks')
        self.assertEqual(self.proc.icd, 75)
        self.assertEqual(self.proc.max_stacks, 1)
        self.assertEqual(self.proc.on_crit, False)
        self.assertEqual(self.proc.proc_name, 'Nefarious Plot')
        self.assertEqual(self.proc.ppm, False)

    # Trigger predicates for an 'all_attacks' proc: auto attacks, strikes
    # and debuff application count; spells, heals, periodic effects and
    # bleeds do not.
    def test_procs_off_auto_attacks(self):
        self.assertTrue(self.proc.procs_off_auto_attacks())

    def test_procs_off_strikes(self):
        self.assertTrue(self.proc.procs_off_strikes())

    def test_procs_off_harmful_spells(self):
        self.assertFalse(self.proc.procs_off_harmful_spells())

    def test_procs_off_heals(self):
        self.assertFalse(self.proc.procs_off_heals())

    def test_procs_off_periodic_spell_damage(self):
        self.assertFalse(self.proc.procs_off_periodic_spell_damage())

    def test_procs_off_periodic_heals(self):
        self.assertFalse(self.proc.procs_off_periodic_heals())

    def test_procs_off_apply_debuff(self):
        self.assertTrue(self.proc.procs_off_apply_debuff())

    def test_procs_off_bleeds(self):
        self.assertFalse(self.proc.procs_off_bleeds())

    def test_procs_off_crit_only(self):
        # on_crit is False for this proc, so it is not crit-gated.
        self.assertFalse(self.proc.procs_off_crit_only())

    def test_is_ppm(self):
        self.assertFalse(self.proc.is_ppm())

    def test_proc_rate(self):
        # For this non-ppm proc, proc_rate() matches the flat proc_chance.
        self.assertEqual(self.proc.proc_rate(), self.proc.proc_chance)
lgpl-3.0
rlr/fjord
vendor/packages/translate-toolkit/translate/lang/th.py
30
1228
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2007 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """This module represents the Thai language. .. seealso:: http://en.wikipedia.org/wiki/Thai_language """ from translate.lang import common class th(common.Common): """This class represents Thai.""" puncdict = { u". ": u" ", #u"; ": u" ", # Test interaction with XML entities } # No capitalisation. While we can't do sentence segmentation, sentencecount # is useless. ignoretests = ["startcaps", "simplecaps", "sentencecount"]
bsd-3-clause
a-sevin/freeopcua
tests/gmock/gtest/test/gtest_env_var_test.py
28
3489
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test correctly parses environment variables."""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import gtest_test_utils


IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'

COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')

# Private copy of the parent environment; SetEnvVar mutates this copy and it
# is passed to every child process, so the test runner's own os.environ is
# never touched.
environ = os.environ.copy()


def AssertEq(expected, actual):
  # Minimal assertion helper: print both values before raising so a failure
  # is diagnosable from the test log (bare AssertionError carries no detail).
  if expected != actual:
    print('Expected: %s' % (expected,))
    print('  Actual: %s' % (actual,))
    raise AssertionError


def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is not None:
    environ[env_var] = value
  elif env_var in environ:
    del environ[env_var]


def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""

  # When flag is None the binary runs with no argument, making it report the
  # flag's current (env-derived or default) value.
  args = [COMMAND]
  if flag is not None:
    args += [flag]
  return gtest_test_utils.Subprocess(args, env=environ).output


def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""

  # With GTEST_<FLAG> set, the flag must report test_val; once the variable
  # is unset again it must fall back to default_val.
  env_var = 'GTEST_' + flag.upper()
  SetEnvVar(env_var, test_val)
  AssertEq(test_val, GetFlag(flag))
  SetEnvVar(env_var, None)
  AssertEq(default_val, GetFlag(flag))


class GTestEnvVarTest(gtest_test_utils.TestCase):
  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""

    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')

    # Death-test flags are only meaningful on Linux here.
    if IS_LINUX:
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')


if __name__ == '__main__':
  gtest_test_utils.Main()
lgpl-3.0
rishilification/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/commandtest.py
131
2608
# Copyright (C) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.webkitunittest import TestCase
from webkitpy.tool.mocktool import MockOptions, MockTool


class CommandsTest(TestCase):
    """Base class for command tests: executes a command against a fully
    stocked mock options/tool pair and asserts on its captured output."""

    def assert_execute_outputs(self, command, args=None, expected_stdout="", expected_stderr="", expected_exception=None,
                               expected_logs=None, options=None, tool=None):
        """Run *command* with mocked options/tool and compare its stdout,
        stderr, logs, and any raised exception against the expected values.

        Fix: args/options/tool previously used mutable or stateful default
        arguments ([], MockOptions(), MockTool()).  Python evaluates defaults
        once at function-definition time, and this method mutates `options`
        in place, so the same MockOptions/MockTool instances (with any
        accumulated state) were silently shared by every call that relied on
        the defaults.  Fresh instances are now created per call; callers that
        pass explicit values are unaffected.
        """
        if args is None:
            args = []
        if options is None:
            options = MockOptions()
        if tool is None:
            tool = MockTool()
        # Populate the option set that webkitpy commands expect to find.
        options.blocks = None
        options.cc = 'MOCK cc'
        options.component = 'MOCK component'
        options.confirm = True
        options.email = 'MOCK email'
        options.git_commit = 'MOCK git commit'
        options.obsolete_patches = True
        options.open_bug = True
        options.port = 'MOCK port'
        options.update_changelogs = False
        options.quiet = True
        options.reviewer = 'MOCK reviewer'
        command.bind_to_tool(tool)
        OutputCapture().assert_outputs(self, command.execute, [options, args, tool], expected_stdout=expected_stdout,
                                       expected_stderr=expected_stderr, expected_exception=expected_exception,
                                       expected_logs=expected_logs)
bsd-3-clause
alanwells/donkey
donkeycar/templates/config_defaults.py
1
1059
"""
CAR CONFIG

This file is read by your car application's manage.py script to change the car
performance.

EXAMPLE
-----------
import dk
cfg = dk.load_config(config_path='~/d2/config.py')
print(cfg.CAMERA_RESOLUTION)
"""


import os

#PATHS
# CAR_PATH/PACKAGE_PATH: directory containing this config file; data and
# model folders live beneath it.
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))

DATA_PATH = os.path.join(CAR_PATH, 'data')
MODELS_PATH = os.path.join(CAR_PATH, 'models')

#VEHICLE
DRIVE_LOOP_HZ = 20      # frequency of the main drive loop
MAX_LOOPS = None        # None = no cap on the number of loop iterations

#CAMERA
CAMERA_RESOLUTION = (160, 120)      # (width, height) in pixels
CAMERA_FRAMERATE = DRIVE_LOOP_HZ   # one frame per drive-loop tick

#STEERING
# PWM pulse values for the steering servo endpoints — calibrate per car.
STEERING_CHANNEL = 1
STEERING_LEFT_PWM = 460
STEERING_RIGHT_PWM = 290

#THROTTLE
# PWM pulse values for full forward / stop / full reverse — calibrate per car.
THROTTLE_CHANNEL = 0
THROTTLE_FORWARD_PWM = 500
THROTTLE_STOPPED_PWM = 370
THROTTLE_REVERSE_PWM = 220

#TRAINING
BATCH_SIZE = 128
TRAIN_TEST_SPLIT = 0.8      # fraction of records used for training

#JOYSTICK
JOYSTICK_MAX_THROTTLE=1         # scale factor applied to joystick throttle
JOYSTICK_STEERING_SCALE=1       # scale factor applied to joystick steering
USE_JOYSTICK_AS_DEFAULT = False
AUTO_RECORD_ON_THROTTLE = True  # start recording whenever throttle is applied

#ROTARY ENCODER
ROTARY_ENCODER_MM_PER_TICK=0.306096     # wheel travel per encoder tick, in mm
ROTARY_ENCODER_PIN=27                   # GPIO pin the encoder is wired to
MAX_VELOCITY=7.0

#THROTTLE PID
# Gains for the throttle PID controller (proportional/derivative/integral).
THROTTLE_PID_P=1.0
THROTTLE_PID_D=0.2
THROTTLE_PID_I=0.1
mit
wbyne/QGIS
python/plugins/processing/algs/lidar/lastools/lascontrol.py
5
4110
# -*- coding: utf-8 -*-

"""
***************************************************************************
    lascontrol.py
    ---------------------
    Date                 : May 2016
    Copyright            : (C) 2016 by Martin Isenburg
    Email                : martin near rapidlasso point com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str

__author__ = 'Martin Isenburg'
__date__ = 'May 2016'
__copyright__ = '(C) 2016, Martin Isenburg'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

import os
from .LAStoolsUtils import LAStoolsUtils
from .LAStoolsAlgorithm import LAStoolsAlgorithm

from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection


class lascontrol(LAStoolsAlgorithm):
    """Processing wrapper for the LAStools 'lascontrol' executable: checks a
    LAS/LAZ point cloud's elevations against ground control points and can
    optionally translate away the average vertical error."""

    # Keys used to register and read this algorithm's parameters.
    CONTROL_POINT_FILE = "CONTROL_POINT_FILE"
    PARSE_STRING = "PARSE_STRING"
    USE_POINTS = "USE_POINTS"
    # The selection index chooses which point classes feed the elevation
    # check (class numbers per the option labels below).
    # NOTE(review): "buldings" is a typo, but this is a user-visible UI
    # string — changing it is a UI change and is left untouched here.
    USE_POINTS_LIST = ["all", "ground (2)", "ground (2) and keypoints (8)",
                       "ground (2), buldings (6), and keypoints (8)"]
    ADJUST_Z = "ADJUST_Z"

    def defineCharacteristics(self):
        # Declare the algorithm's name/group plus the GUI parameters shown
        # to the user.
        self.name, self.i18n_name = self.trAlgorithm('lascontrol')
        self.group, self.i18n_group = self.trAlgorithm('LAStools')
        self.addParametersVerboseGUI()
        self.addParametersPointInputGUI()
        self.addParameter(ParameterFile(lascontrol.CONTROL_POINT_FILE,
                                        self.tr("ASCII text file of control points"),
                                        False, False))
        self.addParameter(ParameterString(lascontrol.PARSE_STRING,
                                          self.tr("parse string marking which columns are xyz (use 's' for skip)"),
                                          "sxyz", False, False))
        self.addParameter(ParameterSelection(lascontrol.USE_POINTS,
                                             self.tr("which points to use for elevation checks"),
                                             lascontrol.USE_POINTS_LIST, 0))
        self.addParameter(ParameterBoolean(lascontrol.ADJUST_Z,
                                           self.tr("adjust z elevation by translating away the average error"),
                                           False))
        self.addParametersAdditionalGUI()

    def processAlgorithm(self, progress):
        # Build the lascontrol command line from the chosen parameter values
        # and hand it to the shared LAStools runner.
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lascontrol")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputCommands(commands)
        file = self.getParameterValue(lascontrol.CONTROL_POINT_FILE)
        if file is not None:
            # Control-point file path is quoted to survive spaces.
            commands.append("-cp")
            commands.append('"' + file + '"')
        parse = self.getParameterValue(lascontrol.PARSE_STRING)
        if parse is not None:
            commands.append("-parse")
            commands.append(parse)
        use_point = self.getParameterValue(lascontrol.USE_POINTS)
        # Cumulative -keep_class filter driven by the selection index:
        # >0 keeps ground (2); >1 also keeps keypoints (8); >2 also keeps
        # buildings (6) — matching the USE_POINTS_LIST option labels.
        if use_point > 0:
            commands.append("-keep_class")
            commands.append(str(2))
            if use_point > 1:
                commands.append(str(8))
            if use_point > 2:
                commands.append(str(6))
        if self.getParameterValue(lascontrol.ADJUST_Z):
            commands.append("-adjust_z")
            # NOTE(review): "-odix _adjusted" is appended as a single list
            # element rather than two; this only works if the runner joins
            # arguments with spaces — TODO confirm against
            # LAStoolsUtils.runLAStools.
            commands.append("-odix _adjusted")
            commands.append("-olaz")
        self.addParametersAdditionalCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
gpl-2.0
andyfaff/scipy
scipy/optimize/_trustregion_constr/qp_subproblem.py
21
22596
"""Equality-constrained quadratic programming solvers.""" from scipy.sparse import (linalg, bmat, csc_matrix) from math import copysign import numpy as np from numpy.linalg import norm __all__ = [ 'eqp_kktfact', 'sphere_intersections', 'box_intersections', 'box_sphere_intersections', 'inside_box_boundaries', 'modified_dogleg', 'projected_cg' ] # For comparison with the projected CG def eqp_kktfact(H, c, A, b): """Solve equality-constrained quadratic programming (EQP) problem. Solve ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` using direct factorization of the KKT system. Parameters ---------- H : sparse matrix, shape (n, n) Hessian matrix of the EQP problem. c : array_like, shape (n,) Gradient of the quadratic objective function. A : sparse matrix Jacobian matrix of the EQP problem. b : array_like, shape (m,) Right-hand side of the constraint equation. Returns ------- x : array_like, shape (n,) Solution of the KKT problem. lagrange_multipliers : ndarray, shape (m,) Lagrange multipliers of the KKT problem. """ n, = np.shape(c) # Number of parameters m, = np.shape(b) # Number of constraints # Karush-Kuhn-Tucker matrix of coefficients. # Defined as in Nocedal/Wright "Numerical # Optimization" p.452 in Eq. (16.4). kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]])) # Vector of coefficients. kkt_vec = np.hstack([-c, -b]) # TODO: Use a symmetric indefinite factorization # to solve the system twice as fast (because # of the symmetry). lu = linalg.splu(kkt_matrix) kkt_sol = lu.solve(kkt_vec) x = kkt_sol[:n] lagrange_multipliers = -kkt_sol[n:n+m] return x, lagrange_multipliers def sphere_intersections(z, d, trust_radius, entire_line=False): """Find the intersection between segment (or line) and spherical constraints. Find the intersection between the segment (or line) defined by the parametric equation ``x(t) = z + t*d`` and the ball ``||x|| <= trust_radius``. Parameters ---------- z : array_like, shape (n,) Initial point. d : array_like, shape (n,) Direction. 
trust_radius : float Ball radius. entire_line : bool, optional When ``True``, the function returns the intersection between the line ``x(t) = z + t*d`` (``t`` can assume any value) and the ball ``||x|| <= trust_radius``. When ``False``, the function returns the intersection between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the ball. Returns ------- ta, tb : float The line/segment ``x(t) = z + t*d`` is inside the ball for for ``ta <= t <= tb``. intersect : bool When ``True``, there is a intersection between the line/segment and the sphere. On the other hand, when ``False``, there is no intersection. """ # Special case when d=0 if norm(d) == 0: return 0, 0, False # Check for inf trust_radius if np.isinf(trust_radius): if entire_line: ta = -np.inf tb = np.inf else: ta = 0 tb = 1 intersect = True return ta, tb, intersect a = np.dot(d, d) b = 2 * np.dot(z, d) c = np.dot(z, z) - trust_radius**2 discriminant = b*b - 4*a*c if discriminant < 0: intersect = False return 0, 0, intersect sqrt_discriminant = np.sqrt(discriminant) # The following calculation is mathematically # equivalent to: # ta = (-b - sqrt_discriminant) / (2*a) # tb = (-b + sqrt_discriminant) / (2*a) # but produce smaller round off errors. # Look at Matrix Computation p.97 # for a better justification. aux = b + copysign(sqrt_discriminant, b) ta = -aux / (2*a) tb = -2*c / aux ta, tb = sorted([ta, tb]) if entire_line: intersect = True else: # Checks to see if intersection happens # within vectors length. if tb < 0 or ta > 1: intersect = False ta = 0 tb = 0 else: intersect = True # Restrict intersection interval # between 0 and 1. ta = max(0, ta) tb = min(1, tb) return ta, tb, intersect def box_intersections(z, d, lb, ub, entire_line=False): """Find the intersection between segment (or line) and box constraints. Find the intersection between the segment (or line) defined by the parametric equation ``x(t) = z + t*d`` and the rectangular box ``lb <= x <= ub``. 
Parameters ---------- z : array_like, shape (n,) Initial point. d : array_like, shape (n,) Direction. lb : array_like, shape (n,) Lower bounds to each one of the components of ``x``. Used to delimit the rectangular box. ub : array_like, shape (n, ) Upper bounds to each one of the components of ``x``. Used to delimit the rectangular box. entire_line : bool, optional When ``True``, the function returns the intersection between the line ``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular box. When ``False``, the function returns the intersection between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box. Returns ------- ta, tb : float The line/segment ``x(t) = z + t*d`` is inside the box for for ``ta <= t <= tb``. intersect : bool When ``True``, there is a intersection between the line (or segment) and the rectangular box. On the other hand, when ``False``, there is no intersection. """ # Make sure it is a numpy array z = np.asarray(z) d = np.asarray(d) lb = np.asarray(lb) ub = np.asarray(ub) # Special case when d=0 if norm(d) == 0: return 0, 0, False # Get values for which d==0 zero_d = (d == 0) # If the boundaries are not satisfied for some coordinate # for which "d" is zero, there is no box-line intersection. if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any(): intersect = False return 0, 0, intersect # Remove values for which d is zero not_zero_d = np.logical_not(zero_d) z = z[not_zero_d] d = d[not_zero_d] lb = lb[not_zero_d] ub = ub[not_zero_d] # Find a series of intervals (t_lb[i], t_ub[i]). t_lb = (lb-z) / d t_ub = (ub-z) / d # Get the intersection of all those intervals. ta = max(np.minimum(t_lb, t_ub)) tb = min(np.maximum(t_lb, t_ub)) # Check if intersection is feasible if ta <= tb: intersect = True else: intersect = False # Checks to see if intersection happens within vectors length. 
if not entire_line: if tb < 0 or ta > 1: intersect = False ta = 0 tb = 0 else: # Restrict intersection interval between 0 and 1. ta = max(0, ta) tb = min(1, tb) return ta, tb, intersect def box_sphere_intersections(z, d, lb, ub, trust_radius, entire_line=False, extra_info=False): """Find the intersection between segment (or line) and box/sphere constraints. Find the intersection between the segment (or line) defined by the parametric equation ``x(t) = z + t*d``, the rectangular box ``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``. Parameters ---------- z : array_like, shape (n,) Initial point. d : array_like, shape (n,) Direction. lb : array_like, shape (n,) Lower bounds to each one of the components of ``x``. Used to delimit the rectangular box. ub : array_like, shape (n, ) Upper bounds to each one of the components of ``x``. Used to delimit the rectangular box. trust_radius : float Ball radius. entire_line : bool, optional When ``True``, the function returns the intersection between the line ``x(t) = z + t*d`` (``t`` can assume any value) and the constraints. When ``False``, the function returns the intersection between the segment ``x(t) = z + t*d``, ``0 <= t <= 1`` and the constraints. extra_info : bool, optional When ``True``, the function returns ``intersect_sphere`` and ``intersect_box``. Returns ------- ta, tb : float The line/segment ``x(t) = z + t*d`` is inside the rectangular box and inside the ball for for ``ta <= t <= tb``. intersect : bool When ``True``, there is a intersection between the line (or segment) and both constraints. On the other hand, when ``False``, there is no intersection. sphere_info : dict, optional Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` for which the line intercepts the ball. And a boolean value indicating whether the sphere is intersected by the line. box_info : dict, optional Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]`` for which the line intercepts the box. 
And a boolean value indicating whether the box is intersected by the line. """ ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub, entire_line) ta_s, tb_s, intersect_s = sphere_intersections(z, d, trust_radius, entire_line) ta = np.maximum(ta_b, ta_s) tb = np.minimum(tb_b, tb_s) if intersect_b and intersect_s and ta <= tb: intersect = True else: intersect = False if extra_info: sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s} box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b} return ta, tb, intersect, sphere_info, box_info else: return ta, tb, intersect def inside_box_boundaries(x, lb, ub): """Check if lb <= x <= ub.""" return (lb <= x).all() and (x <= ub).all() def reinforce_box_boundaries(x, lb, ub): """Return clipped value of x""" return np.minimum(np.maximum(x, lb), ub) def modified_dogleg(A, Y, b, trust_radius, lb, ub): """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region. Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2`` subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification of the classical dogleg approach. Parameters ---------- A : LinearOperator (or sparse matrix or ndarray), shape (m, n) Matrix ``A`` in the minimization problem. It should have dimension ``(m, n)`` such that ``m < n``. Y : LinearOperator (or sparse matrix or ndarray), shape (n, m) LinearOperator that apply the projection matrix ``Q = A.T inv(A A.T)`` to the vector. The obtained vector ``y = Q x`` being the minimum norm solution of ``A y = x``. b : array_like, shape (m,) Vector ``b``in the minimization problem. trust_radius: float Trust radius to be considered. Delimits a sphere boundary to the problem. lb : array_like, shape (n,) Lower bounds to each one of the components of ``x``. It is expected that ``lb <= 0``, otherwise the algorithm may fail. If ``lb[i] = -Inf``, the lower bound for the ith component is just ignored. ub : array_like, shape (n, ) Upper bounds to each one of the components of ``x``. 
It is expected that ``ub >= 0``, otherwise the algorithm may fail. If ``ub[i] = Inf``, the upper bound for the ith component is just ignored. Returns ------- x : array_like, shape (n,) Solution to the problem. Notes ----- Based on implementations described in pp. 885-886 from [1]_. References ---------- .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. "An interior point algorithm for large-scale nonlinear programming." SIAM Journal on Optimization 9.4 (1999): 877-900. """ # Compute minimum norm minimizer of 1/2*|| A x + b ||^2. newton_point = -Y.dot(b) # Check for interior point if inside_box_boundaries(newton_point, lb, ub) \ and norm(newton_point) <= trust_radius: x = newton_point return x # Compute gradient vector ``g = A.T b`` g = A.T.dot(b) # Compute Cauchy point # `cauchy_point = g.T g / (g.T A.T A g)``. A_g = A.dot(g) cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g # Origin origin_point = np.zeros_like(cauchy_point) # Check the segment between cauchy_point and newton_point # for a possible solution. z = cauchy_point p = newton_point - cauchy_point _, alpha, intersect = box_sphere_intersections(z, p, lb, ub, trust_radius) if intersect: x1 = z + alpha*p else: # Check the segment between the origin and cauchy_point # for a possible solution. z = origin_point p = cauchy_point _, alpha, _ = box_sphere_intersections(z, p, lb, ub, trust_radius) x1 = z + alpha*p # Check the segment between origin and newton_point # for a possible solution. z = origin_point p = newton_point _, alpha, _ = box_sphere_intersections(z, p, lb, ub, trust_radius) x2 = z + alpha*p # Return the best solution among x1 and x2. if norm(A.dot(x1) + b) < norm(A.dot(x2) + b): return x1 else: return x2 def projected_cg(H, c, Z, Y, b, trust_radius=np.inf, lb=None, ub=None, tol=None, max_iter=None, max_infeasible_iter=None, return_all=False): """Solve EQP problem with projected CG method. 
Solve equality-constrained quadratic programming problem ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` and, possibly, to trust region constraints ``||x|| < trust_radius`` and box constraints ``lb <= x <= ub``. Parameters ---------- H : LinearOperator (or sparse matrix or ndarray), shape (n, n) Operator for computing ``H v``. c : array_like, shape (n,) Gradient of the quadratic objective function. Z : LinearOperator (or sparse matrix or ndarray), shape (n, n) Operator for projecting ``x`` into the null space of A. Y : LinearOperator, sparse matrix, ndarray, shape (n, m) Operator that, for a given a vector ``b``, compute smallest norm solution of ``A x + b = 0``. b : array_like, shape (m,) Right-hand side of the constraint equation. trust_radius : float, optional Trust radius to be considered. By default, uses ``trust_radius=inf``, which means no trust radius at all. lb : array_like, shape (n,), optional Lower bounds to each one of the components of ``x``. If ``lb[i] = -Inf`` the lower bound for the i-th component is just ignored (default). ub : array_like, shape (n, ), optional Upper bounds to each one of the components of ``x``. If ``ub[i] = Inf`` the upper bound for the i-th component is just ignored (default). tol : float, optional Tolerance used to interrupt the algorithm. max_iter : int, optional Maximum algorithm iterations. Where ``max_inter <= n-m``. By default, uses ``max_iter = n-m``. max_infeasible_iter : int, optional Maximum infeasible (regarding box constraints) iterations the algorithm is allowed to take. By default, uses ``max_infeasible_iter = n-m``. return_all : bool, optional When ``true``, return the list of all vectors through the iterations. Returns ------- x : array_like, shape (n,) Solution of the EQP problem. info : Dict Dictionary containing the following: - niter : Number of iterations. - stop_cond : Reason for algorithm termination: 1. Iteration limit was reached; 2. Reached the trust-region boundary; 3. 
Negative curvature detected; 4. Tolerance was satisfied. - allvecs : List containing all intermediary vectors (optional). - hits_boundary : True if the proposed step is on the boundary of the trust region. Notes ----- Implementation of Algorithm 6.2 on [1]_. In the absence of spherical and box constraints, for sufficient iterations, the method returns a truly optimal result. In the presence of those constraints, the value returned is only a inexpensive approximation of the optimal value. References ---------- .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal. "On the solution of equality constrained quadratic programming problems arising in optimization." SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395. """ CLOSE_TO_ZERO = 1e-25 n, = np.shape(c) # Number of parameters m, = np.shape(b) # Number of constraints # Initial Values x = Y.dot(-b) r = Z.dot(H.dot(x) + c) g = Z.dot(r) p = -g # Store ``x`` value if return_all: allvecs = [x] # Values for the first iteration H_p = H.dot(p) rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) # If x > trust-region the problem does not have a solution. tr_distance = trust_radius - norm(x) if tr_distance < 0: raise ValueError("Trust region problem does not have a solution.") # If x == trust_radius, then x is the solution # to the optimization problem, since x is the # minimum norm solution to Ax=b. 
elif tr_distance < CLOSE_TO_ZERO: info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True} if return_all: allvecs.append(x) info['allvecs'] = allvecs return x, info # Set default tolerance if tol is None: tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO) # Set default lower and upper bounds if lb is None: lb = np.full(n, -np.inf) if ub is None: ub = np.full(n, np.inf) # Set maximum iterations if max_iter is None: max_iter = n-m max_iter = min(max_iter, n-m) # Set maximum infeasible iterations if max_infeasible_iter is None: max_infeasible_iter = n-m hits_boundary = False stop_cond = 1 counter = 0 last_feasible_x = np.zeros_like(x) k = 0 for i in range(max_iter): # Stop criteria - Tolerance : r.T g < tol if rt_g < tol: stop_cond = 4 break k += 1 # Compute curvature pt_H_p = H_p.dot(p) # Stop criteria - Negative curvature if pt_H_p <= 0: if np.isinf(trust_radius): raise ValueError("Negative curvature not allowed " "for unrestricted problems.") else: # Find intersection with constraints _, alpha, intersect = box_sphere_intersections( x, p, lb, ub, trust_radius, entire_line=True) # Update solution if intersect: x = x + alpha*p # Reinforce variables are inside box constraints. # This is only necessary because of roundoff errors. x = reinforce_box_boundaries(x, lb, ub) # Attribute information stop_cond = 3 hits_boundary = True break # Get next step alpha = rt_g / pt_H_p x_next = x + alpha*p # Stop criteria - Hits boundary if np.linalg.norm(x_next) >= trust_radius: # Find intersection with box constraints _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, trust_radius) # Update solution if intersect: x = x + theta*alpha*p # Reinforce variables are inside box constraints. # This is only necessary because of roundoff errors. x = reinforce_box_boundaries(x, lb, ub) # Attribute information stop_cond = 2 hits_boundary = True break # Check if ``x`` is inside the box and start counter if it is not. 
if inside_box_boundaries(x_next, lb, ub): counter = 0 else: counter += 1 # Whenever outside box constraints keep looking for intersections. if counter > 0: _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub, trust_radius) if intersect: last_feasible_x = x + theta*alpha*p # Reinforce variables are inside box constraints. # This is only necessary because of roundoff errors. last_feasible_x = reinforce_box_boundaries(last_feasible_x, lb, ub) counter = 0 # Stop after too many infeasible (regarding box constraints) iteration. if counter > max_infeasible_iter: break # Store ``x_next`` value if return_all: allvecs.append(x_next) # Update residual r_next = r + alpha*H_p # Project residual g+ = Z r+ g_next = Z.dot(r_next) # Compute conjugate direction step d rt_g_next = norm(g_next)**2 # g.T g = r.T g (ref [1]_ p.1389) beta = rt_g_next / rt_g p = - g_next + beta*p # Prepare for next iteration x = x_next g = g_next r = g_next rt_g = norm(g)**2 # g.T g = r.T Z g = r.T g (ref [1]_ p.1389) H_p = H.dot(p) if not inside_box_boundaries(x, lb, ub): x = last_feasible_x hits_boundary = True info = {'niter': k, 'stop_cond': stop_cond, 'hits_boundary': hits_boundary} if return_all: info['allvecs'] = allvecs return x, info
bsd-3-clause
sspreitzer/tahoe-lafs
setuptools-0.6c16dev6.egg/setuptools/command/scriptsetup.py
5
13095
from distutils.errors import DistutilsSetupError from setuptools import Command import sys class scriptsetup(Command): action = (sys.platform == "win32" and "set up .pyscript association and PATHEXT variable to run scripts" or "this does nothing on non-Windows platforms") user_options = [ ('allusers', 'a', 'make changes for all users of this Windows installation (requires Administrator privileges)'), ] boolean_options = ['allusers'] def initialize_options(self): self.allusers = False def finalize_options(self): pass def run(self): if sys.platform != "win32": print "\n'scriptsetup' isn't needed on non-Windows platforms." else: do_scriptsetup(self.allusers) def do_scriptsetup(allusers=False): print "\nSetting up environment to run scripts for %s..." % (allusers and "all users" or "the current user") from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, HKEY_CLASSES_ROOT, \ REG_SZ, REG_EXPAND_SZ, KEY_QUERY_VALUE, KEY_SET_VALUE, \ OpenKey, CreateKey, QueryValueEx, SetValueEx, FlushKey, CloseKey USER_ENV = "Environment" try: user_env = OpenKey(HKEY_CURRENT_USER, USER_ENV, 0, KEY_QUERY_VALUE) except WindowsError, e: raise DistutilsSetupError("I could not read the user environment from the registry.\n%r" % (e,)) SYSTEM_ENV = "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment" try: system_env = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_ENV, 0, KEY_QUERY_VALUE) except WindowsError, e: raise DistutilsSetupError("I could not read the system environment from the registry.\n%r" % (e,)) # HKEY_CLASSES_ROOT is a merged view that would only confuse us. 
# <http://technet.microsoft.com/en-us/library/cc739822(WS.10).aspx> USER_CLASSES = "SOFTWARE\\Classes" try: user_classes = OpenKey(HKEY_CURRENT_USER, USER_CLASSES, 0, KEY_QUERY_VALUE) except WindowsError, e: raise DistutilsSetupError("I could not read the user filetype associations from the registry.\n%r" % (e,)) SYSTEM_CLASSES = "SOFTWARE\\Classes" try: system_classes = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_CLASSES, 0, KEY_QUERY_VALUE) except WindowsError, e: raise DistutilsSetupError("I could not read the system filetype associations from the registry.\n%r" % (e,)) def query(key, subkey, what): try: (value, type) = QueryValueEx(key, subkey) except WindowsError, e: if e.winerror == 2: # not found return None raise DistutilsSetupError("I could not read %s from the registry.\n%r" % (what, e)) # It does not matter that we don't expand environment strings, in fact it's better not to. if type != REG_SZ and type != REG_EXPAND_SZ: raise DistutilsSetupError("I expected the registry entry for %s to have a string type (REG_SZ or REG_EXPAND_SZ), " "and was flummoxed by it having type code %r." % (what, type)) return (value, type) def open_and_query(key, path, subkey, what): try: read_key = OpenKey(key, path, 0, KEY_QUERY_VALUE) except WindowsError, e: if e.winerror == 2: # not found return None raise DistutilsSetupError("I could not read %s from the registry because I could not open " "the parent key.\n%r" % (what, e)) try: return query(read_key, subkey, what) finally: CloseKey(read_key) def update(key_name_path, subkey, desired_value, desired_type, goal, what): (key, name, path) = key_name_path (old_value, old_type) = open_and_query(key, path, subkey, what) or (None, None) if (old_value, old_type) == (desired_value, desired_type): print "Already done: %s." 
% (goal,) return False try: update_key = OpenKey(key, path, 0, KEY_SET_VALUE|KEY_QUERY_VALUE) except WindowsError, e: if e.winerror != 2: raise DistutilsSetupError("I tried to %s, but was not successful because I could not open " "the registry key %s\\%s for writing.\n%r" % (goal, name, path, e)) try: update_key = CreateKey(key, path) except WindowsError, e: raise DistutilsSetupError("I tried to %s, but was not successful because the registry key %s\\%s " "did not exist, and I was unable to create it.\n%r" % (goal, name, path, e)) (new_value, new_type) = (None, None) try: SetValueEx(update_key, subkey, 0, desired_type, desired_value) except WindowsError, e: raise DistutilsSetupError("I tried to %s, but was not able to set the subkey %r under %s\\%s to be %r.\n%r" % (goal, subkey, name, path, desired_value)) else: (new_value, new_type) = query(update_key, subkey, what) or (None, None) finally: FlushKey(update_key) CloseKey(update_key) if (new_value, new_type) != (desired_value, desired_type): raise DistutilsSetupError("I tried to %s by setting the subkey %r under %s\\%s to be %r, " "and the call to SetValueEx succeeded, but the value ended up as " "%r instead (it was previously %r). Maybe the update was unexpectedly virtualized?" % (goal, subkey, name, path, desired_value, new_value, old_value)) print "Done: %s." % (goal,) return True # Maintenance hazard: 'add_to_environment' and 'associate' use very similar, but not identical logic. 
def add_to_environment(varname, addition, change_allusers): changed = False what = "the %s environment variable %s" % (change_allusers and "system" or "user", varname) goal = "add %s to %s" % (addition, what) system_valueandtype = query(system_env, varname, "the system environment variable %s" % (varname,)) user_valueandtype = query(user_env, varname, "the user environment variable %s" % (varname,)) if change_allusers: (value, type) = system_valueandtype or (u'', REG_SZ) key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", SYSTEM_ENV) else: (value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ) key_name_path = (HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV) def path_append(value, addition): if value != "": return value + u';' + addition else: return addition if addition.lower() in value.lower().split(u';'): print "Already done: %s." % (goal,) else: changed |= update(key_name_path, varname, path_append(value, addition), type, goal, what) if change_allusers: # Also change any overriding environment entry for the current user. 
(user_value, user_type) = user_valueandtype or (u'', REG_SZ) split_value = user_value.lower().split(u';') if not (addition.lower() in split_value or u'%'+varname.lower()+u'%' in split_value): now_what = "the overriding user environment variable %s" % (varname,) changed |= update((HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV), varname, path_append(user_value, addition), user_type, "add %s to %s" % (addition, now_what), now_what) return changed def associate(ext, target, change_allusers): changed = False what = "the %s association for %s" % (change_allusers and "system" or "user", ext) goal = "associate the filetype %s with %s for %s" % (ext, target, change_allusers and "all users" or "the current user") try: if change_allusers: target_key = OpenKey(HKEY_LOCAL_MACHINE, "%s\\%s" % (SYSTEM_CLASSES, target), 0, KEY_QUERY_VALUE) else: target_key = OpenKey(HKEY_CLASSES_ROOT, target, 0, KEY_QUERY_VALUE) except WindowsError, e: raise DistutilsSetupError("I was going to %s, but that won't work because the %s class does not exist in the registry, " "as far as I can tell.\n%r" % (goal, target, e)) CloseKey(target_key) system_key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", "%s\\%s" % (SYSTEM_CLASSES, ext)) user_key_name_path = (HKEY_CURRENT_USER, "HKEY_CURRENT_USER", "%s\\%s" % (USER_CLASSES, ext)) system_valueandtype = open_and_query(system_classes, ext, "", "the system association for %s" % (ext,)) user_valueandtype = open_and_query(user_classes, ext, "", "the user association for %s" % (ext,)) if change_allusers: (value, type) = system_valueandtype or (u'', REG_SZ) key_name_path = system_key_name_path else: (value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ) key_name_path = user_key_name_path if value == target: print "Already done: %s." % (goal,) else: changed |= update(key_name_path, "", unicode(target), REG_SZ, goal, what) if change_allusers: # Also change any overriding association for the current user. 
(user_value, user_type) = user_valueandtype or (u'', REG_SZ) if user_value != target: changed |= update(user_key_name_path, "", unicode(target), REG_SZ, "associate the filetype %s with %s for the current user " \ "(because the system association is overridden)" % (ext, target), "the overriding user association for %s" % (ext,)) return changed def broadcast_settingchange(change_allusers): print "Broadcasting that the environment has changed, please wait..." # <http://support.microsoft.com/kb/104011/en-us> # <http://msdn.microsoft.com/en-us/library/ms644952(VS.85).aspx> # LRESULT WINAPI SendMessageTimeoutW(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam, # UINT fuFlags, UINT uTimeout, PDWORD_PTR lpdwResult); try: from ctypes import WINFUNCTYPE, POINTER, windll, addressof, c_wchar_p from ctypes.wintypes import LONG, HWND, UINT, WPARAM, LPARAM, DWORD SendMessageTimeout = WINFUNCTYPE(POINTER(LONG), HWND, UINT, WPARAM, LPARAM, UINT, UINT, POINTER(POINTER(DWORD))) \ (("SendMessageTimeoutW", windll.user32)) HWND_BROADCAST = 0xFFFF WM_SETTINGCHANGE = 0x001A SMTO_ABORTIFHUNG = 0x0002 SendMessageTimeout(HWND_BROADCAST, WM_SETTINGCHANGE, change_allusers and 1 or 0, addressof(c_wchar_p(u"Environment")), SMTO_ABORTIFHUNG, 5000, None); except Exception, e: print "Warning: %r" % (e,) changed_assoc = associate(".pyscript", "Python.File", allusers) changed_env = False try: changed_env |= add_to_environment("PATHEXT", ".pyscript", allusers) changed_env |= add_to_environment("PATHEXT", ".pyw", allusers) finally: CloseKey(user_env) CloseKey(system_env) if changed_assoc or changed_env: broadcast_settingchange(allusers) if changed_env: # whether logout is needed seems to randomly differ between installations # of XP, but it is not needed in Vista or later. 
try: import platform, re need_logout = not re.search(r'^[6-9]|([1-9][0-9]+)\.', platform.version()) except Exception, e: e # hush pyflakes need_logout = True if need_logout: print """ *********************************************************************** Changes have been made to the persistent environment, but they may not take effect in this Windows session. Running installed Python scripts from a Command Prompt may only work after you have logged out and back in again, or rebooted. *********************************************************************** """ else: print """ *********************************************************************** Changes have been made to the persistent environment, but not in this Command Prompt. Running installed Python scripts will only work from new Command Prompts opened from now on. *********************************************************************** """
gpl-2.0
mortenm12/P5
RecommenderSystem/NearestNeighbour/NearestNeighbour.py
1
5355
""" Implementation of K-Nearest Neighbour, with normalization and mean center. Morten Meyer Rasmussen Every page numbers is a reference to the book: Recommender Systems Handbook """ from DataAPI import * from AuxillaryMath import * import time # user2 is a user who is not the user self # returns an array of both the users ratings of movies the both have seen def find_both_rated_movies(user1, user2): rated = [[], []] for movie in user1.rated_movies: if int(movie) in user2.rated_movies: rated[0].append(user1.rated_movies[movie]) rated[1].append(user2.rated_movies[movie]) return rated # user2 is a user who is not the user self # returns the weight between self and user2 def weight(user1, user2): # page 124 data = find_both_rated_movies(user1, user2) return cos(data[0], data[1]) # movie is a movie in the dictionary all_movies # Returns the average of the users ratings, and the average of what other rat the movie, compared to normal def mean_center(user1, movie, list_of_users): # page 121 sum1 = 0 user_who_have_seen_this_movie = [] # makes a list of all users who have seen the movie for user2 in list_of_users: if movie in user2.rated_movies: user_who_have_seen_this_movie.append(user2) for user2 in user_who_have_seen_this_movie: sum1 += user2.rated_movies[movie] - user2.average_rating if len(user1.rated_movies) == 0 and len(user_who_have_seen_this_movie) == 0: return user1.average_rating + sum1 elif len(user1.rated_movies) == 0: return user1.average_rating + (sum1 / len(user_who_have_seen_this_movie)) elif len(user_who_have_seen_this_movie) == 0: return (user1.average_rating / len(user1.rated_movies)) + sum1 else: return (user1.average_rating / len(user1.rated_movies)) + (sum1 / len(user_who_have_seen_this_movie)) # k is the numbers of neighbours the algorithm should find, and movie is the movie ever neighbour should have rated # returns a list of k numbers of users who have the highest weight to the user self def find_k_nearest_neighbour(user1, k, movie, list_of_users): 
users = [] for user2 in list_of_users: if int(movie) in user2.rated_movies: users.insert(user2.id, [user2, weight(user1, user2)]) users.sort(key=lambda x: x[1]) return users[:k] # movie is a movie in the dictionary all_movies # return a recommendation for the user self on movie def recommend(user1, movie, list_of_users): # page 115 users = find_k_nearest_neighbour(user1, 5, movie, list_of_users) sum1 = 0 sum2 = 0 for user2 in users: sum1 += user2[1] * user2[0].rated_movies[int(movie)] sum2 += user2[1] if sum2 == 0: return mean_center(user1, movie, list_of_users) else: return (sum1 / sum2) + mean_center(user1, movie, list_of_users) def format_time(t): if t < 1: return "00:00:00" else: t_int = int(t) h = t_int / 3600 h_rest = t_int % 3600 m = h_rest / 60 s = h_rest % 60 return "{:02d}".format(int(h)) + ":" + "{:02d}".format(int(m)) + ":" + "{:02d}".format(int(s)) # loader user data into the list_of_user list_of_users = read_users_as_object_list() ratings = read_ratings_as_list("Test1") for rating in ratings: u_id = rating[0] m_id = rating[1] rat = rating[2] list_of_users[u_id-1].add_rating(m_id, rat) # loads all the movies into all_movies all_movies = read_movies_as_id_name_dict() # run through all users and calculates their average rating for user in list_of_users: user.calculate_average_rating() rating_matrix = [] for i in range(0, len(list_of_users)): rating_matrix.append([]) for j in range(0, len(all_movies)): rating_matrix[i].append(0.0) i = 0 starting_time = time.time() for user in list_of_users: i += 1 current_time = time.time() elapsed_time = current_time - starting_time remaining_time = ((elapsed_time * len(list_of_users)) / i) - elapsed_time print(round((i / len(list_of_users)) * 100, 1), "% tid brugt: ", format_time(elapsed_time), " tid tilbage: ", format_time(remaining_time)) for movie in all_movies: if movie not in user.rated_movies: rating_matrix[user.id-1][int(movie)-1] = recommend(user, movie, list_of_users) else: rating_matrix[user.id][int(movie)] = 
user.rated_movies[movie] for i in range(0, len(list_of_users)): for j in range(0, len(all_movies)): if rating_matrix[i][j] > 5: rating_matrix[i][j] = 5 elif rating_matrix[i][j] < 1: rating_matrix[i][j] = 3 # writes the ratings into an output file i = 0 test_set = "Test1" output = open("Output/" + test_set + "/ratings.data", "w") output.write(" ID, ") output.write(", ".join(["{:>5d}".format(movie) for movie in all_movies])) output.writelines("\n") for user in range(0, len(list_of_users)): i += 1 print(round((i / len(list_of_users)) * 100, 1), "%") output.write("{:>5d}".format(list_of_users[user].id) + ", ") j = 1 for movie in range(0, len(all_movies)): output.write("{: .2f}".format(rating_matrix[user][movie]) + (", " if j < len(all_movies) else "")) j += 1 output.writelines("\n") if not output.closed: output.close()
gpl-3.0
adaur/SickRage
lib/sqlalchemy/orm/unitofwork.py
78
23204
# orm/unitofwork.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """The internals for the unit of work system. The session's flush() process passes objects to a contextual object here, which assembles flush tasks based on mappers and their properties, organizes them in order of dependency, and executes. """ from .. import util, event from ..util import topological from . import attributes, persistence, util as orm_util def track_cascade_events(descriptor, prop): """Establish event listeners on object attributes which handle cascade-on-set/append. """ key = prop.key def append(state, item, initiator): # process "save_update" cascade rules for when # an instance is appended to the list of another instance if item is None: return sess = state.session if sess: if sess._warn_on_events: sess._flush_warning("collection append") prop = state.manager.mapper._props[key] item_state = attributes.instance_state(item) if prop._cascade.save_update and \ (prop.cascade_backrefs or key == initiator.key) and \ not sess._contains_state(item_state): sess._save_or_update_state(item_state) return item def remove(state, item, initiator): if item is None: return sess = state.session if sess: prop = state.manager.mapper._props[key] if sess._warn_on_events: sess._flush_warning( "collection remove" if prop.uselist else "related attribute delete") # expunge pending orphans item_state = attributes.instance_state(item) if prop._cascade.delete_orphan and \ item_state in sess._new and \ prop.mapper._is_orphan(item_state): sess.expunge(item) def set_(state, newvalue, oldvalue, initiator): # process "save_update" cascade rules for when an instance # is attached to another instance if oldvalue is newvalue: return newvalue sess = state.session if sess: if sess._warn_on_events: sess._flush_warning("related attribute set") prop = 
state.manager.mapper._props[key] if newvalue is not None: newvalue_state = attributes.instance_state(newvalue) if prop._cascade.save_update and \ (prop.cascade_backrefs or key == initiator.key) and \ not sess._contains_state(newvalue_state): sess._save_or_update_state(newvalue_state) if oldvalue is not None and \ oldvalue is not attributes.PASSIVE_NO_RESULT and \ prop._cascade.delete_orphan: # possible to reach here with attributes.NEVER_SET ? oldvalue_state = attributes.instance_state(oldvalue) if oldvalue_state in sess._new and \ prop.mapper._is_orphan(oldvalue_state): sess.expunge(oldvalue) return newvalue event.listen(descriptor, 'append', append, raw=True, retval=True) event.listen(descriptor, 'remove', remove, raw=True, retval=True) event.listen(descriptor, 'set', set_, raw=True, retval=True) class UOWTransaction(object): def __init__(self, session): self.session = session # dictionary used by external actors to # store arbitrary state information. self.attributes = {} # dictionary of mappers to sets of # DependencyProcessors, which are also # set to be part of the sorted flush actions, # which have that mapper as a parent. self.deps = util.defaultdict(set) # dictionary of mappers to sets of InstanceState # items pending for flush which have that mapper # as a parent. self.mappers = util.defaultdict(set) # a dictionary of Preprocess objects, which gather # additional states impacted by the flush # and determine if a flush action is needed self.presort_actions = {} # dictionary of PostSortRec objects, each # one issues work during the flush within # a certain ordering. 
self.postsort_actions = {} # a set of 2-tuples, each containing two # PostSortRec objects where the second # is dependent on the first being executed # first self.dependencies = set() # dictionary of InstanceState-> (isdelete, listonly) # tuples, indicating if this state is to be deleted # or insert/updated, or just refreshed self.states = {} # tracks InstanceStates which will be receiving # a "post update" call. Keys are mappers, # values are a set of states and a set of the # columns which should be included in the update. self.post_update_states = util.defaultdict(lambda: (set(), set())) @property def has_work(self): return bool(self.states) def is_deleted(self, state): """return true if the given state is marked as deleted within this uowtransaction.""" return state in self.states and self.states[state][0] def memo(self, key, callable_): if key in self.attributes: return self.attributes[key] else: self.attributes[key] = ret = callable_() return ret def remove_state_actions(self, state): """remove pending actions for a state from the uowtransaction.""" isdelete = self.states[state][0] self.states[state] = (isdelete, True) def get_attribute_history(self, state, key, passive=attributes.PASSIVE_NO_INITIALIZE): """facade to attributes.get_state_history(), including caching of results.""" hashkey = ("history", state, key) # cache the objects, not the states; the strong reference here # prevents newly loaded objects from being dereferenced during the # flush process if hashkey in self.attributes: history, state_history, cached_passive = self.attributes[hashkey] # if the cached lookup was "passive" and now # we want non-passive, do a non-passive lookup and re-cache if not cached_passive & attributes.SQL_OK \ and passive & attributes.SQL_OK: impl = state.manager[key].impl history = impl.get_history(state, state.dict, attributes.PASSIVE_OFF | attributes.LOAD_AGAINST_COMMITTED) if history and impl.uses_objects: state_history = history.as_state() else: state_history = 
history self.attributes[hashkey] = (history, state_history, passive) else: impl = state.manager[key].impl # TODO: store the history as (state, object) tuples # so we don't have to keep converting here history = impl.get_history(state, state.dict, passive | attributes.LOAD_AGAINST_COMMITTED) if history and impl.uses_objects: state_history = history.as_state() else: state_history = history self.attributes[hashkey] = (history, state_history, passive) return state_history def has_dep(self, processor): return (processor, True) in self.presort_actions def register_preprocessor(self, processor, fromparent): key = (processor, fromparent) if key not in self.presort_actions: self.presort_actions[key] = Preprocess(processor, fromparent) def register_object(self, state, isdelete=False, listonly=False, cancel_delete=False, operation=None, prop=None): if not self.session._contains_state(state): if not state.deleted and operation is not None: util.warn("Object of type %s not in session, %s operation " "along '%s' will not proceed" % (orm_util.state_class_str(state), operation, prop)) return False if state not in self.states: mapper = state.manager.mapper if mapper not in self.mappers: self._per_mapper_flush_actions(mapper) self.mappers[mapper].add(state) self.states[state] = (isdelete, listonly) else: if not listonly and (isdelete or cancel_delete): self.states[state] = (isdelete, False) return True def issue_post_update(self, state, post_update_cols): mapper = state.manager.mapper.base_mapper states, cols = self.post_update_states[mapper] states.add(state) cols.update(post_update_cols) def _per_mapper_flush_actions(self, mapper): saves = SaveUpdateAll(self, mapper.base_mapper) deletes = DeleteAll(self, mapper.base_mapper) self.dependencies.add((saves, deletes)) for dep in mapper._dependency_processors: dep.per_property_preprocessors(self) for prop in mapper.relationships: if prop.viewonly: continue dep = prop._dependency_processor dep.per_property_preprocessors(self) 
@util.memoized_property def _mapper_for_dep(self): """return a dynamic mapping of (Mapper, DependencyProcessor) to True or False, indicating if the DependencyProcessor operates on objects of that Mapper. The result is stored in the dictionary persistently once calculated. """ return util.PopulateDict( lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop ) def filter_states_for_dep(self, dep, states): """Filter the given list of InstanceStates to those relevant to the given DependencyProcessor. """ mapper_for_dep = self._mapper_for_dep return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]] def states_for_mapper_hierarchy(self, mapper, isdelete, listonly): checktup = (isdelete, listonly) for mapper in mapper.base_mapper.self_and_descendants: for state in self.mappers[mapper]: if self.states[state] == checktup: yield state def _generate_actions(self): """Generate the full, unsorted collection of PostSortRecs as well as dependency pairs for this UOWTransaction. """ # execute presort_actions, until all states # have been processed. a presort_action might # add new states to the uow. while True: ret = False for action in list(self.presort_actions.values()): if action.execute(self): ret = True if not ret: break # see if the graph of mapper dependencies has cycles. self.cycles = cycles = topological.find_cycles( self.dependencies, list(self.postsort_actions.values())) if cycles: # if yes, break the per-mapper actions into # per-state actions convert = dict( (rec, set(rec.per_state_flush_actions(self))) for rec in cycles ) # rewrite the existing dependencies to point to # the per-state actions for those per-mapper actions # that were broken up. 
for edge in list(self.dependencies): if None in edge or \ edge[0].disabled or edge[1].disabled or \ cycles.issuperset(edge): self.dependencies.remove(edge) elif edge[0] in cycles: self.dependencies.remove(edge) for dep in convert[edge[0]]: self.dependencies.add((dep, edge[1])) elif edge[1] in cycles: self.dependencies.remove(edge) for dep in convert[edge[1]]: self.dependencies.add((edge[0], dep)) return set([a for a in self.postsort_actions.values() if not a.disabled ] ).difference(cycles) def execute(self): postsort_actions = self._generate_actions() #sort = topological.sort(self.dependencies, postsort_actions) #print "--------------" #print "\ndependencies:", self.dependencies #print "\ncycles:", self.cycles #print "\nsort:", list(sort) #print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions) # execute if self.cycles: for set_ in topological.sort_as_subsets( self.dependencies, postsort_actions): while set_: n = set_.pop() n.execute_aggregate(self, set_) else: for rec in topological.sort( self.dependencies, postsort_actions): rec.execute(self) def finalize_flush_changes(self): """mark processed objects as clean / deleted after a successful flush(). this method is called within the flush() method after the execute() method has succeeded and the transaction has been committed. 
""" states = set(self.states) isdel = set( s for (s, (isdelete, listonly)) in self.states.items() if isdelete ) other = states.difference(isdel) self.session._remove_newly_deleted(isdel) self.session._register_newly_persistent(other) class IterateMappersMixin(object): def _mappers(self, uow): if self.fromparent: return iter( m for m in self.dependency_processor.parent.self_and_descendants if uow._mapper_for_dep[(m, self.dependency_processor)] ) else: return self.dependency_processor.mapper.self_and_descendants class Preprocess(IterateMappersMixin): def __init__(self, dependency_processor, fromparent): self.dependency_processor = dependency_processor self.fromparent = fromparent self.processed = set() self.setup_flush_actions = False def execute(self, uow): delete_states = set() save_states = set() for mapper in self._mappers(uow): for state in uow.mappers[mapper].difference(self.processed): (isdelete, listonly) = uow.states[state] if not listonly: if isdelete: delete_states.add(state) else: save_states.add(state) if delete_states: self.dependency_processor.presort_deletes(uow, delete_states) self.processed.update(delete_states) if save_states: self.dependency_processor.presort_saves(uow, save_states) self.processed.update(save_states) if (delete_states or save_states): if not self.setup_flush_actions and ( self.dependency_processor.\ prop_has_changes(uow, delete_states, True) or self.dependency_processor.\ prop_has_changes(uow, save_states, False) ): self.dependency_processor.per_property_flush_actions(uow) self.setup_flush_actions = True return True else: return False class PostSortRec(object): disabled = False def __new__(cls, uow, *args): key = (cls, ) + args if key in uow.postsort_actions: return uow.postsort_actions[key] else: uow.postsort_actions[key] = \ ret = \ object.__new__(cls) return ret def execute_aggregate(self, uow, recs): self.execute(uow) def __repr__(self): return "%s(%s)" % ( self.__class__.__name__, ",".join(str(x) for x in 
self.__dict__.values()) ) class ProcessAll(IterateMappersMixin, PostSortRec): def __init__(self, uow, dependency_processor, delete, fromparent): self.dependency_processor = dependency_processor self.delete = delete self.fromparent = fromparent uow.deps[dependency_processor.parent.base_mapper].\ add(dependency_processor) def execute(self, uow): states = self._elements(uow) if self.delete: self.dependency_processor.process_deletes(uow, states) else: self.dependency_processor.process_saves(uow, states) def per_state_flush_actions(self, uow): # this is handled by SaveUpdateAll and DeleteAll, # since a ProcessAll should unconditionally be pulled # into per-state if either the parent/child mappers # are part of a cycle return iter([]) def __repr__(self): return "%s(%s, delete=%s)" % ( self.__class__.__name__, self.dependency_processor, self.delete ) def _elements(self, uow): for mapper in self._mappers(uow): for state in uow.mappers[mapper]: (isdelete, listonly) = uow.states[state] if isdelete == self.delete and not listonly: yield state class IssuePostUpdate(PostSortRec): def __init__(self, uow, mapper, isdelete): self.mapper = mapper self.isdelete = isdelete def execute(self, uow): states, cols = uow.post_update_states[self.mapper] states = [s for s in states if uow.states[s][0] == self.isdelete] persistence.post_update(self.mapper, states, uow, cols) class SaveUpdateAll(PostSortRec): def __init__(self, uow, mapper): self.mapper = mapper assert mapper is mapper.base_mapper def execute(self, uow): persistence.save_obj(self.mapper, uow.states_for_mapper_hierarchy(self.mapper, False, False), uow ) def per_state_flush_actions(self, uow): states = list(uow.states_for_mapper_hierarchy( self.mapper, False, False)) base_mapper = self.mapper.base_mapper delete_all = DeleteAll(uow, base_mapper) for state in states: # keep saves before deletes - # this ensures 'row switch' operations work action = SaveUpdateState(uow, state, base_mapper) uow.dependencies.add((action, delete_all)) 
yield action for dep in uow.deps[self.mapper]: states_for_prop = uow.filter_states_for_dep(dep, states) dep.per_state_flush_actions(uow, states_for_prop, False) class DeleteAll(PostSortRec): def __init__(self, uow, mapper): self.mapper = mapper assert mapper is mapper.base_mapper def execute(self, uow): persistence.delete_obj(self.mapper, uow.states_for_mapper_hierarchy(self.mapper, True, False), uow ) def per_state_flush_actions(self, uow): states = list(uow.states_for_mapper_hierarchy( self.mapper, True, False)) base_mapper = self.mapper.base_mapper save_all = SaveUpdateAll(uow, base_mapper) for state in states: # keep saves before deletes - # this ensures 'row switch' operations work action = DeleteState(uow, state, base_mapper) uow.dependencies.add((save_all, action)) yield action for dep in uow.deps[self.mapper]: states_for_prop = uow.filter_states_for_dep(dep, states) dep.per_state_flush_actions(uow, states_for_prop, True) class ProcessState(PostSortRec): def __init__(self, uow, dependency_processor, delete, state): self.dependency_processor = dependency_processor self.delete = delete self.state = state def execute_aggregate(self, uow, recs): cls_ = self.__class__ dependency_processor = self.dependency_processor delete = self.delete our_recs = [r for r in recs if r.__class__ is cls_ and r.dependency_processor is dependency_processor and r.delete is delete] recs.difference_update(our_recs) states = [self.state] + [r.state for r in our_recs] if delete: dependency_processor.process_deletes(uow, states) else: dependency_processor.process_saves(uow, states) def __repr__(self): return "%s(%s, %s, delete=%s)" % ( self.__class__.__name__, self.dependency_processor, orm_util.state_str(self.state), self.delete ) class SaveUpdateState(PostSortRec): def __init__(self, uow, state, mapper): self.state = state self.mapper = mapper def execute_aggregate(self, uow, recs): cls_ = self.__class__ mapper = self.mapper our_recs = [r for r in recs if r.__class__ is cls_ and 
r.mapper is mapper] recs.difference_update(our_recs) persistence.save_obj(mapper, [self.state] + [r.state for r in our_recs], uow) def __repr__(self): return "%s(%s)" % ( self.__class__.__name__, orm_util.state_str(self.state) ) class DeleteState(PostSortRec): def __init__(self, uow, state, mapper): self.state = state self.mapper = mapper def execute_aggregate(self, uow, recs): cls_ = self.__class__ mapper = self.mapper our_recs = [r for r in recs if r.__class__ is cls_ and r.mapper is mapper] recs.difference_update(our_recs) states = [self.state] + [r.state for r in our_recs] persistence.delete_obj(mapper, [s for s in states if uow.states[s][0]], uow) def __repr__(self): return "%s(%s)" % ( self.__class__.__name__, orm_util.state_str(self.state) )
gpl-3.0
dav1x/ansible
test/units/inventory/test_inventory.py
44
5937
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import string from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch from ansible.inventory import Inventory from ansible.inventory.expand_hosts import expand_hostname_range from ansible.vars import VariableManager from units.mock.loader import DictDataLoader class TestInventory(unittest.TestCase): patterns = { 'a': ['a'], 'a, b': ['a', 'b'], 'a , b': ['a', 'b'], ' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'], '9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'], '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'], '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9','foo'], 'foo[1:2]': ['foo[1:2]'], 'a::b': ['a::b'], 'a:b': ['a', 'b'], ' a : b ': ['a', 'b'], 'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'], } pattern_lists = [ [['a'], ['a']], [['a', 'b'], ['a', 'b']], [['a, b'], ['a', 'b']], [['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'], ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9','foo']] ] # pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ] # a,b are the bounds of the subscript; x..z are the results of the subscript # when applied 
to string.ascii_letters. subscripts = { 'a': [('a',None), list(string.ascii_letters)], 'a[0]': [('a', (0, None)), ['a']], 'a[1]': [('a', (1, None)), ['b']], 'a[2:3]': [('a', (2, 3)), ['c', 'd']], 'a[-1]': [('a', (-1, None)), ['Z']], 'a[-2]': [('a', (-2, None)), ['Y']], 'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']], 'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']], 'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])], } ranges_to_expand = { 'a[1:2]': ['a1', 'a2'], 'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'], 'a[a:b]': ['aa', 'ab'], 'a[a:i:3]': ['aa', 'ad', 'ag'], 'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'], 'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'], 'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'], } def setUp(self): v = VariableManager() fake_loader = DictDataLoader({}) self.i = Inventory(loader=fake_loader, variable_manager=v, host_list='') def test_split_patterns(self): for p in self.patterns: r = self.patterns[p] self.assertEqual(r, self.i.split_host_pattern(p)) for p, r in self.pattern_lists: self.assertEqual(r, self.i.split_host_pattern(p)) def test_ranges(self): for s in self.subscripts: r = self.subscripts[s] self.assertEqual(r[0], self.i._split_subscript(s)) self.assertEqual( r[1], self.i._apply_subscript( list(string.ascii_letters), r[0][1] ) ) def test_expand_hostname_range(self): for e in self.ranges_to_expand: r = self.ranges_to_expand[e] self.assertEqual(r, expand_hostname_range(e)) class InventoryDefaultGroup(unittest.TestCase): def test_empty_inventory(self): inventory = self._get_inventory('') self.assertIn('all', inventory.groups) self.assertIn('ungrouped', inventory.groups) self.assertFalse(inventory.groups['all'].get_hosts()) self.assertFalse(inventory.groups['ungrouped'].get_hosts()) def test_ini(self): self._test_default_groups(""" host1 host2 host3 [servers] host3 host4 host5 """) def test_ini_explicit_ungrouped(self): self._test_default_groups(""" [ungrouped] host1 host2 host3 [servers] host3 host4 host5 """) def _get_inventory(self, 
inventory_content): v = VariableManager() fake_loader = DictDataLoader({ 'hosts': inventory_content }) with patch.object(Inventory, 'basedir') as mock_basedir: mock_basedir.return_value = './' return Inventory(loader=fake_loader, variable_manager=v, host_list='hosts') def _test_default_groups(self, inventory_content): inventory = self._get_inventory(inventory_content) self.assertIn('all', inventory.groups) self.assertIn('ungrouped', inventory.groups) all_hosts = set(host.name for host in inventory.groups['all'].get_hosts()) self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts) ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts()) self.assertEqual(set(['host1', 'host2', 'host3']), ungrouped_hosts) servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts()) self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts)
gpl-3.0
mancoast/CPythonPyc_test
fail/335_test_memoryio.py
2
28747
"""Unit tests for memory-based file-like objects. StringIO -- for unicode strings BytesIO -- for bytes """ import unittest from test import support import io import _pyio as pyio import pickle class MemorySeekTestMixin: def testInit(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) def testRead(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) self.assertEqual(buf[:1], bytesIo.read(1)) self.assertEqual(buf[1:5], bytesIo.read(4)) self.assertEqual(buf[5:], bytesIo.read(900)) self.assertEqual(self.EOF, bytesIo.read()) def testReadNoArgs(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) self.assertEqual(buf, bytesIo.read()) self.assertEqual(self.EOF, bytesIo.read()) def testSeek(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) bytesIo.read(5) bytesIo.seek(0) self.assertEqual(buf, bytesIo.read()) bytesIo.seek(3) self.assertEqual(buf[3:], bytesIo.read()) self.assertRaises(TypeError, bytesIo.seek, 0.0) def testTell(self): buf = self.buftype("1234567890") bytesIo = self.ioclass(buf) self.assertEqual(0, bytesIo.tell()) bytesIo.seek(5) self.assertEqual(5, bytesIo.tell()) bytesIo.seek(10000) self.assertEqual(10000, bytesIo.tell()) class MemoryTestMixin: def test_detach(self): buf = self.ioclass() self.assertRaises(self.UnsupportedOperation, buf.detach) def write_ops(self, f, t): self.assertEqual(f.write(t("blah.")), 5) self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(t("Hello.")), 6) self.assertEqual(f.tell(), 6) self.assertEqual(f.seek(5), 5) self.assertEqual(f.tell(), 5) self.assertEqual(f.write(t(" world\n\n\n")), 9) self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(t("h")), 1) self.assertEqual(f.truncate(12), 12) self.assertEqual(f.tell(), 1) def test_write(self): buf = self.buftype("hello world\n") memio = self.ioclass(buf) self.write_ops(memio, self.buftype) self.assertEqual(memio.getvalue(), buf) memio = self.ioclass() self.write_ops(memio, self.buftype) 
self.assertEqual(memio.getvalue(), buf) self.assertRaises(TypeError, memio.write, None) memio.close() self.assertRaises(ValueError, memio.write, self.buftype("")) def test_writelines(self): buf = self.buftype("1234567890") memio = self.ioclass() self.assertEqual(memio.writelines([buf] * 100), None) self.assertEqual(memio.getvalue(), buf * 100) memio.writelines([]) self.assertEqual(memio.getvalue(), buf * 100) memio = self.ioclass() self.assertRaises(TypeError, memio.writelines, [buf] + [1]) self.assertEqual(memio.getvalue(), buf) self.assertRaises(TypeError, memio.writelines, None) memio.close() self.assertRaises(ValueError, memio.writelines, []) def test_writelines_error(self): memio = self.ioclass() def error_gen(): yield self.buftype('spam') raise KeyboardInterrupt self.assertRaises(KeyboardInterrupt, memio.writelines, error_gen()) def test_truncate(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertRaises(ValueError, memio.truncate, -1) memio.seek(6) self.assertEqual(memio.truncate(), 6) self.assertEqual(memio.getvalue(), buf[:6]) self.assertEqual(memio.truncate(4), 4) self.assertEqual(memio.getvalue(), buf[:4]) self.assertEqual(memio.tell(), 6) memio.seek(0, 2) memio.write(buf) self.assertEqual(memio.getvalue(), buf[:4] + buf) pos = memio.tell() self.assertEqual(memio.truncate(None), pos) self.assertEqual(memio.tell(), pos) self.assertRaises(TypeError, memio.truncate, '0') memio.close() self.assertRaises(ValueError, memio.truncate, 0) def test_init(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.getvalue(), buf) memio = self.ioclass(None) self.assertEqual(memio.getvalue(), self.EOF) memio.__init__(buf * 2) self.assertEqual(memio.getvalue(), buf * 2) memio.__init__(buf) self.assertEqual(memio.getvalue(), buf) self.assertRaises(TypeError, memio.__init__, []) def test_read(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.read(0), self.EOF) 
self.assertEqual(memio.read(1), buf[:1]) self.assertEqual(memio.read(4), buf[1:5]) self.assertEqual(memio.read(900), buf[5:]) self.assertEqual(memio.read(), self.EOF) memio.seek(0) self.assertEqual(memio.read(), buf) self.assertEqual(memio.read(), self.EOF) self.assertEqual(memio.tell(), 10) memio.seek(0) self.assertEqual(memio.read(-1), buf) memio.seek(0) self.assertEqual(type(memio.read()), type(buf)) memio.seek(100) self.assertEqual(type(memio.read()), type(buf)) memio.seek(0) self.assertEqual(memio.read(None), buf) self.assertRaises(TypeError, memio.read, '') memio.close() self.assertRaises(ValueError, memio.read) def test_readline(self): buf = self.buftype("1234567890\n") memio = self.ioclass(buf * 2) self.assertEqual(memio.readline(0), self.EOF) self.assertEqual(memio.readline(), buf) self.assertEqual(memio.readline(), buf) self.assertEqual(memio.readline(), self.EOF) memio.seek(0) self.assertEqual(memio.readline(5), buf[:5]) self.assertEqual(memio.readline(5), buf[5:10]) self.assertEqual(memio.readline(5), buf[10:15]) memio.seek(0) self.assertEqual(memio.readline(-1), buf) memio.seek(0) self.assertEqual(memio.readline(0), self.EOF) buf = self.buftype("1234567890\n") memio = self.ioclass((buf * 3)[:-1]) self.assertEqual(memio.readline(), buf) self.assertEqual(memio.readline(), buf) self.assertEqual(memio.readline(), buf[:-1]) self.assertEqual(memio.readline(), self.EOF) memio.seek(0) self.assertEqual(type(memio.readline()), type(buf)) self.assertEqual(memio.readline(), buf) self.assertRaises(TypeError, memio.readline, '') memio.close() self.assertRaises(ValueError, memio.readline) def test_readlines(self): buf = self.buftype("1234567890\n") memio = self.ioclass(buf * 10) self.assertEqual(memio.readlines(), [buf] * 10) memio.seek(5) self.assertEqual(memio.readlines(), [buf[5:]] + [buf] * 9) memio.seek(0) self.assertEqual(memio.readlines(15), [buf] * 2) memio.seek(0) self.assertEqual(memio.readlines(-1), [buf] * 10) memio.seek(0) 
self.assertEqual(memio.readlines(0), [buf] * 10) memio.seek(0) self.assertEqual(type(memio.readlines()[0]), type(buf)) memio.seek(0) self.assertEqual(memio.readlines(None), [buf] * 10) self.assertRaises(TypeError, memio.readlines, '') memio.close() self.assertRaises(ValueError, memio.readlines) def test_iterator(self): buf = self.buftype("1234567890\n") memio = self.ioclass(buf * 10) self.assertEqual(iter(memio), memio) self.assertTrue(hasattr(memio, '__iter__')) self.assertTrue(hasattr(memio, '__next__')) i = 0 for line in memio: self.assertEqual(line, buf) i += 1 self.assertEqual(i, 10) memio.seek(0) i = 0 for line in memio: self.assertEqual(line, buf) i += 1 self.assertEqual(i, 10) memio = self.ioclass(buf * 2) memio.close() self.assertRaises(ValueError, memio.__next__) def test_getvalue(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.getvalue(), buf) memio.read() self.assertEqual(memio.getvalue(), buf) self.assertEqual(type(memio.getvalue()), type(buf)) memio = self.ioclass(buf * 1000) self.assertEqual(memio.getvalue()[-3:], self.buftype("890")) memio = self.ioclass(buf) memio.close() self.assertRaises(ValueError, memio.getvalue) def test_seek(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) memio.read(5) self.assertRaises(ValueError, memio.seek, -1) self.assertRaises(ValueError, memio.seek, 1, -1) self.assertRaises(ValueError, memio.seek, 1, 3) self.assertEqual(memio.seek(0), 0) self.assertEqual(memio.seek(0, 0), 0) self.assertEqual(memio.read(), buf) self.assertEqual(memio.seek(3), 3) self.assertEqual(memio.seek(0, 1), 3) self.assertEqual(memio.read(), buf[3:]) self.assertEqual(memio.seek(len(buf)), len(buf)) self.assertEqual(memio.read(), self.EOF) memio.seek(len(buf) + 1) self.assertEqual(memio.read(), self.EOF) self.assertEqual(memio.seek(0, 2), len(buf)) self.assertEqual(memio.read(), self.EOF) memio.close() self.assertRaises(ValueError, memio.seek, 0) def test_overseek(self): buf = 
self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.seek(len(buf) + 1), 11) self.assertEqual(memio.read(), self.EOF) self.assertEqual(memio.tell(), 11) self.assertEqual(memio.getvalue(), buf) memio.write(self.EOF) self.assertEqual(memio.getvalue(), buf) memio.write(buf) self.assertEqual(memio.getvalue(), buf + self.buftype('\0') + buf) def test_tell(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.tell(), 0) memio.seek(5) self.assertEqual(memio.tell(), 5) memio.seek(10000) self.assertEqual(memio.tell(), 10000) memio.close() self.assertRaises(ValueError, memio.tell) def test_flush(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.flush(), None) def test_flags(self): memio = self.ioclass() self.assertEqual(memio.writable(), True) self.assertEqual(memio.readable(), True) self.assertEqual(memio.seekable(), True) self.assertEqual(memio.isatty(), False) self.assertEqual(memio.closed, False) memio.close() self.assertRaises(ValueError, memio.writable) self.assertRaises(ValueError, memio.readable) self.assertRaises(ValueError, memio.seekable) self.assertRaises(ValueError, memio.isatty) self.assertEqual(memio.closed, True) def test_subclassing(self): buf = self.buftype("1234567890") def test1(): class MemIO(self.ioclass): pass m = MemIO(buf) return m.getvalue() def test2(): class MemIO(self.ioclass): def __init__(me, a, b): self.ioclass.__init__(me, a) m = MemIO(buf, None) return m.getvalue() self.assertEqual(test1(), buf) self.assertEqual(test2(), buf) def test_instance_dict_leak(self): # Test case for issue #6242. # This will be caught by regrtest.py -R if this leak. 
for _ in range(100): memio = self.ioclass() memio.foo = 1 def test_pickling(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) memio.foo = 42 memio.seek(2) class PickleTestMemIO(self.ioclass): def __init__(me, initvalue, foo): self.ioclass.__init__(me, initvalue) me.foo = foo # __getnewargs__ is undefined on purpose. This checks that PEP 307 # is used to provide pickling support. # Pickle expects the class to be on the module level. Here we use a # little hack to allow the PickleTestMemIO class to derive from # self.ioclass without having to define all combinations explictly on # the module-level. import __main__ PickleTestMemIO.__module__ = '__main__' __main__.PickleTestMemIO = PickleTestMemIO submemio = PickleTestMemIO(buf, 80) submemio.seek(2) # We only support pickle protocol 2 and onward since we use extended # __reduce__ API of PEP 307 to provide pickling support. for proto in range(2, pickle.HIGHEST_PROTOCOL): for obj in (memio, submemio): obj2 = pickle.loads(pickle.dumps(obj, protocol=proto)) self.assertEqual(obj.getvalue(), obj2.getvalue()) self.assertEqual(obj.__class__, obj2.__class__) self.assertEqual(obj.foo, obj2.foo) self.assertEqual(obj.tell(), obj2.tell()) obj2.close() self.assertRaises(ValueError, pickle.dumps, obj2, proto) del __main__.PickleTestMemIO class BytesIOMixin: def test_getbuffer(self): memio = self.ioclass(b"1234567890") buf = memio.getbuffer() self.assertEqual(bytes(buf), b"1234567890") memio.seek(5) buf = memio.getbuffer() self.assertEqual(bytes(buf), b"1234567890") # Trying to change the size of the BytesIO while a buffer is exported # raises a BufferError. 
self.assertRaises(BufferError, memio.write, b'x' * 100) self.assertRaises(BufferError, memio.truncate) # Mutating the buffer updates the BytesIO buf[3:6] = b"abc" self.assertEqual(bytes(buf), b"123abc7890") self.assertEqual(memio.getvalue(), b"123abc7890") # After the buffer gets released, we can resize the BytesIO again del buf support.gc_collect() memio.truncate() class PyBytesIOTest(MemoryTestMixin, MemorySeekTestMixin, BytesIOMixin, unittest.TestCase): UnsupportedOperation = pyio.UnsupportedOperation @staticmethod def buftype(s): return s.encode("ascii") ioclass = pyio.BytesIO EOF = b"" def test_read1(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertRaises(TypeError, memio.read1) self.assertEqual(memio.read(), buf) def test_readinto(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) b = bytearray(b"hello") self.assertEqual(memio.readinto(b), 5) self.assertEqual(b, b"12345") self.assertEqual(memio.readinto(b), 5) self.assertEqual(b, b"67890") self.assertEqual(memio.readinto(b), 0) self.assertEqual(b, b"67890") b = bytearray(b"hello world") memio.seek(0) self.assertEqual(memio.readinto(b), 10) self.assertEqual(b, b"1234567890d") b = bytearray(b"") memio.seek(0) self.assertEqual(memio.readinto(b), 0) self.assertEqual(b, b"") self.assertRaises(TypeError, memio.readinto, '') import array a = array.array('b', b"hello world") memio = self.ioclass(buf) memio.readinto(a) self.assertEqual(a.tobytes(), b"1234567890d") memio.close() self.assertRaises(ValueError, memio.readinto, b) memio = self.ioclass(b"123") b = bytearray() memio.seek(42) memio.readinto(b) self.assertEqual(b, b"") def test_relative_seek(self): buf = self.buftype("1234567890") memio = self.ioclass(buf) self.assertEqual(memio.seek(-1, 1), 0) self.assertEqual(memio.seek(3, 1), 3) self.assertEqual(memio.seek(-4, 1), 0) self.assertEqual(memio.seek(-1, 2), 9) self.assertEqual(memio.seek(1, 1), 10) self.assertEqual(memio.seek(1, 2), 11) memio.seek(-3, 2) 
self.assertEqual(memio.read(), buf[-3:]) memio.seek(0) memio.seek(1, 1) self.assertEqual(memio.read(), buf[1:]) def test_unicode(self): memio = self.ioclass() self.assertRaises(TypeError, self.ioclass, "1234567890") self.assertRaises(TypeError, memio.write, "1234567890") self.assertRaises(TypeError, memio.writelines, ["1234567890"]) def test_bytes_array(self): buf = b"1234567890" import array a = array.array('b', list(buf)) memio = self.ioclass(a) self.assertEqual(memio.getvalue(), buf) self.assertEqual(memio.write(a), 10) self.assertEqual(memio.getvalue(), buf) def test_issue5449(self): buf = self.buftype("1234567890") self.ioclass(initial_bytes=buf) self.assertRaises(TypeError, self.ioclass, buf, foo=None) class TextIOTestMixin: def test_newlines_property(self): memio = self.ioclass(newline=None) # The C StringIO decodes newlines in write() calls, but the Python # implementation only does when reading. This function forces them to # be decoded for testing. def force_decode(): memio.seek(0) memio.read() self.assertEqual(memio.newlines, None) memio.write("a\n") force_decode() self.assertEqual(memio.newlines, "\n") memio.write("b\r\n") force_decode() self.assertEqual(memio.newlines, ("\n", "\r\n")) memio.write("c\rd") force_decode() self.assertEqual(memio.newlines, ("\r", "\n", "\r\n")) def test_relative_seek(self): memio = self.ioclass() self.assertRaises(IOError, memio.seek, -1, 1) self.assertRaises(IOError, memio.seek, 3, 1) self.assertRaises(IOError, memio.seek, -3, 1) self.assertRaises(IOError, memio.seek, -1, 2) self.assertRaises(IOError, memio.seek, 1, 1) self.assertRaises(IOError, memio.seek, 1, 2) def test_textio_properties(self): memio = self.ioclass() # These are just dummy values but we nevertheless check them for fear # of unexpected breakage. 
self.assertIsNone(memio.encoding) self.assertIsNone(memio.errors) self.assertFalse(memio.line_buffering) def test_newline_default(self): memio = self.ioclass("a\nb\r\nc\rd") self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") memio = self.ioclass() self.assertEqual(memio.write("a\nb\r\nc\rd"), 8) memio.seek(0) self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") def test_newline_none(self): # newline=None memio = self.ioclass("a\nb\r\nc\rd", newline=None) self.assertEqual(list(memio), ["a\n", "b\n", "c\n", "d"]) memio.seek(0) self.assertEqual(memio.read(1), "a") self.assertEqual(memio.read(2), "\nb") self.assertEqual(memio.read(2), "\nc") self.assertEqual(memio.read(1), "\n") self.assertEqual(memio.getvalue(), "a\nb\nc\nd") memio = self.ioclass(newline=None) self.assertEqual(2, memio.write("a\n")) self.assertEqual(3, memio.write("b\r\n")) self.assertEqual(3, memio.write("c\rd")) memio.seek(0) self.assertEqual(memio.read(), "a\nb\nc\nd") self.assertEqual(memio.getvalue(), "a\nb\nc\nd") memio = self.ioclass("a\r\nb", newline=None) self.assertEqual(memio.read(3), "a\nb") def test_newline_empty(self): # newline="" memio = self.ioclass("a\nb\r\nc\rd", newline="") self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"]) memio.seek(0) self.assertEqual(memio.read(4), "a\nb\r") self.assertEqual(memio.read(2), "\nc") self.assertEqual(memio.read(1), "\r") self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") memio = self.ioclass(newline="") self.assertEqual(2, memio.write("a\n")) self.assertEqual(2, memio.write("b\r")) self.assertEqual(2, memio.write("\nc")) self.assertEqual(2, memio.write("\rd")) memio.seek(0) self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"]) self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") def test_newline_lf(self): # newline="\n" memio = self.ioclass("a\nb\r\nc\rd", newline="\n") self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"]) 
self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") memio = self.ioclass(newline="\n") self.assertEqual(memio.write("a\nb\r\nc\rd"), 8) memio.seek(0) self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd") def test_newline_cr(self): # newline="\r" memio = self.ioclass("a\nb\r\nc\rd", newline="\r") self.assertEqual(memio.read(), "a\rb\r\rc\rd") memio.seek(0) self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"]) self.assertEqual(memio.getvalue(), "a\rb\r\rc\rd") memio = self.ioclass(newline="\r") self.assertEqual(memio.write("a\nb\r\nc\rd"), 8) memio.seek(0) self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"]) memio.seek(0) self.assertEqual(memio.readlines(), ["a\r", "b\r", "\r", "c\r", "d"]) self.assertEqual(memio.getvalue(), "a\rb\r\rc\rd") def test_newline_crlf(self): # newline="\r\n" memio = self.ioclass("a\nb\r\nc\rd", newline="\r\n") self.assertEqual(memio.read(), "a\r\nb\r\r\nc\rd") memio.seek(0) self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"]) memio.seek(0) self.assertEqual(memio.readlines(), ["a\r\n", "b\r\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\r\nb\r\r\nc\rd") memio = self.ioclass(newline="\r\n") self.assertEqual(memio.write("a\nb\r\nc\rd"), 8) memio.seek(0) self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"]) self.assertEqual(memio.getvalue(), "a\r\nb\r\r\nc\rd") def test_issue5265(self): # StringIO can duplicate newlines in universal newlines mode memio = self.ioclass("a\r\nb\r\n", newline=None) self.assertEqual(memio.read(5), "a\nb\n") self.assertEqual(memio.getvalue(), "a\nb\n") def test_newline_argument(self): self.assertRaises(TypeError, self.ioclass, newline=b"\n") self.assertRaises(ValueError, self.ioclass, newline="error") # These should not raise an error for newline in (None, "", "\n", "\r", "\r\n"): self.ioclass(newline=newline) class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin, TextIOTestMixin, unittest.TestCase): buftype = str ioclass 
= pyio.StringIO UnsupportedOperation = pyio.UnsupportedOperation EOF = "" def test_lone_surrogates(self): # Issue #20424 memio = self.ioclass('\ud800') self.assertEqual(memio.read(), '\ud800') memio = self.ioclass() memio.write('\ud800') self.assertEqual(memio.getvalue(), '\ud800') class PyStringIOPickleTest(TextIOTestMixin, unittest.TestCase): """Test if pickle restores properly the internal state of StringIO. """ buftype = str UnsupportedOperation = pyio.UnsupportedOperation EOF = "" class ioclass(pyio.StringIO): def __new__(cls, *args, **kwargs): return pickle.loads(pickle.dumps(pyio.StringIO(*args, **kwargs))) def __init__(self, *args, **kwargs): pass class CBytesIOTest(PyBytesIOTest): ioclass = io.BytesIO UnsupportedOperation = io.UnsupportedOperation def test_getstate(self): memio = self.ioclass() state = memio.__getstate__() self.assertEqual(len(state), 3) bytearray(state[0]) # Check if state[0] supports the buffer interface. self.assertIsInstance(state[1], int) self.assertTrue(isinstance(state[2], dict) or state[2] is None) memio.close() self.assertRaises(ValueError, memio.__getstate__) def test_setstate(self): # This checks whether __setstate__ does proper input validation. 
memio = self.ioclass() memio.__setstate__((b"no error", 0, None)) memio.__setstate__((bytearray(b"no error"), 0, None)) memio.__setstate__((b"no error", 0, {'spam': 3})) self.assertRaises(ValueError, memio.__setstate__, (b"", -1, None)) self.assertRaises(TypeError, memio.__setstate__, ("unicode", 0, None)) self.assertRaises(TypeError, memio.__setstate__, (b"", 0.0, None)) self.assertRaises(TypeError, memio.__setstate__, (b"", 0, 0)) self.assertRaises(TypeError, memio.__setstate__, (b"len-test", 0)) self.assertRaises(TypeError, memio.__setstate__) self.assertRaises(TypeError, memio.__setstate__, 0) memio.close() self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None)) check_sizeof = support.check_sizeof @support.cpython_only def test_sizeof(self): basesize = support.calcobjsize('P2nN2Pn') check = self.check_sizeof self.assertEqual(object.__sizeof__(io.BytesIO()), basesize) check(io.BytesIO(), basesize ) check(io.BytesIO(b'a'), basesize + 1 + 1 ) check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 ) class CStringIOTest(PyStringIOTest): ioclass = io.StringIO UnsupportedOperation = io.UnsupportedOperation # XXX: For the Python version of io.StringIO, this is highly # dependent on the encoding used for the underlying buffer. 
def test_widechar(self): buf = self.buftype("\U0002030a\U00020347") memio = self.ioclass(buf) self.assertEqual(memio.getvalue(), buf) self.assertEqual(memio.write(buf), len(buf)) self.assertEqual(memio.tell(), len(buf)) self.assertEqual(memio.getvalue(), buf) self.assertEqual(memio.write(buf), len(buf)) self.assertEqual(memio.tell(), len(buf) * 2) self.assertEqual(memio.getvalue(), buf + buf) def test_getstate(self): memio = self.ioclass() state = memio.__getstate__() self.assertEqual(len(state), 4) self.assertIsInstance(state[0], str) self.assertIsInstance(state[1], str) self.assertIsInstance(state[2], int) self.assertTrue(isinstance(state[3], dict) or state[3] is None) memio.close() self.assertRaises(ValueError, memio.__getstate__) def test_setstate(self): # This checks whether __setstate__ does proper input validation. memio = self.ioclass() memio.__setstate__(("no error", "\n", 0, None)) memio.__setstate__(("no error", "", 0, {'spam': 3})) self.assertRaises(ValueError, memio.__setstate__, ("", "f", 0, None)) self.assertRaises(ValueError, memio.__setstate__, ("", "", -1, None)) self.assertRaises(TypeError, memio.__setstate__, (b"", "", 0, None)) self.assertRaises(TypeError, memio.__setstate__, ("", b"", 0, None)) self.assertRaises(TypeError, memio.__setstate__, ("", "", 0.0, None)) self.assertRaises(TypeError, memio.__setstate__, ("", "", 0, 0)) self.assertRaises(TypeError, memio.__setstate__, ("len-test", 0)) self.assertRaises(TypeError, memio.__setstate__) self.assertRaises(TypeError, memio.__setstate__, 0) memio.close() self.assertRaises(ValueError, memio.__setstate__, ("closed", "", 0, None)) class CStringIOPickleTest(PyStringIOPickleTest): UnsupportedOperation = io.UnsupportedOperation class ioclass(io.StringIO): def __new__(cls, *args, **kwargs): return pickle.loads(pickle.dumps(io.StringIO(*args, **kwargs))) def __init__(self, *args, **kwargs): pass def test_main(): tests = [PyBytesIOTest, PyStringIOTest, CBytesIOTest, CStringIOTest, PyStringIOPickleTest, 
CStringIOPickleTest] support.run_unittest(*tests) if __name__ == '__main__': test_main()
gpl-3.0
korbenzhang/vim-ycm-win
third_party/ycmd/ycmd/tests/request_wrap_test.py
33
4576
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2014 Google Inc. # # YouCompleteMe is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # YouCompleteMe is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>. from nose.tools import eq_ from ..request_wrap import RequestWrap def PrepareJson( contents = '', line_num = 1, column_num = 1, filetype = '' ): return { 'line_num': line_num, 'column_num': column_num, 'filepath': '/foo', 'file_data': { '/foo': { 'filetypes': [ filetype ], 'contents': contents } } } def LineValue_OneLine_test(): eq_( 'zoo', RequestWrap( PrepareJson( line_num = 1, contents = 'zoo' ) )[ 'line_value' ] ) def LineValue_LastLine_test(): eq_( 'zoo', RequestWrap( PrepareJson( line_num = 3, contents = 'goo\nbar\nzoo' ) )[ 'line_value' ] ) def LineValue_MiddleLine_test(): eq_( 'zoo', RequestWrap( PrepareJson( line_num = 2, contents = 'goo\nzoo\nbar' ) )[ 'line_value' ] ) def LineValue_WindowsLines_test(): eq_( 'zoo', RequestWrap( PrepareJson( line_num = 3, contents = 'goo\r\nbar\r\nzoo' ) )[ 'line_value' ] ) def LineValue_MixedFormatLines_test(): eq_( 'zoo', RequestWrap( PrepareJson( line_num = 3, contents = 'goo\nbar\r\nzoo' ) )[ 'line_value' ] ) def LineValue_EmptyContents_test(): eq_( '', RequestWrap( PrepareJson( line_num = 1, contents = '' ) )[ 'line_value' ] ) def StartColumn_RightAfterDot_test(): eq_( 5, RequestWrap( PrepareJson( column_num = 5, contents = 'foo.') )[ 'start_column' ] ) def StartColumn_Dot_test(): eq_( 5, RequestWrap( PrepareJson( 
column_num = 8, contents = 'foo.bar') )[ 'start_column' ] ) def StartColumn_DotWithUnicode_test(): eq_( 7, RequestWrap( PrepareJson( column_num = 11, contents = 'fäö.bär') )[ 'start_column' ] ) def StartColumn_Paren_test(): eq_( 5, RequestWrap( PrepareJson( column_num = 8, contents = 'foo(bar') )[ 'start_column' ] ) def StartColumn_AfterWholeWord_test(): eq_( 1, RequestWrap( PrepareJson( column_num = 7, contents = 'foobar') )[ 'start_column' ] ) def StartColumn_AfterWholeWord_Html_test(): eq_( 1, RequestWrap( PrepareJson( column_num = 7, filetype = 'html', contents = 'fo-bar') )[ 'start_column' ] ) def StartColumn_AfterWholeUnicodeWord_test(): eq_( 1, RequestWrap( PrepareJson( column_num = 6, contents = u'fäö') )[ 'start_column' ] ) def StartColumn_InMiddleOfWholeWord_test(): eq_( 1, RequestWrap( PrepareJson( column_num = 4, contents = 'foobar') )[ 'start_column' ] ) def StartColumn_ColumnOne_test(): eq_( 1, RequestWrap( PrepareJson( column_num = 1, contents = 'foobar') )[ 'start_column' ] ) def Query_AtWordEnd_test(): eq_( 'foo', RequestWrap( PrepareJson( column_num = 4, contents = 'foo') )[ 'query' ] ) def Query_InWordMiddle_test(): eq_( 'foo', RequestWrap( PrepareJson( column_num = 4, contents = 'foobar') )[ 'query' ] ) def Query_StartOfLine_test(): eq_( '', RequestWrap( PrepareJson( column_num = 1, contents = 'foobar') )[ 'query' ] ) def Query_StopsAtParen_test(): eq_( 'bar', RequestWrap( PrepareJson( column_num = 8, contents = 'foo(bar') )[ 'query' ] ) def Query_InWhiteSpace_test(): eq_( '', RequestWrap( PrepareJson( column_num = 8, contents = 'foo ') )[ 'query' ] )
apache-2.0
TanguyPatte/phantomjs-packaging
src/qt/qtwebkit/Source/JavaScriptCore/disassembler/udis86/ud_optable.py
119
4042
# udis86 - scripts/ud_optable.py (optable.xml parser) # # Copyright (c) 2009 Vivek Thampi # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os
import sys
from xml.dom import minidom


class UdOptableXmlParser:
    """Parser for udis86's optable.xml x86 instruction table."""

    def parseDef(self, node):
        """Parse a single <def> element.

        Returns a ``(prefixes, opcodes, operands, vendor)`` tuple.
        <mode> and <syn> entries are folded into the prefix list, which
        is what the udis86 table generators expect.
        """
        ven = ''
        pfx = []
        opc = []
        opr = []
        for def_node in node.childNodes:
            # Text/whitespace nodes have no localName -- skip them.
            if not def_node.localName:
                continue
            if def_node.localName == 'pfx':
                pfx = def_node.firstChild.data.split()
            elif def_node.localName == 'opc':
                opc = def_node.firstChild.data.split()
            elif def_node.localName == 'opr':
                opr = def_node.firstChild.data.split()
            elif def_node.localName == 'mode':
                pfx.extend(def_node.firstChild.data.split())
            elif def_node.localName == 'syn':
                pfx.extend(def_node.firstChild.data.split())
            elif def_node.localName == 'vendor':
                ven = def_node.firstChild.data
            else:
                print("warning: invalid node - %s" % def_node.localName)
                continue
        return (pfx, opc, opr, ven)

    def parse(self, xml, fn):
        """Walk the <x86optable> document, calling *fn* once per <def>.

        *fn* receives (prefixes, mnemonic, opcodes, operands, vendor).
        A <vendor> child of the instruction provides the default vendor;
        a per-<def> vendor, when present, overrides it.
        """
        xmlDoc = minidom.parse(xml)
        self.TlNode = xmlDoc.firstChild
        # Skip leading comment/processing nodes until the table root.
        while self.TlNode and self.TlNode.localName != "x86optable":
            self.TlNode = self.TlNode.nextSibling
        for insnNode in self.TlNode.childNodes:
            if not insnNode.localName:
                continue
            if insnNode.localName != "instruction":
                print("warning: invalid insn node - %s" % insnNode.localName)
                continue
            mnemonic = insnNode.getElementsByTagName('mnemonic')[0].firstChild.data
            vendor = ''
            for node in insnNode.childNodes:
                if node.localName == 'vendor':
                    vendor = node.firstChild.data
                elif node.localName == 'def':
                    (prefixes, opcodes, operands, local_vendor) = \
                        self.parseDef(node)
                    if len(local_vendor):
                        vendor = local_vendor
                    # callback
                    fn(prefixes, mnemonic, opcodes, operands, vendor)


def printFn(pfx, mnm, opc, opr, ven):
    """Debug callback: print one table entry on a single line.

    Bug fix: the previous version kept Python 2 trailing-comma print
    statements converted verbatim to function calls ("print('def: '),"),
    which under Python 3 emit a spurious newline per call and build a
    throwaway one-element tuple.  Assemble the line first, print once.
    """
    parts = ['def: ']
    if len(pfx):
        parts.append(' '.join(pfx))
    parts.append("%s %s %s %s" %
                 (mnm, ' '.join(opc), ' '.join(opr), ven))
    print(' '.join(parts))


def parse(xml, callback):
    """Module-level convenience wrapper around UdOptableXmlParser.parse()."""
    parser = UdOptableXmlParser()
    parser.parse(xml, callback)


def main():
    parser = UdOptableXmlParser()
    parser.parse(sys.argv[1], printFn)


if __name__ == "__main__":
    main()
bsd-3-clause
philsch/ansible
test/units/modules/network/nxos/test_nxos_vlan.py
47
3851
# (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.compat.tests.mock import patch from ansible.modules.network.nxos import nxos_vlan from .nxos_module import TestNxosModule, load_fixture, set_module_args class TestNxosVlanModule(TestNxosModule): module = nxos_vlan def setUp(self): self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_vlan.run_commands') self.run_commands = self.mock_run_commands.start() self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vlan.load_config') self.load_config = self.mock_load_config.start() self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vlan.get_config') self.get_config = self.mock_get_config.start() def tearDown(self): self.mock_run_commands.stop() self.mock_load_config.stop() def load_fixtures(self, commands=None, device=''): def load_from_file(*args, **kwargs): module, commands = args output = list() for item in commands: try: obj = json.loads(item) command = obj['command'] except ValueError: command = item filename = '%s.txt' % str(command).split(' | ')[0].replace(' ', '_') output.append(load_fixture('nxos_vlan', filename)) return output self.run_commands.side_effect = load_from_file self.load_config.return_value = None def 
test_nxos_vlan_range(self): set_module_args(dict(vlan_range='6-10')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['vlan 6', 'vlan 7', 'vlan 8', 'vlan 9', 'vlan 10']) def test_nxos_vlan_range_absent(self): set_module_args(dict(vlan_range='1-5', state='absent')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['no vlan 1']) def test_nxos_vlan_id(self): set_module_args(dict(vlan_id='15', state='present')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['vlan 15', 'exit']) def test_nxos_vlan_id_absent(self): set_module_args(dict(vlan_id='1', state='absent')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['no vlan 1']) def test_nxos_vlan_named_vlan(self): set_module_args(dict(vlan_id='15', name='WEB')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['vlan 15', 'name WEB', 'exit']) def test_nxos_vlan_shut_down(self): set_module_args(dict(vlan_id='1', admin_state='down')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['vlan 1', 'shutdown', 'exit']) def test_nxos_vlan_no_change(self): set_module_args(dict(vlan_id='1', name='default', vlan_state='active', admin_state='up')) result = self.execute_module(changed=False) self.assertEqual(result['commands'], [])
gpl-3.0
kkuunnddaannkk/vispy
examples/tutorial/visuals/T02_measurements.py
17
8277
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2015, Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. # ----------------------------------------------------------------------------- """ Tutorial: Creating Visuals ========================== 02. Making physical measurements -------------------------------- In the last tutorial we created a simple Visual subclass that draws a rectangle. In this tutorial, we will make two additions: 1. Draw a rectangular border instead of a solid rectangle 2. Make the border a fixed pixel width, even when displayed inside a user-zoomable ViewBox. The border is made by drawing a line_strip with 10 vertices:: 1--------------3 | | | 2------4 | [ note that points 9 and 10 are | | | | the same as points 1 and 2 ] | 8------6 | | | 7--------------5 In order to ensure that the border has a fixed width in pixels, we need to adjust the spacing between the inner and outer rectangles whenever the user changes the zoom of the ViewBox. How? Recall that each time the visual is drawn, it is given a TransformSystem instance that carries information about the size of logical and physical pixels relative to the visual [link to TransformSystem documentation]. Essentially, we have 4 coordinate systems: Visual -> Document -> Framebuffer -> Render The user specifies the position and size of the rectangle in Visual coordinates, and in [tutorial 1] we used the vertex shader to convert directly from Visual coordinates to render coordinates. In this tutorial we will convert first to document coordinates, then make the adjustment for the border width, then convert the remainder of the way to render coordinates. Let's say, for example that the user specifies the box width to be 20, and the border width to be 5. 
To draw the border correctly, we cannot simply add/subtract 5 from the inner rectangle coordinates; if the user zooms in by a factor of 2 then the border would become 10 px wide. Another way to say this is that a vector with length=1 in Visual coordinates does not _necessarily_ have a length of 1 pixel on the canvas. Instead, we must make use of the Document coordinate system, in which a vector of length=1 does correspond to 1 pixel. There are a few ways we could make this measurement of pixel length. Here's how we'll do it in this tutorial: 1. Begin with vertices for a rectangle with border width 0 (that is, vertex 1 is the same as vertex 2, 3=4, and so on). 2. In the vertex shader, first map the vertices to the document coordinate system using the visual->document transform. 3. Add/subtract the line width from the mapped vertices. 4. Map the rest of the way to render coordinates with a second transform: document->framebuffer->render. Note that this problem _cannot_ be solved using a simple scale factor! It is necessary to use these transformations in order to draw correctly when there is rotation or anosotropic scaling involved. """ from vispy import app, gloo, visuals, scene import numpy as np vertex_shader = """ void main() { // First map the vertex to document coordinates vec4 doc_pos = $visual_to_doc(vec4($position, 0, 1)); // Also need to map the adjustment direction vector, but this is tricky! // We need to adjust separately for each component of the vector: vec4 adjusted; if ( $adjust_dir.x == 0 ) { // If this is an outer vertex, no adjustment for line weight is needed. // (In fact, trying to make the adjustment would result in no // triangles being drawn, hence the if/else block) adjusted = doc_pos; } else { // Inner vertexes must be adjusted for line width, but this is // surprisingly tricky given that the rectangle may have been scaled // and rotated! 
vec4 doc_x = $visual_to_doc(vec4($adjust_dir.x, 0, 0, 0)) - $visual_to_doc(vec4(0, 0, 0, 0)); vec4 doc_y = $visual_to_doc(vec4(0, $adjust_dir.y, 0, 0)) - $visual_to_doc(vec4(0, 0, 0, 0)); doc_x = normalize(doc_x); doc_y = normalize(doc_y); // Now doc_x + doc_y points in the direction we need in order to // correct the line weight of _both_ segments, but the magnitude of // that correction is wrong. To correct it we first need to // measure the width that would result from using doc_x + doc_y: vec4 proj_y_x = dot(doc_x, doc_y) * doc_x; // project y onto x float cur_width = length(doc_y - proj_y_x); // measure current weight // And now we can adjust vertex position for line width: adjusted = doc_pos + ($line_width / cur_width) * (doc_x + doc_y); } // Finally map the remainder of the way to render coordinates gl_Position = $doc_to_render(adjusted); } """ fragment_shader = """ void main() { gl_FragColor = $color; } """ class MyRectVisual(visuals.Visual): """Visual that draws a rectangular outline. 
Parameters ---------- x : float x coordinate of rectangle origin y : float y coordinate of rectangle origin w : float width of rectangle h : float height of rectangle weight : float width of border (in px) """ def __init__(self, x, y, w, h, weight=4.0): visuals.Visual.__init__(self, vertex_shader, fragment_shader) # 10 vertices for 8 triangles (using triangle_strip) forming a # rectangular outline self.vert_buffer = gloo.VertexBuffer(np.array([ [x, y], [x, y], [x+w, y], [x+w, y], [x+w, y+h], [x+w, y+h], [x, y+h], [x, y+h], [x, y], [x, y], ], dtype=np.float32)) # Direction each vertex should move to correct for line width # (the length of this vector will be corrected in the shader) self.adj_buffer = gloo.VertexBuffer(np.array([ [0, 0], [1, 1], [0, 0], [-1, 1], [0, 0], [-1, -1], [0, 0], [1, -1], [0, 0], [1, 1], ], dtype=np.float32)) self.shared_program.vert['position'] = self.vert_buffer self.shared_program.vert['adjust_dir'] = self.adj_buffer self.shared_program.vert['line_width'] = weight self.shared_program.frag['color'] = (1, 0, 0, 1) self.set_gl_state(cull_face=False) self._draw_mode = 'triangle_strip' def _prepare_transforms(self, view): # Set the two transforms required by the vertex shader: tr = view.transforms view_vert = view.view_program.vert view_vert['visual_to_doc'] = tr.get_transform('visual', 'document') view_vert['doc_to_render'] = tr.get_transform('document', 'render') # As in the previous tutorial, we auto-generate a Visual+Node class for use # in the scenegraph. MyRect = scene.visuals.create_visual_node(MyRectVisual) # Finally we will test the visual by displaying in a scene. 
canvas = scene.SceneCanvas(keys='interactive', show=True) # This time we add a ViewBox to let the user zoom/pan view = canvas.central_widget.add_view() view.camera = 'panzoom' view.camera.rect = (0, 0, 800, 800) # ..and add the rects to the view instead of canvas.scene rects = [MyRect(100, 100, 200, 300, parent=view.scene), MyRect(500, 100, 200, 300, parent=view.scene)] # Again, rotate one rectangle to ensure the transforms are working as we # expect. tr = visuals.transforms.MatrixTransform() tr.rotate(25, (0, 0, 1)) rects[1].transform = tr # Add some text instructions text = scene.visuals.Text("Drag right mouse button to zoom.", color='w', anchor_x='left', parent=view, pos=(20, 30)) # ..and optionally start the event loop if __name__ == '__main__': import sys if sys.flags.interactive != 1: app.run()
bsd-3-clause
aldencolerain/mc2kernel
Documentation/networking/cxacru-cf.py
14668
1626
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.

import sys
import struct


def format_records(data):
    """Return the sysfs adsl_config string for a packed-le32 blob.

    Each 32-bit little-endian value becomes "<index-in-hex>=<value>",
    entries separated by single spaces (the index is formatted with
    "{0:x}", matching the original output).  Raises ValueError if the
    blob's length is not a multiple of 4.
    """
    if len(data) % 4:
        # Mirror the original error text: the short trailing read length.
        raise ValueError("read {0} not 4 bytes".format(len(data) % 4))
    values = struct.unpack("<{0}I".format(len(data) // 4), data)
    return " ".join("{0:x}={1}".format(i, v) for i, v in enumerate(values))


def main():
    # Bug fix: the old loop read 4-byte chunks from text-mode sys.stdin,
    # so under Python 3 struct.unpack() received str and raised TypeError.
    # Read raw bytes via the underlying binary buffer (getattr falls back
    # for Python 2, where stdin already yields bytes).
    stdin = getattr(sys.stdin, "buffer", sys.stdin)
    data = stdin.read()
    try:
        line = format_records(data)
    except ValueError as e:
        # Preserve the original behaviour on a truncated record:
        # terminate the partial output line, report, exit non-zero.
        sys.stdout.write("\n")
        sys.stderr.write("Error: {0}\n".format(e))
        sys.exit(1)
    sys.stdout.write(line + "\n")


if __name__ == "__main__":
    main()
gpl-2.0
martintrojer/mtorrent
mt_config.py
1
3068
# mtorrent
# Copyright (c) 2012 Martin Trojer <martin.trojer@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Configuration
#export PYTHONPATH=/usr/local/lib/python2.7/site-packages/

import libtorrent as LT
import mt_logger as L
import ConfigParser as CP
import os

MTORRENT_VERSION = "0.3.3"
SESSION_PATH = "session"
WATCH_PATH = "watch"

# Built-in fallback values; Config falls back to this table for any key
# that is missing from (or unparseable in) ~/.mtorrentrc.
default = {
    "log_level": L.WARN,
    "log_echo": False,          # echo log to STDOUT
    "write_html": False,

    "version": MTORRENT_VERSION,
    "version_long": ".oO mtorrent " + MTORRENT_VERSION +
                    " (libtorrent-rasterbar " + LT.version + ") Oo.",
    "version_short": "mtorrent" + MTORRENT_VERSION + "/libtorrent" + LT.version,

    # delays in seconds
    "state_update_delay": 2.5,
    "ui_update_delay": 0.5,
    "scan_update_delay": 10,
    "html_update_delay": 15,
    "message_delay": 10,

    "session_path": SESSION_PATH,
    "session_file": SESSION_PATH + "/mtorrent_session",
    "watch_path": WATCH_PATH,
    "html_file": "web/mtorrent_status.html",
    "log_file": "mtorrent.log",
    "lock_file": SESSION_PATH + "/mtorrent_lock",
    "save_path": ".",

    "storage_mode": LT.storage_mode_t.storage_mode_sparse,
    "dup_is_error": False,
    "auto_managed": True,
    "max_connections": 50,
    "max_uploads": -1,
    "ratio": 0.0,
    "upload_limit": 0,
    "download_limit": 0,
    "resolve_countries": False,
    "start_as_paused": False,
    "auto_upload_slots": True,
    "ann_all_trackers": False,
    "optimize_hash_speed": False,
    "disk_cache_algo": LT.disk_cache_algo_t.largest_contiguous,
    "dht_as_fallback": False,

    "listen_on": (6881, 6891),
    "use_dht": True,
    "use_upnp": True,
    "dht_router1": ("router.bittorrent.com", 6881),
    "dht_router2": ("router.utorrent.com", 6881),
    "dht_router3": ("router.bitcomet.com", 6881)
}


class Config:
    """Read-only mapping over ~/.mtorrentrc with `default` as fallback."""

    def __init__(self):
        self.c = CP.ConfigParser()
        # ConfigParser.read() silently ignores a missing rc file.
        self.c.read(os.environ["HOME"] + "/.mtorrentrc")

    def __getitem__(self, key):
        # Bug fix: the previous bare "except:" swallowed *everything*,
        # including KeyboardInterrupt/SystemExit.  Catch only the cases
        # the fallback is meant for, preserving the old behaviour:
        # missing section/option or an unparseable value -> default[key].
        try:
            r = self.c.get("mtorrent", key)
        except CP.Error:
            return default[key]
        try:
            # SECURITY: rc values are eval()'d so tuples and libtorrent
            # enums can be written directly in the config file.  This runs
            # arbitrary code from that file -- acceptable only because it
            # is user-owned; never feed untrusted text through here.
            return eval(r)
        except Exception:
            return default[key]
gpl-3.0
thezawad/kivy
kivy/input/providers/probesysfs.py
34
8792
'''
Auto Create Input Provider Config Entry for Available MT Hardware (linux only).
===============================================================================

Thanks to Marc Tardif for the probing code, taken from scan-for-mt-device.

The device discovery is done by this provider. However, the reading of
input can be performed by other providers like: hidinput, mtdev and
linuxwacom. mtdev is used prior to other providers. For more
information about mtdev, check :py:class:`~kivy.input.providers.mtdev`.

Here is an example of auto creation::

    [input]
    # using mtdev
    device_%(name)s = probesysfs,provider=mtdev
    # using hidinput
    device_%(name)s = probesysfs,provider=hidinput
    # using mtdev with a match on name
    device_%(name)s = probesysfs,provider=mtdev,match=acer

    # using hidinput with custom parameters to hidinput (all on one line)
    %(name)s = probesysfs,
        provider=hidinput,param=min_pressure=1,param=max_pressure=99

    # you can also match your wacom touchscreen
    touch = probesysfs,match=E3 Finger,provider=linuxwacom,
        select_all=1,param=mode=touch
    # and your wacom pen
    pen = probesysfs,match=E3 Pen,provider=linuxwacom,
        select_all=1,param=mode=pen

By default, ProbeSysfs module will enumerate hardware from the /sys/class/input
device, and configure hardware with ABS_MT_POSITION_X capability. But for
example, the wacom screen doesn't support this capability. You can prevent this
behavior by putting select_all=1 in your config line.
'''

__all__ = ('ProbeSysfsHardwareProbe', )

import os
from os.path import sep

if 'KIVY_DOC' in os.environ:

    ProbeSysfsHardwareProbe = None

else:
    from re import match, IGNORECASE
    from glob import glob
    from subprocess import Popen, PIPE
    from kivy.logger import Logger
    from kivy.input.provider import MotionEventProvider
    from kivy.input.providers.mouse import MouseMotionEventProvider
    from kivy.input.factory import MotionEventFactory
    from kivy.config import _is_rpi

    EventLoop = None

    # See linux/input.h
    ABS_MT_POSITION_X = 0x35

    # Module-level caches: /sys and xinput are scanned at most once.
    _cache_input = None
    _cache_xinput = None

    class Input(object):
        """Wrapper around one /sys/class/input/event* sysfs entry."""

        def __init__(self, path):
            query_xinput()
            self.path = path

        @property
        def device(self):
            """The /dev/input/eventXX node matching this sysfs entry."""
            base = os.path.basename(self.path)
            return os.path.join("/dev", "input", base)

        @property
        def name(self):
            """Kernel-reported human-readable device name."""
            path = os.path.join(self.path, "device", "name")
            return read_line(path)

        def get_capabilities(self):
            """Decode the hex "abs" capability words into a list of bools,
            indexed so that capabilities[N] is bit N of the mask."""
            path = os.path.join(self.path, "device", "capabilities", "abs")
            line = read_line(path)
            capabilities = []
            long_bit = getconf("LONG_BIT")
            for i, word in enumerate(line.split(" ")):
                word = int(word, 16)
                subcapabilities = [bool(word & 1 << i)
                                   for i in range(long_bit)]
                # words are most-significant first: prepend each decoded
                # word so bit numbering stays consistent.
                capabilities[:0] = subcapabilities

            return capabilities

        def has_capability(self, capability):
            """True if this device advertises the given ABS capability bit."""
            capabilities = self.get_capabilities()
            return len(capabilities) > capability and capabilities[capability]

        @property
        def is_mouse(self):
            return self.device in _cache_xinput

    def getout(*args):
        """Run a command, returning its stdout (bytes); b'' on failure."""
        try:
            return Popen(args, stdout=PIPE).communicate()[0]
        except OSError:
            # FIX: return bytes (was ''): Popen output is bytes on
            # Python 3 and callers compare against byte prefixes.
            return b''

    def getconf(var):
        """Return an integer system configuration value via getconf(1)."""
        output = getout("getconf", var)
        return int(output)

    def query_xinput():
        """Fill _cache_xinput with the /dev event nodes of enabled xinput
        pointer devices, so Input.is_mouse can exclude them."""
        global _cache_xinput
        if _cache_xinput is None:
            _cache_xinput = []
            devids = getout('xinput', '--list', '--id-only')
            for did in devids.splitlines():
                devprops = getout('xinput', '--list-props', did)
                evpath = None
                for prop in devprops.splitlines():
                    prop = prop.strip()
                    if (prop.startswith(b'Device Enabled') and
                            prop.endswith(b'0')):
                        evpath = None
                        break
                    if prop.startswith(b'Device Node'):
                        try:
                            # FIX: prop is bytes; split with a bytes
                            # separator and decode so the result compares
                            # equal to Input.device (a str). The old str
                            # separator '"' raised TypeError on Python 3,
                            # which was swallowed below — every device
                            # node was silently dropped.
                            evpath = prop.split(b'"')[1].decode('utf-8')
                        except Exception:
                            evpath = None

                if evpath:
                    _cache_xinput.append(evpath)

    def get_inputs(path):
        """Return (cached) Input wrappers for every event* entry in path."""
        global _cache_input
        if _cache_input is None:
            event_glob = os.path.join(path, "event*")
            _cache_input = [Input(x) for x in glob(event_glob)]
        return _cache_input

    def read_line(path):
        """Return the first line of a sysfs file, stripped."""
        f = open(path)
        try:
            return f.readline().strip()
        finally:
            f.close()

    class ProbeSysfsHardwareProbe(MotionEventProvider):

        def __new__(self, device, args):
            # hack to not return an instance of this provider.
            # :)
            instance = super(ProbeSysfsHardwareProbe, self).__new__(self)
            instance.__init__(device, args)

        def __init__(self, device, args):
            super(ProbeSysfsHardwareProbe, self).__init__(device, args)
            self.provider = 'mtdev'
            self.match = None
            self.input_path = '/sys/class/input'
            self.select_all = True if _is_rpi else False
            self.use_regex = False
            self.args = []

            args = args.split(',')
            for arg in args:
                if arg == '':
                    continue
                arg = arg.split('=', 1)
                # ensure it's a key = value
                if len(arg) != 2:
                    Logger.error('ProbeSysfs: invalid parameters %s, not'
                                 ' key=value format' % arg)
                    continue

                key, value = arg
                if key == 'match':
                    self.match = value
                elif key == 'provider':
                    self.provider = value
                elif key == 'use_regex':
                    # NOTE(review): bool('0') is True, so any non-empty
                    # value enables these flags — confirm before tightening
                    # to bool(int(value)).
                    self.use_regex = bool(value)
                elif key == 'select_all':
                    self.select_all = bool(value)
                elif key == 'param':
                    self.args.append(value)
                else:
                    Logger.error('ProbeSysfs: unknown %s option' % key)
                    continue

            self.probe()

        def should_use_mouse(self):
            # Only treat xinput mice as touch candidates when no dedicated
            # mouse provider is active.
            return not any(p for p in EventLoop.input_providers
                           if isinstance(p, MouseMotionEventProvider))

        def probe(self):
            global EventLoop
            from kivy.base import EventLoop

            inputs = get_inputs(self.input_path)
            Logger.debug('ProbeSysfs: using probesysfs!')

            use_mouse = self.should_use_mouse()

            if not self.select_all:
                inputs = [x for x in inputs if
                          x.has_capability(ABS_MT_POSITION_X)
                          and (use_mouse or not x.is_mouse)]
            for device in inputs:
                Logger.debug('ProbeSysfs: found device: %s at %s' % (
                    device.name, device.device))

                # must ignore ?
                if self.match:
                    if self.use_regex:
                        if not match(self.match, device.name, IGNORECASE):
                            Logger.debug('ProbeSysfs: device not match the'
                                         ' rule in config, ignoring.')
                            continue
                    else:
                        if self.match not in device.name:
                            continue

                Logger.info('ProbeSysfs: device match: %s' % device.device)

                d = device.device
                devicename = self.device % dict(name=d.split(sep)[-1])

                provider = MotionEventFactory.get(self.provider)
                if provider is None:
                    Logger.info('ProbeSysfs: unable to found provider %s' %
                                self.provider)
                    Logger.info('ProbeSysfs: fallback on hidinput')
                    provider = MotionEventFactory.get('hidinput')
                if provider is None:
                    Logger.critical('ProbeSysfs: no input provider found'
                                    ' to handle this device !')
                    continue

                instance = provider(devicename, '%s,%s' % (
                    device.device, ','.join(self.args)))
                if instance:
                    EventLoop.add_input_provider(instance)

    MotionEventFactory.register('probesysfs', ProbeSysfsHardwareProbe)
mit
dgjustice/ansible
lib/ansible/plugins/strategy/debug.py
22
5050
from __future__ import (absolute_import, division, print_function) __metaclass__ = type import cmd import pprint import sys from ansible.plugins.strategy.linear import StrategyModule as LinearStrategyModule from ansible.compat.six.moves import reduce try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class NextAction(object): """ The next action after an interpreter's exit. """ REDO = 1 CONTINUE = 2 EXIT = 3 def __init__(self, result=EXIT): self.result = result class StrategyModule(LinearStrategyModule): def __init__(self, tqm): self.curr_tqm = tqm super(StrategyModule, self).__init__(tqm) def _queue_task(self, host, task, task_vars, play_context): self.curr_host = host self.curr_task = task self.curr_task_vars = task_vars self.curr_play_context = play_context super(StrategyModule, self)._queue_task(host, task, task_vars, play_context) def _process_pending_results(self, iterator, one_pass=False, max_passes=None): if not hasattr(self, "curr_host"): return super(StrategyModule, self)._process_pending_results(iterator, one_pass, max_passes) prev_host_state = iterator.get_host_state(self.curr_host) results = super(StrategyModule, self)._process_pending_results(iterator, one_pass) while self._need_debug(results): next_action = NextAction() dbg = Debugger(self, results, next_action) dbg.cmdloop() if next_action.result == NextAction.REDO: # rollback host state self.curr_tqm.clear_failed_hosts() iterator._host_states[self.curr_host.name] = prev_host_state if reduce(lambda total, res : res.is_failed() or total, results, False): self._tqm._stats.failures[self.curr_host.name] -= 1 elif reduce(lambda total, res : res.is_unreachable() or total, results, False): self._tqm._stats.dark[self.curr_host.name] -= 1 # redo super(StrategyModule, self)._queue_task(self.curr_host, self.curr_task, self.curr_task_vars, self.curr_play_context) results = super(StrategyModule, self)._process_pending_results(iterator, one_pass) 
elif next_action.result == NextAction.CONTINUE: break elif next_action.result == NextAction.EXIT: exit(1) return results def _need_debug(self, results): return reduce(lambda total, res : res.is_failed() or res.is_unreachable() or total, results, False) class Debugger(cmd.Cmd): prompt = '(debug) ' # debugger prompt_continuous = '> ' # multiple lines def __init__(self, strategy_module, results, next_action): # cmd.Cmd is old-style class cmd.Cmd.__init__(self) self.intro = "Debugger invoked" self.scope = {} self.scope['task'] = strategy_module.curr_task self.scope['vars'] = strategy_module.curr_task_vars self.scope['host'] = strategy_module.curr_host self.scope['result'] = results[0]._result self.scope['results'] = results # for debug of this debugger self.next_action = next_action def cmdloop(self): try: cmd.Cmd.cmdloop(self) except KeyboardInterrupt: pass def do_EOF(self, args): return self.do_quit(args) def do_quit(self, args): display.display('aborted') self.next_action.result = NextAction.EXIT return True do_q = do_quit def do_continue(self, args): self.next_action.result = NextAction.CONTINUE return True do_c = do_continue def do_redo(self, args): self.next_action.result = NextAction.REDO return True do_r = do_redo def evaluate(self, args): try: return eval(args, globals(), self.scope) except: t, v = sys.exc_info()[:2] if isinstance(t, str): exc_type_name = t else: exc_type_name = t.__name__ display.display('***%s:%s' % (exc_type_name, repr(v))) raise def do_p(self, args): try: result = self.evaluate(args) display.display(pprint.pformat(result)) except: pass def execute(self, args): try: code = compile(args + '\n', '<stdin>', 'single') exec(code, globals(), self.scope) except: t, v = sys.exc_info()[:2] if isinstance(t, str): exc_type_name = t else: exc_type_name = t.__name__ display.display('***%s:%s' % (exc_type_name, repr(v))) raise def default(self, line): try: self.execute(line) display.display(pprint.pformat(result)) except: pass
gpl-3.0
GitHublong/hue
desktop/core/ext-py/boto-2.38.0/boto/cloudfront/distribution.py
92
31275
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import uuid import base64 import time from boto.compat import six, json from boto.cloudfront.identity import OriginAccessIdentity from boto.cloudfront.object import Object, StreamingObject from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners from boto.cloudfront.logging import LoggingInfo from boto.cloudfront.origin import S3Origin, CustomOrigin from boto.s3.acl import ACL class DistributionConfig(object): def __init__(self, connection=None, origin=None, enabled=False, caller_reference='', cnames=None, comment='', trusted_signers=None, default_root_object=None, logging=None): """ :param origin: Origin information to associate with the distribution. If your distribution will use an Amazon S3 origin, then this should be an S3Origin object. If your distribution will use a custom origin (non Amazon S3), then this should be a CustomOrigin object. 
        :type origin: :class:`boto.cloudfront.origin.S3Origin` or
                      :class:`boto.cloudfront.origin.CustomOrigin`

        :param enabled: Whether the distribution is enabled to accept
                        end user requests for content.
        :type enabled: bool

        :param caller_reference: A unique number that ensures the
                                 request can't be replayed.  If no
                                 caller_reference is provided, boto
                                 will generate a type 4 UUID for use
                                 as the caller reference.
        :type caller_reference: str

        :param cnames: A CNAME alias you want to associate with this
                       distribution. You can have up to 10 CNAME aliases
                       per distribution.
        :type cnames: list of str

        :param comment: Any comments you want to include about the
                        distribution.
        :type comment: str

        :param trusted_signers: Specifies any AWS accounts you want to
                                permit to create signed URLs for private
                                content. If you want the distribution to
                                use signed URLs, this should contain a
                                TrustedSigners object; if you want the
                                distribution to use basic URLs, leave
                                this None.
        :type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigners`

        :param default_root_object: Designates a default root object.
                                    Only include a DefaultRootObject value
                                    if you are going to assign a default
                                    root object for the distribution.
        :type default_root_object: str

        :param logging: Controls whether access logs are written for the
                        distribution. If you want to turn on access logs,
                        this should contain a LoggingInfo object; otherwise
                        it should contain None.
:type logging: :class`boto.cloudfront.logging.LoggingInfo` """ self.connection = connection self.origin = origin self.enabled = enabled if caller_reference: self.caller_reference = caller_reference else: self.caller_reference = str(uuid.uuid4()) self.cnames = [] if cnames: self.cnames = cnames self.comment = comment self.trusted_signers = trusted_signers self.logging = logging self.default_root_object = default_root_object def __repr__(self): return "DistributionConfig:%s" % self.origin def to_xml(self): s = '<?xml version="1.0" encoding="UTF-8"?>\n' s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n' if self.origin: s += self.origin.to_xml() s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference for cname in self.cnames: s += ' <CNAME>%s</CNAME>\n' % cname if self.comment: s += ' <Comment>%s</Comment>\n' % self.comment s += ' <Enabled>' if self.enabled: s += 'true' else: s += 'false' s += '</Enabled>\n' if self.trusted_signers: s += '<TrustedSigners>\n' for signer in self.trusted_signers: if signer == 'Self': s += ' <Self></Self>\n' else: s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer s += '</TrustedSigners>\n' if self.logging: s += '<Logging>\n' s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix s += '</Logging>\n' if self.default_root_object: dro = self.default_root_object s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro s += '</DistributionConfig>\n' return s def startElement(self, name, attrs, connection): if name == 'TrustedSigners': self.trusted_signers = TrustedSigners() return self.trusted_signers elif name == 'Logging': self.logging = LoggingInfo() return self.logging elif name == 'S3Origin': self.origin = S3Origin() return self.origin elif name == 'CustomOrigin': self.origin = CustomOrigin() return self.origin else: return None def endElement(self, name, value, connection): if name == 'CNAME': self.cnames.append(value) elif name == 
'Comment': self.comment = value elif name == 'Enabled': if value.lower() == 'true': self.enabled = True else: self.enabled = False elif name == 'CallerReference': self.caller_reference = value elif name == 'DefaultRootObject': self.default_root_object = value else: setattr(self, name, value) class StreamingDistributionConfig(DistributionConfig): def __init__(self, connection=None, origin='', enabled=False, caller_reference='', cnames=None, comment='', trusted_signers=None, logging=None): super(StreamingDistributionConfig, self).__init__(connection=connection, origin=origin, enabled=enabled, caller_reference=caller_reference, cnames=cnames, comment=comment, trusted_signers=trusted_signers, logging=logging) def to_xml(self): s = '<?xml version="1.0" encoding="UTF-8"?>\n' s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n' if self.origin: s += self.origin.to_xml() s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference for cname in self.cnames: s += ' <CNAME>%s</CNAME>\n' % cname if self.comment: s += ' <Comment>%s</Comment>\n' % self.comment s += ' <Enabled>' if self.enabled: s += 'true' else: s += 'false' s += '</Enabled>\n' if self.trusted_signers: s += '<TrustedSigners>\n' for signer in self.trusted_signers: if signer == 'Self': s += ' <Self/>\n' else: s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer s += '</TrustedSigners>\n' if self.logging: s += '<Logging>\n' s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix s += '</Logging>\n' s += '</StreamingDistributionConfig>\n' return s class DistributionSummary(object): def __init__(self, connection=None, domain_name='', id='', last_modified_time=None, status='', origin=None, cname='', comment='', enabled=False): self.connection = connection self.domain_name = domain_name self.id = id self.last_modified_time = last_modified_time self.status = status self.origin = origin self.enabled = enabled 
self.cnames = [] if cname: self.cnames.append(cname) self.comment = comment self.trusted_signers = None self.etag = None self.streaming = False def __repr__(self): return "DistributionSummary:%s" % self.domain_name def startElement(self, name, attrs, connection): if name == 'TrustedSigners': self.trusted_signers = TrustedSigners() return self.trusted_signers elif name == 'S3Origin': self.origin = S3Origin() return self.origin elif name == 'CustomOrigin': self.origin = CustomOrigin() return self.origin return None def endElement(self, name, value, connection): if name == 'Id': self.id = value elif name == 'Status': self.status = value elif name == 'LastModifiedTime': self.last_modified_time = value elif name == 'DomainName': self.domain_name = value elif name == 'Origin': self.origin = value elif name == 'CNAME': self.cnames.append(value) elif name == 'Comment': self.comment = value elif name == 'Enabled': if value.lower() == 'true': self.enabled = True else: self.enabled = False elif name == 'StreamingDistributionSummary': self.streaming = True else: setattr(self, name, value) def get_distribution(self): return self.connection.get_distribution_info(self.id) class StreamingDistributionSummary(DistributionSummary): def get_distribution(self): return self.connection.get_streaming_distribution_info(self.id) class Distribution(object): def __init__(self, connection=None, config=None, domain_name='', id='', last_modified_time=None, status=''): self.connection = connection self.config = config self.domain_name = domain_name self.id = id self.last_modified_time = last_modified_time self.status = status self.in_progress_invalidation_batches = 0 self.active_signers = None self.etag = None self._bucket = None self._object_class = Object def __repr__(self): return "Distribution:%s" % self.domain_name def startElement(self, name, attrs, connection): if name == 'DistributionConfig': self.config = DistributionConfig() return self.config elif name == 'ActiveTrustedSigners': 
self.active_signers = ActiveTrustedSigners() return self.active_signers else: return None def endElement(self, name, value, connection): if name == 'Id': self.id = value elif name == 'LastModifiedTime': self.last_modified_time = value elif name == 'Status': self.status = value elif name == 'InProgressInvalidationBatches': self.in_progress_invalidation_batches = int(value) elif name == 'DomainName': self.domain_name = value else: setattr(self, name, value) def update(self, enabled=None, cnames=None, comment=None): """ Update the configuration of the Distribution. The only values of the DistributionConfig that can be directly updated are: * CNAMES * Comment * Whether the Distribution is enabled or not Any changes to the ``trusted_signers`` or ``origin`` properties of this distribution's current config object will also be included in the update. Therefore, to set the origin access identity for this distribution, set ``Distribution.config.origin.origin_access_identity`` before calling this update method. :type enabled: bool :param enabled: Whether the Distribution is active or not. :type cnames: list of str :param cnames: The DNS CNAME's associated with this Distribution. Maximum of 10 values. :type comment: str or unicode :param comment: The comment associated with the Distribution. """ new_config = DistributionConfig(self.connection, self.config.origin, self.config.enabled, self.config.caller_reference, self.config.cnames, self.config.comment, self.config.trusted_signers, self.config.default_root_object) if enabled is not None: new_config.enabled = enabled if cnames is not None: new_config.cnames = cnames if comment is not None: new_config.comment = comment self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config) self.config = new_config self._object_class = Object def enable(self): """ Activate the Distribution. A convenience wrapper around the update method. """ self.update(enabled=True) def disable(self): """ Deactivate the Distribution. 
A convenience wrapper around the update method. """ self.update(enabled=False) def delete(self): """ Delete this CloudFront Distribution. The content associated with the Distribution is not deleted from the underlying Origin bucket in S3. """ self.connection.delete_distribution(self.id, self.etag) def _get_bucket(self): if isinstance(self.config.origin, S3Origin): if not self._bucket: bucket_dns_name = self.config.origin.dns_name bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '') from boto.s3.connection import S3Connection s3 = S3Connection(self.connection.aws_access_key_id, self.connection.aws_secret_access_key, proxy=self.connection.proxy, proxy_port=self.connection.proxy_port, proxy_user=self.connection.proxy_user, proxy_pass=self.connection.proxy_pass) self._bucket = s3.get_bucket(bucket_name) self._bucket.distribution = self self._bucket.set_key_class(self._object_class) return self._bucket else: raise NotImplementedError('Unable to get_objects on CustomOrigin') def get_objects(self): """ Return a list of all content objects in this distribution. :rtype: list of :class:`boto.cloudfront.object.Object` :return: The content objects """ bucket = self._get_bucket() objs = [] for key in bucket: objs.append(key) return objs def set_permissions(self, object, replace=False): """ Sets the S3 ACL grants for the given object to the appropriate value based on the type of Distribution. If the Distribution is serving private content the ACL will be set to include the Origin Access Identity associated with the Distribution. If the Distribution is serving public content the content will be set up with "public-read". :type object: :class:`boto.cloudfront.object.Object` :param enabled: The Object whose ACL is being set :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. 
If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity. """ if isinstance(self.config.origin, S3Origin): if self.config.origin.origin_access_identity: id = self.config.origin.origin_access_identity.split('/')[-1] oai = self.connection.get_origin_access_identity_info(id) policy = object.get_acl() if replace: policy.acl = ACL() policy.acl.add_user_grant('READ', oai.s3_user_id) object.set_acl(policy) else: object.set_canned_acl('public-read') def set_permissions_all(self, replace=False): """ Sets the S3 ACL grants for all objects in the Distribution to the appropriate value based on the type of Distribution. :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity. """ bucket = self._get_bucket() for key in bucket: self.set_permissions(key, replace) def add_object(self, name, content, headers=None, replace=True): """ Adds a new content object to the Distribution. The content for the object will be copied to a new Key in the S3 Bucket and the permissions will be set appropriately for the type of Distribution. :type name: str or unicode :param name: The name or key of the new object. :type content: file-like object :param content: A file-like object that contains the content for the new object. :type headers: dict :param headers: A dictionary containing additional headers you would like associated with the new object in S3. :rtype: :class:`boto.cloudfront.object.Object` :return: The newly created object. 
""" if self.config.origin.origin_access_identity: policy = 'private' else: policy = 'public-read' bucket = self._get_bucket() object = bucket.new_key(name) object.set_contents_from_file(content, headers=headers, policy=policy) if self.config.origin.origin_access_identity: self.set_permissions(object, replace) return object def create_signed_url(self, url, keypair_id, expire_time=None, valid_after_time=None, ip_address=None, policy_url=None, private_key_file=None, private_key_string=None): """ Creates a signed CloudFront URL that is only valid within the specified parameters. :type url: str :param url: The URL of the protected object. :type keypair_id: str :param keypair_id: The keypair ID of the Amazon KeyPair used to sign theURL. This ID MUST correspond to the private key specified with private_key_file or private_key_string. :type expire_time: int :param expire_time: The expiry time of the URL. If provided, the URL will expire after the time has passed. If not provided the URL will never expire. Format is a unix epoch. Use time.time() + duration_in_sec. :type valid_after_time: int :param valid_after_time: If provided, the URL will not be valid until after valid_after_time. Format is a unix epoch. Use time.time() + secs_until_valid. :type ip_address: str :param ip_address: If provided, only allows access from the specified IP address. Use '192.168.0.10' for a single IP or use '192.168.0.0/24' CIDR notation for a subnet. :type policy_url: str :param policy_url: If provided, allows the signature to contain wildcard globs in the URL. For example, you could provide: 'http://example.com/media/\*' and the policy and signature would allow access to all contents of the media subdirectory. If not specified, only allow access to the exact url provided in 'url'. :type private_key_file: str or file object. :param private_key_file: If provided, contains the filename of the private key file used for signing or an open file object containing the private key contents. 
Only one of private_key_file or private_key_string can be provided. :type private_key_string: str :param private_key_string: If provided, contains the private key string used for signing. Only one of private_key_file or private_key_string can be provided. :rtype: str :return: The signed URL. """ # Get the required parameters params = self._create_signing_params( url=url, keypair_id=keypair_id, expire_time=expire_time, valid_after_time=valid_after_time, ip_address=ip_address, policy_url=policy_url, private_key_file=private_key_file, private_key_string=private_key_string) #combine these into a full url if "?" in url: sep = "&" else: sep = "?" signed_url_params = [] for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]: if key in params: param = "%s=%s" % (key, params[key]) signed_url_params.append(param) signed_url = url + sep + "&".join(signed_url_params) return signed_url def _create_signing_params(self, url, keypair_id, expire_time=None, valid_after_time=None, ip_address=None, policy_url=None, private_key_file=None, private_key_string=None): """ Creates the required URL parameters for a signed URL. """ params = {} # Check if we can use a canned policy if expire_time and not valid_after_time and not ip_address and not policy_url: # we manually construct this policy string to ensure formatting # matches signature policy = self._canned_policy(url, expire_time) params["Expires"] = str(expire_time) else: # If no policy_url is specified, default to the full url. 
if policy_url is None: policy_url = url # Can't use canned policy policy = self._custom_policy(policy_url, expires=expire_time, valid_after=valid_after_time, ip_address=ip_address) encoded_policy = self._url_base64_encode(policy) params["Policy"] = encoded_policy #sign the policy signature = self._sign_string(policy, private_key_file, private_key_string) #now base64 encode the signature (URL safe as well) encoded_signature = self._url_base64_encode(signature) params["Signature"] = encoded_signature params["Key-Pair-Id"] = keypair_id return params @staticmethod def _canned_policy(resource, expires): """ Creates a canned policy string. """ policy = ('{"Statement":[{"Resource":"%(resource)s",' '"Condition":{"DateLessThan":{"AWS:EpochTime":' '%(expires)s}}}]}' % locals()) return policy @staticmethod def _custom_policy(resource, expires=None, valid_after=None, ip_address=None): """ Creates a custom policy string based on the supplied parameters. """ condition = {} # SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy # The 'DateLessThan' property is required. if not expires: # Defaults to ONE day expires = int(time.time()) + 86400 condition["DateLessThan"] = {"AWS:EpochTime": expires} if valid_after: condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after} if ip_address: if '/' not in ip_address: ip_address += "/32" condition["IpAddress"] = {"AWS:SourceIp": ip_address} policy = {"Statement": [{ "Resource": resource, "Condition": condition}]} return json.dumps(policy, separators=(",", ":")) @staticmethod def _sign_string(message, private_key_file=None, private_key_string=None): """ Signs a string for use with Amazon CloudFront. Requires the rsa library be installed. 
""" try: import rsa except ImportError: raise NotImplementedError("Boto depends on the python rsa " "library to generate signed URLs for " "CloudFront") # Make sure only one of private_key_file and private_key_string is set if private_key_file and private_key_string: raise ValueError("Only specify the private_key_file or the private_key_string not both") if not private_key_file and not private_key_string: raise ValueError("You must specify one of private_key_file or private_key_string") # If private_key_file is a file name, open it and read it if private_key_string is None: if isinstance(private_key_file, six.string_types): with open(private_key_file, 'r') as file_handle: private_key_string = file_handle.read() # Otherwise, treat it like a file else: private_key_string = private_key_file.read() # Sign it! private_key = rsa.PrivateKey.load_pkcs1(private_key_string) signature = rsa.sign(str(message), private_key, 'SHA-1') return signature @staticmethod def _url_base64_encode(msg): """ Base64 encodes a string using the URL-safe characters specified by Amazon. """ msg_base64 = base64.b64encode(msg) msg_base64 = msg_base64.replace('+', '-') msg_base64 = msg_base64.replace('=', '_') msg_base64 = msg_base64.replace('/', '~') return msg_base64 class StreamingDistribution(Distribution): def __init__(self, connection=None, config=None, domain_name='', id='', last_modified_time=None, status=''): super(StreamingDistribution, self).__init__(connection, config, domain_name, id, last_modified_time, status) self._object_class = StreamingObject def startElement(self, name, attrs, connection): if name == 'StreamingDistributionConfig': self.config = StreamingDistributionConfig() return self.config else: return super(StreamingDistribution, self).startElement(name, attrs, connection) def update(self, enabled=None, cnames=None, comment=None): """ Update the configuration of the StreamingDistribution. 
The only values of the StreamingDistributionConfig that can be directly updated are: * CNAMES * Comment * Whether the Distribution is enabled or not Any changes to the ``trusted_signers`` or ``origin`` properties of this distribution's current config object will also be included in the update. Therefore, to set the origin access identity for this distribution, set ``StreamingDistribution.config.origin.origin_access_identity`` before calling this update method. :type enabled: bool :param enabled: Whether the StreamingDistribution is active or not. :type cnames: list of str :param cnames: The DNS CNAME's associated with this Distribution. Maximum of 10 values. :type comment: str or unicode :param comment: The comment associated with the Distribution. """ new_config = StreamingDistributionConfig(self.connection, self.config.origin, self.config.enabled, self.config.caller_reference, self.config.cnames, self.config.comment, self.config.trusted_signers) if enabled is not None: new_config.enabled = enabled if cnames is not None: new_config.cnames = cnames if comment is not None: new_config.comment = comment self.etag = self.connection.set_streaming_distribution_config(self.id, self.etag, new_config) self.config = new_config self._object_class = StreamingObject def delete(self): self.connection.delete_streaming_distribution(self.id, self.etag)
apache-2.0
mattvonrocketstein/smash
smashlib/ipy3x/parallel/tests/test_mongodb.py
1
1544
"""Tests for mongodb backend Authors: * Min RK """ #------------------------------------------------------------------------- # Copyright (C) 2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #------------------------------------------------------------------------- #------------------------------------------------------------------------- # Imports #------------------------------------------------------------------------- import os from unittest import TestCase from nose import SkipTest from pymongo import Connection from IPython.parallel.controller.mongodb import MongoDB from . import test_db conn_kwargs = {} if 'DB_IP' in os.environ: conn_kwargs['host'] = os.environ['DB_IP'] if 'DBA_MONGODB_ADMIN_URI' in os.environ: # On ShiningPanda, we need a username and password to connect. They are # passed in a mongodb:// URI. conn_kwargs['host'] = os.environ['DBA_MONGODB_ADMIN_URI'] if 'DB_PORT' in os.environ: conn_kwargs['port'] = int(os.environ['DB_PORT']) try: c = Connection(**conn_kwargs) except Exception: c = None class TestMongoBackend(test_db.TaskDBTest, TestCase): """MongoDB backend tests""" def create_db(self): try: return MongoDB(database='iptestdb', _connection=c) except Exception: raise SkipTest("Couldn't connect to mongodb") def teardown(self): if c is not None: c.drop_database('iptestdb')
mit
jamezpolley/pip
pip/_vendor/requests/packages/chardet/big5freq.py
3133
82594
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # Big5 frequency table # by Taiwan's Mandarin Promotion Council # <http://www.edu.tw:81/mandr/> # # 128 --> 0.42261 # 256 --> 0.57851 # 512 --> 0.74851 # 1024 --> 0.89384 # 2048 --> 0.97583 # # Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98 # Random Distribution Ration = 512/(5401-512)=0.105 # # Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75 #Char to FreqOrder table BIG5_TABLE_SIZE = 5376 Big5CharToFreqOrder = ( 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16 3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32 1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48 63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64 3682, 
3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80 4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96 5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128 179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160 2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176 1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192 3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208 706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224 1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240 3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256 2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288 3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304 1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320 5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336 266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352 5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368 1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384 32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416 3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432 3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 
214,1709,4521, 804, 35, 707, # 448 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464 2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480 2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528 3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544 1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560 1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576 1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592 2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624 4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640 1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656 5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672 2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704 98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720 523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736 710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752 5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784 1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 
749,1837, # 816 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832 5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848 1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880 3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896 4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912 3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928 279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960 1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976 4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992 3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008 3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024 2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040 5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056 3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072 5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088 1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104 2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120 1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136 78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152 1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168 4555,2915,5114,4238,1726, 320,5115,3996,3046, 
788,2996,5116,2831,1774,1327,2873, # 1184 3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216 165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248 2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264 5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280 1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296 2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312 1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328 1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344 5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360 5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376 5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392 3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408 4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424 4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440 2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456 5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472 3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504 5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520 5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536 
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552 2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568 3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584 4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600 5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616 3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632 4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648 1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664 1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680 4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696 1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728 1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744 1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760 3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792 5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808 2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824 1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840 1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856 5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888 4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 
652,4272,4273, # 1904 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920 2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952 1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968 1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984 730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000 4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016 4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032 1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048 3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064 5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080 5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096 1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112 2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128 1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144 3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160 2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176 3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192 2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208 4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224 4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240 3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256 97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 
866, 740,1694,5264,2204,2843, # 2272 3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304 3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320 4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336 3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352 1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368 5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400 5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416 1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432 391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448 4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464 4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496 2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512 2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528 3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544 1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560 4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576 2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592 1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608 1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624 
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640 3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656 1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672 5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688 1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704 4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720 1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752 1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768 4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784 4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800 2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816 1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832 4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864 5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880 2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896 3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912 4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944 5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960 5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976 1238,2586,3109,1259,5361, 
700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992 4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008 4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024 2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040 3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056 3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072 2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088 1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104 4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120 3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136 3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152 2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168 4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184 5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200 3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216 2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232 3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248 1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264 2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280 3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296 4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312 2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328 2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344 
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360 1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376 2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392 1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408 3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424 4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440 2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456 3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472 3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488 2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504 4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520 2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536 3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552 4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568 5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584 3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616 1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632 4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648 1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664 4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680 5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696 
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712 5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728 5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744 2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760 3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776 2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792 2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824 1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840 4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856 3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872 3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904 2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936 2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952 4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968 1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984 4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000 1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016 3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048 3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 
388,4363,5526,2491, 802, # 4064 5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080 5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096 3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112 3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128 1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144 2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160 5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176 1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192 1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208 3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224 919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240 1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256 4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272 5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288 2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304 3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336 1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352 2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368 2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384 5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400 5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416 5594,2814,2032,1014,4121, 
213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432 2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448 2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464 1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480 4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496 3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512 3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528 4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544 4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560 2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576 2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592 5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608 4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624 5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640 4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688 1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704 3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720 4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736 1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752 5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768 2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784 
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800 3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816 5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832 1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848 3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864 5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880 1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896 5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912 2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928 3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944 2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960 3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976 3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992 3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008 4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040 2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056 4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072 3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088 5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104 1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120 5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 
636,4791,1856,3940, 760, # 5152 1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184 4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200 1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216 4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232 1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264 3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280 4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296 5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328 3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360 2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512 #Everything below is of no interest for detection purpose 2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392 2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408 5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424 5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440 5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456 5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472 5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488 
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504 5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520 5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536 5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552 5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568 5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584 5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600 6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616 6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632 6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648 6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664 6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680 6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696 6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712 6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728 6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744 6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760 6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776 6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792 6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808 6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824 6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840 
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856 6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872 6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888 6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904 6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920 6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936 6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952 6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968 6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984 6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000 6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016 6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032 6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048 6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064 6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080 6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096 6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112 6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128 6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144 6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160 6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176 6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192 
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208 6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224 6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240 6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256 3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272 6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288 6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304 3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320 6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336 6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352 6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368 6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384 6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400 6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416 6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432 4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448 6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464 6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480 3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496 6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512 6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528 6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544 
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560 6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576 6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592 6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608 6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624 6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640 6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656 6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672 7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688 7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704 7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720 7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736 7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752 7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768 7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784 7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800 7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816 7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832 7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848 7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864 7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880 7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896 
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912 7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928 7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944 7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960 7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976 7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992 7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008 7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024 7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040 7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056 7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072 7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088 7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104 7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120 7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136 7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152 7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168 7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184 7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200 7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216 7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232 7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248 
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264 7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280 7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296 7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312 7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328 7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344 7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360 7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376 7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392 7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408 7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424 7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440 3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456 7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472 7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488 7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504 7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520 4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536 7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552 7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568 7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584 7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600 
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616 7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632 7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648 7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664 7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680 7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696 7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712 8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728 8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744 8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760 8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776 8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792 8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808 8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824 8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840 8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856 8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872 8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888 8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904 8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920 8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936 8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952 
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968 8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984 8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000 8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016 8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032 8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048 8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064 8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080 8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096 8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112 8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128 8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144 8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160 8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176 8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192 8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208 8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224 8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240 8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256 8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272 8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288 8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304 
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320 8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336 8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352 8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368 8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384 8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400 8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416 8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432 8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448 8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464 8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480 8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496 8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512 8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528 8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544 8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560 8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576 8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592 8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608 8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624 8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640 8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656 
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672 8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688 4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704 8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720 8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736 8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752 8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768 9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784 9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800 9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816 9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832 9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848 9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864 9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880 9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896 9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912 9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928 9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944 9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960 9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976 9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992 9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008 
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024 9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040 9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056 9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072 9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088 9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104 9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120 9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136 9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152 9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168 9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184 9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200 9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216 9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232 9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248 9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264 9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280 9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296 9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312 9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328 9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344 9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360 
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376 3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392 9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408 9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424 9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440 4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456 9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472 9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488 9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504 9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520 9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536 9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552 9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568 9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584 9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600 9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616 9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632 9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648 9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664 9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680 9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696 9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712 
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728 9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744 9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760 9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776 9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792 9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808 9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824 10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840 10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856 10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872 10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888 10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904 10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920 10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936 10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952 10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968 4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984 10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000 10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016 10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032 
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048 10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064 10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080 10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096 10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112 4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128 10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144 10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160 10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176 10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192 10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208 10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224 10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240 10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256 10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272 10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288 10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304 10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320 10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336 
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352 10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368 10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384 10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400 4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416 10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432 10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448 10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464 10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480 10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496 10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512 10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528 10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544 10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560 10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576 10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592 10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608 10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624 10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640 
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656 10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672 10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688 10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704 10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720 10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736 10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752 10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768 10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784 10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800 10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816 10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832 10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848 10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864 10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880 10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896 11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912 11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928 11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944 
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960 11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976 11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992 11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008 11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024 11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040 11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056 11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072 11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088 11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104 11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120 11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136 11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152 11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168 11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184 11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200 11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216 11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232 11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248 
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264 11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280 11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296 11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312 11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328 11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344 11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360 11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376 11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392 11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408 11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424 11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440 11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456 11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472 4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488 11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504 11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520 11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536 11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552 
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568 11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584 11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600 11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616 11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632 11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648 11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664 11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680 11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696 11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712 11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728 11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744 11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760 11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776 11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792 11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808 11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824 11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840 11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856 
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872 11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888 11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904 11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920 11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936 12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952 12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968 12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984 12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000 12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016 12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032 12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048 12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064 12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080 12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096 12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112 12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128 12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144 12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160 
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176 4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192 4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208 4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224 12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240 12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256 12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272 12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288 12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304 12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320 12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336 12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352 12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368 12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384 12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400 12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416 12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432 12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448 12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464 
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480 12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496 12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512 12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528 12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544 12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560 12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576 12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592 12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608 12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624 12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640 12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656 12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672 12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688 12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704 12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720 12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736 12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752 12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768 
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784 12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800 12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816 12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832 12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848 12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864 12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880 12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896 12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912 12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928 12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944 12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960 12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976 4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992 13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008 13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024 13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040 13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056 13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072 
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088 13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104 4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120 13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136 13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152 13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168 13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184 13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200 13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216 13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232 13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248 13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264 13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280 13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296 13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312 13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328 13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344 13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360 5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376 
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392 13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408 13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424 13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440 13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456 13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472 13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488 13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504 13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520 13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536 13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552 13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568 13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584 13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600 13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616 13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632 13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648 13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664 13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680 
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696 13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712 13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728 13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744 13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760 13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776 13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792 13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808 13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824 13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840 13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856 13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872 13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888 13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904 13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920 13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936 13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952 13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968 13968,13969,13970,13971,13972) #13973 # flake8: noqa
mit
jjx02230808/project0223
sklearn/decomposition/tests/test_dict_learning.py
67
9084
import numpy as np from sklearn.utils import check_array from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import TempMemmap from sklearn.decomposition import DictionaryLearning from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.decomposition import SparseCoder from sklearn.decomposition import dict_learning_online from sklearn.decomposition import sparse_encode rng_global = np.random.RandomState(0) n_samples, n_features = 10, 8 X = rng_global.randn(n_samples, n_features) def test_dict_learning_shapes(): n_components = 5 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_overcomplete(): n_components = 12 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_reconstruction(): n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) # used to test lars here too, but there's no guarantee the number of # nonzero atoms is right. 
def test_dict_learning_reconstruction_parallel(): # regression test that parallel reconstruction works with n_jobs=-1 n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0, n_jobs=-1) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) def test_dict_learning_lassocd_readonly_data(): n_components = 12 with TempMemmap(X) as X_read_only: dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd', transform_alpha=0.001, random_state=0, n_jobs=-1) code = dico.fit(X_read_only).transform(X_read_only) assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2) def test_dict_learning_nonzero_coefs(): n_components = 4 dico = DictionaryLearning(n_components, transform_algorithm='lars', transform_n_nonzero_coefs=3, random_state=0) code = dico.fit(X).transform(X[np.newaxis, 1]) assert_true(len(np.flatnonzero(code)) == 3) dico.set_params(transform_algorithm='omp') code = dico.transform(X[np.newaxis, 1]) assert_equal(len(np.flatnonzero(code)), 3) def test_dict_learning_unknown_fit_algorithm(): n_components = 5 dico = DictionaryLearning(n_components, fit_algorithm='<unknown>') assert_raises(ValueError, dico.fit, X) def test_dict_learning_split(): n_components = 5 dico = DictionaryLearning(n_components, transform_algorithm='threshold', random_state=0) code = dico.fit(X).transform(X) dico.split_sign = True split_code = dico.transform(X) assert_array_equal(split_code[:, :n_components] - split_code[:, n_components:], code) def test_dict_learning_online_shapes(): rng = np.random.RandomState(0) n_components = 8 code, dictionary = dict_learning_online(X, n_components=n_components, alpha=1, random_state=rng) assert_equal(code.shape, (n_samples, n_components)) assert_equal(dictionary.shape, 
(n_components, n_features)) assert_equal(np.dot(code, dictionary).shape, X.shape) def test_dict_learning_online_verbosity(): n_components = 5 # test verbosity from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1, random_state=0) dico.fit(X) dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2, random_state=0) dico.fit(X) dict_learning_online(X, n_components=n_components, alpha=1, verbose=1, random_state=0) dict_learning_online(X, n_components=n_components, alpha=1, verbose=2, random_state=0) finally: sys.stdout = old_stdout assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_estimator_shapes(): n_components = 5 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0) dico.fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_overcomplete(): n_components = 12 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_initialization(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) dico = MiniBatchDictionaryLearning(n_components, n_iter=0, dict_init=V, random_state=0).fit(X) assert_array_equal(dico.components_, V) def test_dict_learning_online_partial_fit(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X), batch_size=1, alpha=1, shuffle=False, dict_init=V, random_state=0).fit(X) dict2 = MiniBatchDictionaryLearning(n_components, alpha=1, n_iter=1, dict_init=V, random_state=0) for i in range(10): for sample in X: dict2.partial_fit(sample[np.newaxis, :]) assert_true(not 
np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)) assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) def test_sparse_encode_shapes(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): code = sparse_encode(X, V, algorithm=algo) assert_equal(code.shape, (n_samples, n_components)) def test_sparse_encode_input(): n_components = 100 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] Xf = check_array(X, order='F') for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): a = sparse_encode(X, V, algorithm=algo) b = sparse_encode(Xf, V, algorithm=algo) assert_array_almost_equal(a, b) def test_sparse_encode_error(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = sparse_encode(X, V, alpha=0.001) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) def test_sparse_encode_error_default_sparsity(): rng = np.random.RandomState(0) X = rng.randn(100, 64) D = rng.randn(2, 64) code = ignore_warnings(sparse_encode)(X, D, algorithm='omp', n_nonzero_coefs=None) assert_equal(code.shape, (100, 2)) def test_unknown_method(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>") def test_sparse_coder_estimator(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars', transform_alpha=0.001).transform(X) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 
0.1)
bsd-3-clause
DataDog/integrations-core
ibm_db2/setup.py
1
1946
# (C) Datadog, Inc. 2019-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) from codecs import open # To use a consistent encoding from os import path from setuptools import setup HERE = path.dirname(path.abspath(__file__)) # Get version info ABOUT = {} with open(path.join(HERE, 'datadog_checks', 'ibm_db2', '__about__.py')) as f: exec(f.read(), ABOUT) # Get the long description from the README file with open(path.join(HERE, 'README.md'), encoding='utf-8') as f: long_description = f.read() def get_dependencies(): dep_file = path.join(HERE, 'requirements.in') if not path.isfile(dep_file): return [] with open(dep_file, encoding='utf-8') as f: return f.readlines() CHECKS_BASE_REQ = 'datadog-checks-base>=11.2.0' setup( name='datadog-ibm_db2', version=ABOUT['__version__'], description='The IBM Db2 check', long_description=long_description, long_description_content_type='text/markdown', keywords='datadog agent ibm_db2 check', # The project's main homepage. url='https://github.com/DataDog/integrations-core', # Author details author='Datadog', author_email='packages@datadoghq.com', # License license='BSD-3-Clause', # See https://pypi.org/classifiers classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Topic :: System :: Monitoring', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.8', ], # The package we're going to ship packages=['datadog_checks.ibm_db2'], # Run-time dependencies install_requires=[CHECKS_BASE_REQ], extras_require={'deps': get_dependencies()}, # Extra files to ship with the wheel package include_package_data=True, )
bsd-3-clause
tgroh/beam
sdks/python/apache_beam/runners/worker/opcounters_test.py
2
7606
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging import math import random import unittest from apache_beam import coders from apache_beam.runners.worker import opcounters from apache_beam.runners.worker import statesampler from apache_beam.runners.worker.opcounters import OperationCounters from apache_beam.transforms.window import GlobalWindows from apache_beam.utils import counters from apache_beam.utils.counters import CounterFactory # Classes to test that we can handle a variety of objects. # These have to be at top level so the pickler can find them. 
class OldClassThatDoesNotImplementLen:  # pylint: disable=old-style-class
  """Old-style (Python 2) class without __len__.

  Used to exercise size estimation for objects whose length cannot be
  taken directly.
  """

  def __init__(self):
    pass


class ObjectThatDoesNotImplementLen(object):
  """New-style class without __len__; counterpart of the old-style case."""

  def __init__(self):
    pass


class TransformIoCounterTest(unittest.TestCase):

  def test_basic_counters(self):
    """A side-input read counter reports under every step that reads it."""
    counter_factory = CounterFactory()
    sampler = statesampler.StateSampler('stage1', counter_factory)
    sampler.start()

    # The counter is declared while step1 is the sampled state...
    with sampler.scoped_state('step1', 'stateA'):
      counter = opcounters.SideInputReadCounter(counter_factory, sampler,
                                                declaring_step='step1',
                                                input_index=1)
    # ...but the bytes are read while step2 is current, so after
    # update_current_step() the io_target must point at step2 as well.
    with sampler.scoped_state('step2', 'stateB'):
      with counter:
        counter.add_bytes_read(10)

      counter.update_current_step()

    sampler.stop()
    sampler.commit_counters()

    actual_counter_names = set([c.name for c in counter_factory.get_counters()])
    expected_counter_names = set([
        # Counter names for STEP 1
        counters.CounterName('read-sideinput-msecs',
                             stage_name='stage1',
                             step_name='step1',
                             io_target=counters.side_input_id('step1', 1)),
        counters.CounterName('read-sideinput-byte-count',
                             step_name='step1',
                             io_target=counters.side_input_id('step1', 1)),

        # Counter names for STEP 2
        counters.CounterName('read-sideinput-msecs',
                             stage_name='stage1',
                             step_name='step1',
                             io_target=counters.side_input_id('step2', 1)),
        counters.CounterName('read-sideinput-byte-count',
                             step_name='step1',
                             io_target=counters.side_input_id('step2', 1)),
    ])
    # Superset, not equality: the sampler may emit additional counters.
    self.assertTrue(actual_counter_names.issuperset(expected_counter_names))


class OperationCountersTest(unittest.TestCase):

  def verify_counters(self, opcounts, expected_elements, expected_size=None):
    """Assert element count and (optionally) the mean byte size.

    A NaN expected_size means "no size observed yet" and must be compared
    with isnan, since NaN != NaN.
    """
    self.assertEqual(expected_elements, opcounts.element_counter.value())
    if expected_size is not None:
      if math.isnan(expected_size):
        self.assertTrue(math.isnan(opcounts.mean_byte_counter.value()))
      else:
        self.assertEqual(expected_size, opcounts.mean_byte_counter.value())

  def test_update_int(self):
    opcounts = OperationCounters(CounterFactory(), 'some-name',
                                 coders.PickleCoder(), 0)
    self.verify_counters(opcounts, 0)
    opcounts.update_from(GlobalWindows.windowed_value(1))
    self.verify_counters(opcounts, 1)

  def test_update_str(self):
    coder = coders.PickleCoder()
    opcounts = OperationCounters(CounterFactory(), 'some-name', coder, 0)
    self.verify_counters(opcounts, 0, float('nan'))
    value = GlobalWindows.windowed_value('abcde')
    opcounts.update_from(value)
    estimated_size = coder.estimate_size(value)
    self.verify_counters(opcounts, 1, estimated_size)

  def test_update_old_object(self):
    coder = coders.PickleCoder()
    opcounts = OperationCounters(CounterFactory(), 'some-name', coder, 0)
    self.verify_counters(opcounts, 0, float('nan'))
    obj = OldClassThatDoesNotImplementLen()
    value = GlobalWindows.windowed_value(obj)
    opcounts.update_from(value)
    estimated_size = coder.estimate_size(value)
    self.verify_counters(opcounts, 1, estimated_size)

  def test_update_new_object(self):
    coder = coders.PickleCoder()
    opcounts = OperationCounters(CounterFactory(), 'some-name', coder, 0)
    self.verify_counters(opcounts, 0, float('nan'))
    obj = ObjectThatDoesNotImplementLen()
    value = GlobalWindows.windowed_value(obj)
    opcounts.update_from(value)
    estimated_size = coder.estimate_size(value)
    self.verify_counters(opcounts, 1, estimated_size)

  def test_update_multiple(self):
    """The mean byte counter tracks the running average across updates."""
    coder = coders.PickleCoder()
    total_size = 0
    opcounts = OperationCounters(CounterFactory(), 'some-name', coder, 0)
    self.verify_counters(opcounts, 0, float('nan'))
    value = GlobalWindows.windowed_value('abcde')
    opcounts.update_from(value)
    total_size += coder.estimate_size(value)
    value = GlobalWindows.windowed_value('defghij')
    opcounts.update_from(value)
    total_size += coder.estimate_size(value)
    self.verify_counters(opcounts, 2, float(total_size) / 2)
    value = GlobalWindows.windowed_value('klmnop')
    opcounts.update_from(value)
    total_size += coder.estimate_size(value)
    self.verify_counters(opcounts, 3, float(total_size) / 3)

  def test_should_sample(self):
    # Order of magnitude more buckets than highest constant in code under
    # test.
    buckets = [0] * 300

    # The seed is arbitrary and exists just to ensure this test is robust.
    # If you don't like this seed, try your own; the test should still pass.
    random.seed(1717)

    # Do enough runs that the expected hits even in the last buckets
    # is big enough to expect some statistical smoothing.
    total_runs = 10 * len(buckets)

    # Fill the buckets.
    for _ in range(total_runs):
      opcounts = OperationCounters(CounterFactory(), 'some-name',
                                   coders.PickleCoder(), 0)
      for i in range(len(buckets)):
        if opcounts.should_sample():
          buckets[i] += 1

    # Look at the buckets to see if they are likely.
    # The first 10 elements are always sampled; after that the sampling
    # probability decays roughly as 10/i, so each bucket is checked against
    # a tolerance band around that expectation.
    for i in range(10):
      self.assertEqual(total_runs, buckets[i])
    for i in range(10, len(buckets)):
      self.assertTrue(buckets[i] > 7 * total_runs / i,
                      'i=%d, buckets[i]=%d, expected=%d, ratio=%f' % (
                          i, buckets[i],
                          10 * total_runs / i,
                          buckets[i] / (10.0 * total_runs / i)))
      self.assertTrue(buckets[i] < 14 * total_runs / i,
                      'i=%d, buckets[i]=%d, expected=%d, ratio=%f' % (
                          i, buckets[i],
                          10 * total_runs / i,
                          buckets[i] / (10.0 * total_runs / i)))


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
apache-2.0
sebest/eve
eve/methods/get.py
8
18042
# -*- coding: utf-8 -*- """ eve.methods.get ~~~~~~~~~~~~~~~ This module implements the API 'GET' methods, supported by both the resources and single item endpoints. :copyright: (c) 2015 by Nicola Iarocci. :license: BSD, see LICENSE for more details. """ import math from flask import current_app as app, abort, request from .common import ratelimit, epoch, pre_event, resolve_embedded_fields, \ build_response_document, resource_link, document_link, last_updated from eve.auth import requires_auth from eve.utils import parse_request, home_link, querydef, config from eve.versioning import synthesize_versioned_document, versioned_id_field, \ get_old_document, diff_document @ratelimit() @requires_auth('resource') @pre_event def get(resource, **lookup): """ Retrieves the resource documents that match the current request. :param resource: the name of the resource. .. versionchanged:: 0.6 Support for HEADER_TOTAL_COUNT returned with response header. .. versionchanged:: 0.5 Support for customisable query parameters. .. versionchanged:: 0.4 Add pagination info whatever the HATEOAS status. 'on_fetched' events now return the whole response (HATEOAS metafields included.) Replaced ID_FIELD by item_lookup_field on self link. item_lookup_field will default to ID_FIELD if blank. Changed ``on_fetch_*`` changed to ``on_fetched_*``. .. versionchanged:: 0.3 Don't return 304 if resource is empty. Fixes #243. Support for media fields. When IF_MATCH is disabled, no etag is included in the payload. When If-Modified-Since header is present, either no documents (304) or all documents (200) are sent per the HTTP spec. Original behavior can be achieved with: /resource?where={"updated":{"$gt":"if-modified-since-date"}} .. versionchanged:: 0.2 Use the new ITEMS configuration setting. Raise 'on_pre_<method>' event. Let cursor add extra info to response. .. versionchanged:: 0.1.0 Support for optional HATEOAS. Support for embeddable documents. .. 
versionchanged:: 0.0.9 Event hooks renamed to be more robuts and consistent: 'on_getting' renamed to 'on_fetch'. .. versionchanged:: 0.0.8 'on_getting' and 'on_getting_<resource>' events are raised when documents have been read from the database and are about to be sent to the client. .. versionchanged:: 0.0.6 Support for HEAD requests. .. versionchanged:: 0.0.5 Support for user-restricted access to resources. Support for LAST_UPDATED field missing from documents, because they were created outside the API context. .. versionchanged:: 0.0.4 Added the ``requires_auth`` decorator. .. versionchanged:: 0.0.3 Superflous ``response`` container removed. Collection items wrapped with ``_items``. Links wrapped with ``_links``. Links are now properly JSON formatted. """ documents = [] response = {} etag = None req = parse_request(resource) embedded_fields = resolve_embedded_fields(resource, req) # continue processing the full request last_update = epoch() # If-Modified-Since disabled on collections (#334) req.if_modified_since = None cursor = app.data.find(resource, req, lookup) # If soft delete is enabled, data.find will not include items marked # deleted unless req.show_deleted is True for document in cursor: build_response_document(document, resource, embedded_fields) documents.append(document) # build last update for entire response if document[config.LAST_UPDATED] > last_update: last_update = document[config.LAST_UPDATED] status = 200 headers = [] last_modified = last_update if last_update > epoch() else None response[config.ITEMS] = documents count = cursor.count(with_limit_and_skip=False) headers.append((config.HEADER_TOTAL_COUNT, count)) if config.DOMAIN[resource]['hateoas']: response[config.LINKS] = _pagination_links(resource, req, count) # add pagination info if config.DOMAIN[resource]['pagination']: response[config.META] = _meta_links(req, count) # notify registered callback functions. 
Please note that, should the # functions modify the documents, the last_modified and etag won't be # updated to reflect the changes (they always reflect the documents # state on the database.) getattr(app, "on_fetched_resource")(resource, response) getattr(app, "on_fetched_resource_%s" % resource)(response) # the 'extra' cursor field, if present, will be added to the response. # Can be used by Eve extensions to add extra, custom data to any # response. if hasattr(cursor, 'extra'): getattr(cursor, 'extra')(response) return response, last_modified, etag, status, headers @ratelimit() @requires_auth('item') @pre_event def getitem(resource, **lookup): """ :param resource: the name of the resource to which the document belongs. :param **lookup: the lookup query. .. versionchanged:: 0.6 Handle soft deleted documents .. versionchanged:: 0.5 Allow ``?version=all`` requests to fire ``on_fetched_*`` events. Create pagination links for document versions. (#475) Pagination links reflect current query. (#464) .. versionchanged:: 0.4 HATOEAS link for contains the business unit value even when regexes have been configured for the resource endpoint. 'on_fetched' now returns the whole response (HATEOAS metafields included.) Support for document versioning. Changed ``on_fetch_*`` changed to ``on_fetched_*``. .. versionchanged:: 0.3 Support for media fields. When IF_MATCH is disabled, no etag is included in the payload. .. versionchanged:: 0.1.1 Support for Embeded Resource Serialization. .. versionchanged:: 0.1.0 Support for optional HATEOAS. .. versionchanged: 0.0.8 'on_getting_item' event is raised when a document has been read from the database and is about to be sent to the client. .. versionchanged:: 0.0.7 Support for Rate-Limiting. .. versionchanged:: 0.0.6 Support for HEAD requests. .. versionchanged:: 0.0.6 ETag added to payload. .. versionchanged:: 0.0.5 Support for user-restricted access to resources. 
Support for LAST_UPDATED field missing from documents, because they were created outside the API context. .. versionchanged:: 0.0.4 Added the ``requires_auth`` decorator. .. versionchanged:: 0.0.3 Superflous ``response`` container removed. Links wrapped with ``_links``. Links are now properly JSON formatted. """ req = parse_request(resource) resource_def = config.DOMAIN[resource] embedded_fields = resolve_embedded_fields(resource, req) soft_delete_enabled = config.DOMAIN[resource]['soft_delete'] if soft_delete_enabled: # GET requests should always fetch soft deleted documents from the db # They are handled and included in 404 responses below. req.show_deleted = True document = app.data.find_one(resource, req, **lookup) if not document: abort(404) response = {} etag = None version = request.args.get(config.VERSION_PARAM) latest_doc = None cursor = None # calculate last_modified before get_old_document rolls back the document, # allowing us to invalidate the cache when _latest_version changes last_modified = last_updated(document) # synthesize old document version(s) if resource_def['versioning'] is True: latest_doc = document document = get_old_document( resource, req, lookup, document, version) # meld into response document build_response_document(document, resource, embedded_fields, latest_doc) if config.IF_MATCH: etag = document[config.ETAG] # check embedded fields resolved in build_response_document() for more # recent last updated timestamps. 
We don't want to respond 304 if embedded # fields have changed for field in embedded_fields: embedded_document = document.get(field) if isinstance(embedded_document, dict): embedded_last_updated = last_updated(embedded_document) if embedded_last_updated > last_modified: last_modified = embedded_last_updated # facilitate client caching by returning a 304 when appropriate cache_validators = {True: 0, False: 0} if req.if_modified_since: cache_valid = (last_modified <= req.if_modified_since) cache_validators[cache_valid] += 1 if req.if_none_match: if (resource_def['versioning'] is False) or \ (document[app.config['VERSION']] == document[app.config['LATEST_VERSION']]): cache_valid = (etag == req.if_none_match) cache_validators[cache_valid] += 1 # If all cache validators are true, return 304 if (cache_validators[True] > 0) and (cache_validators[False] == 0): return {}, last_modified, etag, 304 if version == 'all' or version == 'diffs': # find all versions lookup[versioned_id_field()] = lookup[app.config['ID_FIELD']] del lookup[app.config['ID_FIELD']] if version == 'diffs' or req.sort is None: # default sort for 'all', required sort for 'diffs' req.sort = '[("%s", 1)]' % config.VERSION req.if_modified_since = None # we always want the full history here cursor = app.data.find(resource + config.VERSIONS, req, lookup) # build all versions documents = [] if cursor.count() == 0: # this is the scenario when the document existed before # document versioning got turned on documents.append(latest_doc) else: last_document = {} # if we aren't starting on page 1, then we need to init last_doc if version == 'diffs' and req.page > 1: # grab the last document on the previous page to diff from last_version = cursor[0][app.config['VERSION']] - 1 last_document = get_old_document( resource, req, lookup, latest_doc, last_version) for i, document in enumerate(cursor): document = synthesize_versioned_document( latest_doc, document, resource_def) build_response_document( document, resource, 
embedded_fields, latest_doc) if version == 'diffs': if i == 0: documents.append(document) else: documents.append(diff_document( resource_def, last_document, document)) last_document = document else: documents.append(document) # add documents to response if config.DOMAIN[resource]['hateoas']: response[config.ITEMS] = documents else: response = documents elif soft_delete_enabled and document.get(config.DELETED) is True: # This document was soft deleted. Respond with 404 and the deleted # version of the document. document[config.STATUS] = config.STATUS_ERR, document[config.ERROR] = { 'code': 404, 'message': 'The requested URL was not found on this server.' } return document, last_modified, etag, 404 else: response = document # extra hateoas links if config.DOMAIN[resource]['hateoas']: # use the id of the latest document for multi-document requests if cursor: count = cursor.count(with_limit_and_skip=False) response[config.LINKS] = \ _pagination_links(resource, req, count, latest_doc[config.ID_FIELD]) if config.DOMAIN[resource]['pagination']: response[config.META] = _meta_links(req, count) else: response[config.LINKS] = \ _pagination_links(resource, req, None, response[config.ID_FIELD]) # callbacks not supported on version diffs because of partial documents if version != 'diffs': # TODO: callbacks not currently supported with ?version=all # notify registered callback functions. Please note that, should # the functions modify the document, last_modified and etag # won't be updated to reflect the changes (they always reflect the # documents state on the database). 
if resource_def['versioning'] is True and version == 'all': versions = response if config.DOMAIN[resource]['hateoas']: versions = response[config.ITEMS] for version_item in versions: getattr(app, "on_fetched_item")(resource, version_item) getattr(app, "on_fetched_item_%s" % resource)(version_item) else: getattr(app, "on_fetched_item")(resource, response) getattr(app, "on_fetched_item_%s" % resource)(response) return response, last_modified, etag, 200 def _pagination_links(resource, req, documents_count, document_id=None): """ Returns the appropriate set of resource links depending on the current page and the total number of documents returned by the query. :param resource: the resource name. :param req: and instace of :class:`eve.utils.ParsedRequest`. :param document_count: the number of documents returned by the query. :param document_id: the document id (used for versions). Defaults to None. .. versionchanged:: 0.5 Create pagination links given a document ID to allow paginated versions pages (#475). Pagination links reflect current query. (#464) .. versionchanged:: 0.4 HATOEAS link for contains the business unit value even when regexes have been configured for the resource endpoint. .. versionchanged:: 0.0.8 Link to last page is provided if pagination is enabled (and the current page is not the last one). .. versionchanged:: 0.0.7 Support for Rate-Limiting. .. versionchanged:: 0.0.5 Support for optional pagination. .. 
versionchanged:: 0.0.3 JSON links """ version = None if config.DOMAIN[resource]['versioning'] is True: version = request.args.get(config.VERSION_PARAM) # construct the default links q = querydef(req.max_results, req.where, req.sort, version, req.page) resource_title = config.DOMAIN[resource]['resource_title'] _links = {'parent': home_link(), 'self': {'title': resource_title, 'href': resource_link()}} # change links if document ID is given if document_id: _links['self'] = document_link(resource, document_id) _links['collection'] = {'title': resource_title, 'href': '%s%s' % (resource_link(), q)} # make more specific links for versioned requests if version in ('all', 'diffs'): _links['parent'] = {'title': resource_title, 'href': resource_link()} _links['collection'] = document_link(resource, document_id) elif version: _links['parent'] = document_link(resource, document_id) _links['collection'] = {'title': resource_title, 'href': '%s?version=all' % _links['parent']['href']} # modify the self link to add query params or version number if documents_count: _links['self']['href'] = '%s%s' % (_links['self']['href'], q) elif not documents_count and version and version not in ('all', 'diffs'): _links['self'] = document_link(resource, document_id, version) # create pagination links if documents_count and config.DOMAIN[resource]['pagination']: # strip any queries from the self link if present _pagination_link = _links['self']['href'].split('?')[0] if req.page * req.max_results < documents_count: q = querydef(req.max_results, req.where, req.sort, version, req.page + 1) _links['next'] = {'title': 'next page', 'href': '%s%s' % (_pagination_link, q)} # in python 2.x dividing 2 ints produces an int and that's rounded # before the ceil call. Have to cast one value to float to get # a correct result. Wonder if 2 casts + ceil() call are actually # faster than documents_count // req.max_results and then adding # 1 if the modulo is non-zero... 
last_page = int(math.ceil(documents_count / float(req.max_results))) q = querydef(req.max_results, req.where, req.sort, version, last_page) _links['last'] = {'title': 'last page', 'href': '%s%s' % (_pagination_link, q)} if req.page > 1: q = querydef(req.max_results, req.where, req.sort, version, req.page - 1) _links['prev'] = {'title': 'previous page', 'href': '%s%s' % (_pagination_link, q)} return _links def _meta_links(req, count): """ Reterns the meta links for a paginated query. :param req: parsed request object. :param count: total number of documents in a query. .. versionadded:: 0.5 """ return { config.QUERY_PAGE: req.page, config.QUERY_MAX_RESULTS: req.max_results, 'total': count }
bsd-3-clause
epa/sqlalchemy
examples/inheritance/single.py
30
3298
"""Single-table inheritance example.""" from sqlalchemy import MetaData, Table, Column, Integer, String, \ ForeignKey, create_engine from sqlalchemy.orm import mapper, relationship, sessionmaker metadata = MetaData() # a table to store companies companies = Table('companies', metadata, Column('company_id', Integer, primary_key=True), Column('name', String(50))) employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('company_id', Integer, ForeignKey('companies.company_id')), Column('name', String(50)), Column('type', String(20)), Column('status', String(20)), Column('engineer_name', String(50)), Column('primary_language', String(50)), Column('manager_name', String(50)) ) class Person(object): def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def __repr__(self): return "Ordinary person %s" % self.name class Engineer(Person): def __repr__(self): return "Engineer %s, status %s, engineer_name %s, "\ "primary_language %s" % \ (self.name, self.status, self.engineer_name, self.primary_language) class Manager(Person): def __repr__(self): return "Manager %s, status %s, manager_name %s" % \ (self.name, self.status, self.manager_name) class Company(object): def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def __repr__(self): return "Company %s" % self.name person_mapper = mapper(Person, employees_table, polymorphic_on=employees_table.c.type, polymorphic_identity='person') manager_mapper = mapper(Manager, inherits=person_mapper, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, inherits=person_mapper, polymorphic_identity='engineer') mapper(Company, companies, properties={ 'employees': relationship(Person, lazy=True, backref='company') }) engine = create_engine('sqlite:///', echo=True) metadata.create_all(engine) session = sessionmaker(engine)() c = Company(name='company1') c.employees.append(Manager(name='pointy haired boss', 
status='AAB', manager_name='manager1')) c.employees.append(Engineer(name='dilbert', status='BBA', engineer_name='engineer1', primary_language='java')) c.employees.append(Person(name='joesmith', status='HHH')) c.employees.append(Engineer(name='wally', status='CGG', engineer_name='engineer2', primary_language='python' )) c.employees.append(Manager(name='jsmith', status='ABA', manager_name='manager2')) session.add(c) session.commit() c = session.query(Company).get(1) for e in c.employees: print(e, e.company) print("\n") dilbert = session.query(Person).filter_by(name='dilbert').one() dilbert2 = session.query(Engineer).filter_by(name='dilbert').one() assert dilbert is dilbert2 dilbert.engineer_name = 'hes dibert!' session.flush() session.expunge_all() c = session.query(Company).get(1) for e in c.employees: print(e) session.delete(c) session.commit()
mit
ListFranz/tornado
tornado/__init__.py
75
1130
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The Tornado web server and tools."""

from __future__ import absolute_import, division, print_function, with_statement

# Human-readable version string, kept in sync with version_info below.
version = "4.3.dev1"

# Programmatic version tuple: (major, minor, patch, release-level).  The
# final component is 0 for an official release, positive for a development
# branch, and negative for a release candidate or beta cut after the base
# version number was incremented.
version_info = (4, 3, 0, -100)
apache-2.0
zhihu/redis-shard
redis_shard/hashring.py
1
2982
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
consistent hashing for nosql client
based on ezmobius client (redis-rb) and
see this article http://amix.dk/blog/viewEntry/19367
"""
import zlib
import bisect
from hashlib import md5, sha1

from ._compat import xrange, b, long

# Pluggable key hashers.  crc32 is masked to an unsigned 32-bit value so the
# result is identical across Python 2/3; md5/sha1 digests are widened to
# integers for placement on the ring.
hash_methods = {
    'crc32': lambda x: zlib.crc32(x) & 0xffffffff,
    'md5': lambda x: long(md5(x).hexdigest(), 16),
    'sha1': lambda x: long(sha1(x).hexdigest(), 16),
}


class HashRing(object):
    """Consistent hash for nosql API"""

    def __init__(self, nodes=[], replicas=128, hash_method='crc32'):
        """Manages a hash ring.

        `nodes` is a list of objects that have a proper __str__
        representation.
        `replicas` indicates how many virtual points should be used pr.
        node; replicas are required to improve the distribution.
        `hash_method` is the key generator method (a key of hash_methods).
        """
        # NOTE(review): the mutable default for `nodes` is only iterated,
        # never mutated, so the shared-default pitfall does not bite here.
        self.hash_method = hash_methods[hash_method]
        self.nodes = []
        self.replicas = replicas
        self.ring = {}
        self.sorted_keys = []
        for n in nodes:
            self.add_node(n)

    def add_node(self, node):
        """Adds a `node` to the hash ring (including a number of replicas).
        """
        self.nodes.append(node)
        for x in xrange(self.replicas):
            ring_key = self.hash_method(b("%s:%d" % (node, x)))
            self.ring[ring_key] = node
            self.sorted_keys.append(ring_key)

        self.sorted_keys.sort()

    def remove_node(self, node):
        """Removes `node` from the hash ring and its replicas.

        Raises ValueError/KeyError if the node was never added.
        """
        self.nodes.remove(node)
        for x in xrange(self.replicas):
            ring_key = self.hash_method(b("%s:%d" % (node, x)))
            self.ring.pop(ring_key)
            self.sorted_keys.remove(ring_key)

    def get_node(self, key):
        """Given a string key a corresponding node in the hash ring is
        returned.

        If the hash ring is empty, `None` is returned.
        """
        n, i = self.get_node_pos(key)
        return n

    def get_node_pos(self, key):
        """Given a string key a corresponding node in the hash ring is
        returned along with its position in the ring, as a two-element
        list [node, index].

        If the hash ring is empty, [None, None] is returned.
        """
        if len(self.ring) == 0:
            return [None, None]
        crc = self.hash_method(b(key))
        idx = bisect.bisect(self.sorted_keys, crc)
        # prevents out of range index.  Note this clamps to the last
        # virtual point rather than wrapping to index 0 as textbook
        # consistent hashing does; kept as-is because existing rings
        # depend on this key placement.
        idx = min(idx, (self.replicas * len(self.nodes)) - 1)
        return [self.ring[self.sorted_keys[idx]], idx]

    def iter_nodes(self, key):
        """Given a string key it returns the nodes as a generator that
        can hold the key, yielding (ring_key, node) pairs starting at the
        key's position on the ring.

        Yields a single (None, None) pair if the ring is empty.
        """
        if len(self.ring) == 0:
            yield None, None
            # Bug fix: without this `return` the generator fell through to
            # get_node_pos(), which reports a None position for an empty
            # ring, and the slice below raised TypeError.
            return
        node, pos = self.get_node_pos(key)
        for k in self.sorted_keys[pos:]:
            yield k, self.ring[k]

    def __call__(self, key):
        # Allow the ring object itself to be used as a key -> node function.
        return self.get_node(key)
bsd-2-clause
miptliot/edx-platform
lms/djangoapps/branding/migrations/0001_initial.py
86
1891
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='BrandingApiConfig', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')), ('enabled', models.BooleanField(default=False, verbose_name='Enabled')), ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')), ], options={ 'ordering': ('-change_date',), 'abstract': False, }, ), migrations.CreateModel( name='BrandingInfoConfig', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')), ('enabled', models.BooleanField(default=False, verbose_name='Enabled')), ('configuration', models.TextField(help_text=b'JSON data of Configuration for Video Branding.')), ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')), ], options={ 'ordering': ('-change_date',), 'abstract': False, }, ), ]
agpl-3.0
mozilla/verbatim
vendor/lib/python/django/contrib/sitemaps/__init__.py
91
4290
from django.contrib.sites.models import Site
from django.core import urlresolvers, paginator
from django.core.exceptions import ImproperlyConfigured
import urllib  # py2 urllib; this is a Python 2 era Django vendored copy

PING_URL = "http://www.google.com/webmasters/tools/ping"


class SitemapNotFound(Exception):
    # Raised by ping_google when no sitemap URL is given or derivable.
    pass


def ping_google(sitemap_url=None, ping_url=PING_URL):
    """
    Alerts Google that the sitemap for the current site has been updated.
    If sitemap_url is provided, it should be an absolute path to the sitemap
    for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
    function will attempt to deduce it by using urlresolvers.reverse().

    Raises SitemapNotFound if no URL can be determined.
    """
    if sitemap_url is None:
        try:
            # First, try to get the "index" sitemap URL.
            sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.index')
        except urlresolvers.NoReverseMatch:
            try:
                # Next, try for the "global" sitemap URL.
                sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap')
            except urlresolvers.NoReverseMatch:
                pass

    if sitemap_url is None:
        raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")

    from django.contrib.sites.models import Site
    current_site = Site.objects.get_current()
    url = "http://%s%s" % (current_site.domain, sitemap_url)
    params = urllib.urlencode({'sitemap': url})
    # Fire-and-forget GET; the response body is intentionally ignored.
    urllib.urlopen("%s?%s" % (ping_url, params))


class Sitemap(object):
    # This limit is defined by Google. See the index documentation at
    # http://sitemaps.org/protocol.php#index.
    limit = 50000

    # If protocol is None, the URLs in the sitemap will use the protocol
    # with which the sitemap was requested.
    protocol = None

    def __get(self, name, obj, default=None):
        # Resolve `name` as either a plain attribute or a callable taking
        # the item; returns `default` when the attribute is absent.
        try:
            attr = getattr(self, name)
        except AttributeError:
            return default
        if callable(attr):
            return attr(obj)
        return attr

    def items(self):
        # Subclasses override to return the objects to list in the sitemap.
        return []

    def location(self, obj):
        return obj.get_absolute_url()

    def _get_paginator(self):
        # NOTE(review): a fresh Paginator (and thus a fresh items() query)
        # is built on every access -- presumably cheap for the intended
        # querysets; confirm before relying on it in hot paths.
        return paginator.Paginator(self.items(), self.limit)
    paginator = property(_get_paginator)

    def get_urls(self, page=1, site=None, protocol=None):
        """Return the list of URL-info dicts for one sitemap page."""
        # Determine protocol
        if self.protocol is not None:
            protocol = self.protocol
        if protocol is None:
            protocol = 'http'

        # Determine domain
        if site is None:
            if Site._meta.installed:
                try:
                    site = Site.objects.get_current()
                except Site.DoesNotExist:
                    pass
            if site is None:
                raise ImproperlyConfigured("To use sitemaps, either enable the sites framework or pass a Site/RequestSite object in your view.")
        domain = site.domain

        urls = []
        for item in self.paginator.page(page).object_list:
            loc = "%s://%s%s" % (protocol, domain, self.__get('location', item))
            priority = self.__get('priority', item, None)
            url_info = {
                'item': item,
                'location': loc,
                'lastmod': self.__get('lastmod', item, None),
                'changefreq': self.__get('changefreq', item, None),
                # NOTE(review): `x and y or z` renders a priority of 0 as ''
                # (falsy short-circuit); kept as-is -- long-standing quirk.
                'priority': str(priority is not None and priority or ''),
            }
            urls.append(url_info)
        return urls


class FlatPageSitemap(Sitemap):
    def items(self):
        current_site = Site.objects.get_current()
        # Only publicly visible flatpages belong in the sitemap.
        return current_site.flatpage_set.filter(registration_required=False)


class GenericSitemap(Sitemap):
    priority = None
    changefreq = None

    def __init__(self, info_dict, priority=None, changefreq=None):
        # info_dict mirrors the generic-views convention: a 'queryset' key
        # plus an optional 'date_field' used for lastmod.
        self.queryset = info_dict['queryset']
        self.date_field = info_dict.get('date_field', None)
        self.priority = priority
        self.changefreq = changefreq

    def items(self):
        # Make sure to return a clone; we don't want premature evaluation.
        return self.queryset.filter()

    def lastmod(self, item):
        if self.date_field is not None:
            return getattr(item, self.date_field)
        return None
gpl-2.0
nschloe/quadpy
src/quadpy/cn/_phillips.py
1
1881
import numpy as np
from sympy import Rational as frac
from sympy import sqrt

from ..helpers import article, comb, fsd, untangle, z
from ._helpers import CnScheme

# Bibliographic record for the paper this scheme comes from.
_source = article(
    authors=["G.M. Phillips"],
    title="Numerical integration over an N-dimensional rectangular region",
    journal="Comput J",
    year="1967",
    volume="10",
    number="3",
    pages="297-299",
    url="https://doi.org/10.1093/comjnl/10.3.297",
)


def phillips(n):
    """Construct Phillips' cubature scheme on the n-dimensional cube.

    The scheme is built as a CnScheme of degree 7 (the literal passed to the
    constructor below). All intermediate coefficients are kept exact via
    sympy Rationals; the branches pick the paper's per-dimension parameters
    p1, p2, q (and r, which only exists for n > 2).
    """
    # Dimension-specific parameter choices; n == 2 has no r (and hence no
    # third-order symmetric point set later on).
    if n == 2:
        p1 = 1
        p2 = frac(14, 3)
        q = frac(5, 3)
    elif n == 3:
        p1 = 1
        p2 = frac(14, 5)
        q = frac(5, 2)
        r = 1
    elif n == 4:
        p1 = 1
        p2 = frac(112, 11)
        q = 5
        r = 2
    else:
        assert n >= 5
        p1 = 1
        En = frac(25 * n ** 2 - 165 * n + 302, 972)
        p2 = 1 / (frac(3, 5) - frac(1, 35 * En))
        q = frac(5, 3)
        r = frac(5, 3)

    # Weight-coefficient prerequisites, all exact rationals in n.
    gamma = frac((n - 1) * (19 - 5 * n), 270)
    delta = frac((n - 1) * (n - 2), 108)
    a1 = frac(23 - 5 * n, 180) - gamma * q / 2
    a2 = frac(35 * n ** 2 - 231 * n + 466, 3780)
    beta1 = (a1 - a2 * p2) / (p1 - p2)
    beta2 = (a1 - a2 * p1) / (p2 - p1)

    # Node distances from the origin along the axes/diagonals.
    lambda1 = 1 / sqrt(p1)
    lambda2 = 1 / sqrt(p2)
    mu = 1 / sqrt(q)

    # Weights for the axis point sets (b1, b2) and the 2-coordinate set (c).
    b1 = beta1 / lambda1 ** 6
    b2 = beta2 / lambda2 ** 6
    c = gamma / (2 * (n - 1) * mu ** 6)

    # Center weight: whatever is left after the symmetric point sets.
    a = 1 - 2 * n * (b1 + b2) - 4 * comb(n, 2) * c
    if n > 2:
        # The 3-coordinate point set only appears for n > 2.
        nu = 1 / sqrt(r)
        d = delta / (4 * comb(n - 1, 2) * nu ** 6)
        a -= 8 * comb(n, 3) * d

    # (weight, fully-symmetric point set) pairs: center, two axis sets, and
    # the 2-coordinate set; the 3-coordinate set is appended for n > 2.
    data = [
        (a, z(n)),
        (b1, fsd(n, (lambda1, 1))),
        (b2, fsd(n, (lambda2, 1))),
        (c, fsd(n, (mu, 2))),
    ]
    if n > 2:
        data.append((d, fsd(n, (nu, 3))))

    points, weights = untangle(data)
    points = np.ascontiguousarray(points.T)
    return CnScheme("Phillips", n, weights, points, 7, _source, 1.521e-13)
mit
hidekb/espressopp
src/Exceptions.py
7
2456
#  Copyright (C) 2012,2013
#      Max Planck Institute for Polymer Research
#  Copyright (C) 2008,2009,2010,2011
#      Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
#  This file is part of ESPResSo++.
#
#  ESPResSo++ is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  ESPResSo++ is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.

r"""
*********************
espressopp.Exceptions
*********************

.. function:: espressopp.Error(msg)

        :param msg:
        :type msg:

.. function:: espressopp.ParticleDoesNotExistHere(msg)

        :param msg:
        :type msg:

.. function:: espressopp.UnknownParticleProperty(msg)

        :param msg:
        :type msg:

.. function:: espressopp.MissingFixedPairList(msg)

        :param msg:
        :type msg:
"""
import sys, traceback


class Error(Exception):
    """Error that prefixes its message with the executing script's location.

    The stored ``msg`` begins with the file name, line number and source line
    of the outermost stack frame, i.e. the top-level statement that was being
    executed when the error was raised.
    """

    def __init__(self, msg):
        # extract_stack()[0] is the outermost frame.  The original wrapped
        # this in a no-op ``try: raise Exception / except:`` block; the call
        # works identically without it (and without the bare except).
        file, lineno, module, line = traceback.extract_stack()[0]
        self.msg = ('ERROR while executing ' + str(file) + ' line '
                    + str(lineno) + ': ' + str(line) + '\n-> ' + msg)

    def __str__(self):
        return self.msg

    def __repr__(self):
        return str(self)


class _PlainMessageError(Exception):
    """Shared base: stores ``msg`` and echoes it from __str__/__repr__.

    Replaces three identical copy-pasted implementations, each of which also
    carried a pointless ``try: raise Exception / except:`` block around a
    plain ``self.msg = msg`` assignment.
    """

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg

    def __repr__(self):
        return str(self)


class ParticleDoesNotExistHere(_PlainMessageError):
    """Message-only exception; see the module docstring for its signature."""
    pass


class UnknownParticleProperty(_PlainMessageError):
    """Message-only exception; see the module docstring for its signature."""
    pass


class MissingFixedPairList(_PlainMessageError):
    """Message-only exception; see the module docstring for its signature."""
    pass
gpl-3.0
lgeiger/ide-python
lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/_pydev_bundle/pydev_monkey_qt.py
3
6722
"""Monkey-patches Qt bindings (PySide, PyQt4, PyQt5) so that code run inside
QThread/QRunnable gets the pydevd trace function installed."""
from __future__ import nested_scopes
import os


def set_trace_in_qt():
    # Install the debugger's trace function in the current (Qt-created)
    # thread, if a debugger session is active.
    import pydevd_tracing
    from _pydevd_bundle.pydevd_comm import get_global_debugger
    debugger = get_global_debugger()
    if debugger is not None:
        pydevd_tracing.SetTrace(debugger.trace_dispatch, debugger.frame_eval_func)


# Module-level guard so the Qt classes are only patched once per process.
_patched_qt = False


def patch_qt(qt_support_mode):
    '''
    This method patches qt (PySide, PyQt4, PyQt5) so that we have hooks to set the tracing for QThread.

    :param qt_support_mode: falsy to disable; True/'True'/'auto' to
        auto-detect the installed binding; or one of 'pyside', 'pyqt5',
        'pyqt4' to force a specific binding.
    '''
    if not qt_support_mode:
        return

    if qt_support_mode is True or qt_support_mode == 'True':
        # do not break backward compatibility
        qt_support_mode = 'auto'

    if qt_support_mode == 'auto':
        # Environment override takes precedence in auto mode.
        qt_support_mode = os.getenv('PYDEVD_PYQT_MODE', 'auto')

    # Avoid patching more than once
    global _patched_qt
    if _patched_qt:
        return

    _patched_qt = True

    if qt_support_mode == 'auto':

        # Probe the bindings in preference order: PySide, PyQt5, PyQt4.
        patch_qt_on_import = None
        try:
            import PySide  # @UnresolvedImport @UnusedImport
            qt_support_mode = 'pyside'
        except:
            try:
                import PyQt5  # @UnresolvedImport @UnusedImport
                qt_support_mode = 'pyqt5'
            except:
                try:
                    import PyQt4  # @UnresolvedImport @UnusedImport
                    qt_support_mode = 'pyqt4'
                except:
                    # No supported Qt binding installed: nothing to patch.
                    return

    if qt_support_mode == 'pyside':
        try:
            import PySide.QtCore  # @UnresolvedImport
            _internal_patch_qt(PySide.QtCore, qt_support_mode)
        except:
            return

    elif qt_support_mode == 'pyqt5':
        try:
            import PyQt5.QtCore  # @UnresolvedImport
            _internal_patch_qt(PyQt5.QtCore)
        except:
            return

    elif qt_support_mode == 'pyqt4':
        # Ok, we have an issue here:
        # PyDev-452: Selecting PyQT API version using sip.setapi fails in debug mode
        # http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
        # Mostly, if the user uses a different API version (i.e.: v2 instead of v1),
        # that has to be done before importing PyQt4 modules (PySide/PyQt5 don't have this issue
        # as they only implements v2).
        patch_qt_on_import = 'PyQt4'

        def get_qt_core_module():
            import PyQt4.QtCore  # @UnresolvedImport
            return PyQt4.QtCore

        # Defer the actual patching until the user imports PyQt4 themselves
        # (so any sip.setapi() calls have already happened).
        _patch_import_to_patch_pyqt_on_import(patch_qt_on_import, get_qt_core_module)

    else:
        raise ValueError('Unexpected qt support mode: %s' % (qt_support_mode,))


def _patch_import_to_patch_pyqt_on_import(patch_qt_on_import, get_qt_core_module):
    # I don't like this approach very much as we have to patch __import__, but I like even less
    # asking the user to configure something in the client side...
    # So, our approach is to patch PyQt4 right before the user tries to import it (at which
    # point he should've set the sip api version properly already anyways).

    dotted = patch_qt_on_import + '.'
    original_import = __import__

    from _pydev_imps._pydev_sys_patch import patch_sys_module, patch_reload, cancel_patches_in_sys_module

    patch_sys_module()
    patch_reload()

    def patched_import(name, *args, **kwargs):
        # Triggered on 'PyQt4' or any 'PyQt4.*' submodule: restore the real
        # __import__, undo the sys patches, then patch QtCore once.
        if patch_qt_on_import == name or name.startswith(dotted):
            builtins.__import__ = original_import
            cancel_patches_in_sys_module()
            _internal_patch_qt(get_qt_core_module())  # Patch it only when the user would import the qt module
        return original_import(name, *args, **kwargs)

    # `builtins` is bound below, before patched_import can ever run.
    import sys
    if sys.version_info[0] >= 3:
        import builtins  # Py3
    else:
        import __builtin__ as builtins

    builtins.__import__ = patched_import


def _internal_patch_qt(QtCore, qt_support_mode='auto'):
    # Keep references to the unpatched originals; the wrapper classes below
    # close over them.
    _original_thread_init = QtCore.QThread.__init__
    _original_runnable_init = QtCore.QRunnable.__init__
    _original_QThread = QtCore.QThread

    class FuncWrapper:
        # Wraps a user callback so the trace function is installed in the
        # thread that ends up invoking it.

        def __init__(self, original):
            self._original = original

        def __call__(self, *args, **kwargs):
            set_trace_in_qt()
            return self._original(*args, **kwargs)

    class StartedSignalWrapper(QtCore.QObject):  # Wrapper for the QThread.started signal

        # PySide exposes Signal; PyQt exposes pyqtSignal. Probe at class
        # creation time.
        try:
            _signal = QtCore.Signal()  # @UndefinedVariable
        except:
            _signal = QtCore.pyqtSignal()  # @UndefinedVariable

        def __init__(self, thread, original_started):
            QtCore.QObject.__init__(self)
            self.thread = thread
            self.original_started = original_started
            if qt_support_mode == 'pyside':
                # PySide path: reuse the original signal and wrap callbacks
                # in connect() instead (see connect below).
                self._signal = original_started
            else:
                self._signal.connect(self._on_call)
                self.original_started.connect(self._signal)

        def connect(self, func, *args, **kwargs):
            if qt_support_mode == 'pyside':
                return self._signal.connect(FuncWrapper(func), *args, **kwargs)
            else:
                return self._signal.connect(func, *args, **kwargs)

        def disconnect(self, *args, **kwargs):
            return self._signal.disconnect(*args, **kwargs)

        def emit(self, *args, **kwargs):
            return self._signal.emit(*args, **kwargs)

        def _on_call(self, *args, **kwargs):
            set_trace_in_qt()

    class ThreadWrapper(QtCore.QThread):  # Wrapper for QThread

        def __init__(self, *args, **kwargs):
            _original_thread_init(self, *args, **kwargs)

            # In PyQt5 the program hangs when we try to call original run method of QThread class.
            # So we need to distinguish instances of QThread class and instances of QThread inheritors.
            if self.__class__.run == _original_QThread.run:
                self.run = self._exec_run
            else:
                self._original_run = self.run
                self.run = self._new_run
            self._original_started = self.started
            self.started = StartedSignalWrapper(self, self.started)

        def _exec_run(self):
            # Plain QThread: set tracing, then run the default event loop.
            set_trace_in_qt()
            self.exec_()
            return None

        def _new_run(self):
            # QThread subclass: set tracing, then call the user's run().
            set_trace_in_qt()
            return self._original_run()

    class RunnableWrapper(QtCore.QRunnable):  # Wrapper for QRunnable

        def __init__(self, *args, **kwargs):
            _original_runnable_init(self, *args, **kwargs)

            self._original_run = self.run
            self.run = self._new_run

        def _new_run(self):
            set_trace_in_qt()
            return self._original_run()

    # Replace the Qt classes so user code instantiating them gets the
    # trace-aware wrappers.
    QtCore.QThread = ThreadWrapper
    QtCore.QRunnable = RunnableWrapper
mit
mujiansu/arangodb
3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32com/client/CLSIDToClass.py
17
1840
"""Manages a dictionary of CLSID strings to Python classes. Primary use of this module is to allow modules generated by makepy.py to share classes. @makepy@ automatically generates code which interacts with this module. You should never need to reference this module directly. This module only provides support for modules which have been previously been imported. The gencache module provides some support for loading modules on demand - once done, this module supports it... As an example, the MSACCESS.TLB type library makes reference to the CLSID of the Database object, as defined in DAO3032.DLL. This allows code using the MSAccess wrapper to natively use Databases. This obviously applies to all cooperating objects, not just DAO and Access. """ mapCLSIDToClass = {} def RegisterCLSID( clsid, pythonClass ): """Register a class that wraps a CLSID This function allows a CLSID to be globally associated with a class. Certain module will automatically convert an IDispatch object to an instance of the associated class. """ mapCLSIDToClass[str(clsid)] = pythonClass def RegisterCLSIDsFromDict( dict ): """Register a dictionary of CLSID's and classes. This module performs the same function as @RegisterCLSID@, but for an entire dictionary of associations. Typically called by makepy generated modules at import time. """ try: mapCLSIDToClass.update(dict) except AttributeError: # Python 1.4? for clsid, pythonClass in dict.items(): mapCLSIDToClass[clsid] = pythonClass def GetClass(clsid): """Given a CLSID, return the globally associated class. clsid -- a string CLSID representation to check. """ return mapCLSIDToClass[clsid] def HasClass(clsid): """Determines if the CLSID has an associated class. clsid -- the string CLSID to check """ return mapCLSIDToClass.has_key(clsid)
apache-2.0
OpenUpgrade/OpenUpgrade
openerp/addons/base/tests/test_ir_filters.py
64
12256
# -*- coding: utf-8 -*-
# Integration tests for ir.filters: per-user vs global filters and the
# handling of the "is_default" flag, plus a read_group regression test.
import functools  # NOTE(review): appears unused in this module; kept as-is.

from openerp import exceptions
from openerp.tests import common

def noid(d):
    """ Removes values that are not relevant for the test comparisons """
    # Drops the database-generated keys so dicts can be compared by content.
    d.pop('id', None)
    d.pop('action_id', None)
    return d

class FiltersCase(common.TransactionCase):
    # Shared helper base: bulk-create records as the admin user.
    def build(self, model, *args):
        Model = self.registry(model)
        for vars in args:
            Model.create(self.cr, common.ADMIN_USER_ID, vars, {})

class TestGetFilters(FiltersCase):
    """Visibility rules of get_filters(): own, global, but never other
    users' filters."""

    def setUp(self):
        super(TestGetFilters, self).setUp()
        # (id, name) pair of the demo user, as returned by name_search.
        self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
        self.USER_ID = self.USER[0]

    def test_own_filters(self):
        self.build(
            'ir.filters',
            dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='d', user_id=self.USER_ID, model_id='ir.filters'))

        filters = self.registry('ir.filters').get_filters(
            self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', is_default=False, user_id=self.USER, domain='[]', context='{}'),
            dict(name='b', is_default=False, user_id=self.USER, domain='[]', context='{}'),
            dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'),
            dict(name='d', is_default=False, user_id=self.USER, domain='[]', context='{}'),
        ])

    def test_global_filters(self):
        # user_id=False marks a filter as global (visible to everyone).
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', user_id=False, model_id='ir.filters'),
            dict(name='c', user_id=False, model_id='ir.filters'),
            dict(name='d', user_id=False, model_id='ir.filters'),
        )

        filters = self.registry('ir.filters').get_filters(
            self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'),
            dict(name='b', is_default=False, user_id=False, domain='[]', context='{}'),
            dict(name='c', is_default=False, user_id=False, domain='[]', context='{}'),
            dict(name='d', is_default=False, user_id=False, domain='[]', context='{}'),
        ])

    def test_no_third_party_filters(self):
        # Filters owned by other users (here: admin) must not be returned.
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', user_id=common.ADMIN_USER_ID, model_id='ir.filters'),
            dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='d', user_id=common.ADMIN_USER_ID, model_id='ir.filters')
        )

        filters = self.registry('ir.filters').get_filters(
            self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'),
            dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'),
        ])

class TestOwnDefaults(FiltersCase):
    """is_default semantics for user-owned filters: the flag moves to the
    most recently defaulted filter."""

    def setUp(self):
        super(TestOwnDefaults, self).setUp()
        self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
        self.USER_ID = self.USER[0]

    def test_new_no_filter(self):
        """ When creating a @is_default filter with no existing filter,
        that new filter gets the default flag
        """
        Filters = self.registry('ir.filters')
        Filters.create_or_replace(self.cr, self.USER_ID, {
            'name': 'a',
            'model_id': 'ir.filters',
            'user_id': self.USER_ID,
            'is_default': True,
        })
        filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', user_id=self.USER, is_default=True,
                 domain='[]', context='{}')
        ])

    def test_new_filter_not_default(self):
        """ When creating a @is_default filter with existing non-default
        filters, the new filter gets the flag
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        Filters.create_or_replace(self.cr, self.USER_ID, {
            'name': 'c',
            'model_id': 'ir.filters',
            'user_id': self.USER_ID,
            'is_default': True,
        })
        filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', user_id=self.USER, is_default=False,
                 domain='[]', context='{}'),
            dict(name='b', user_id=self.USER, is_default=False,
                 domain='[]', context='{}'),
            dict(name='c', user_id=self.USER, is_default=True,
                 domain='[]', context='{}'),
        ])

    def test_new_filter_existing_default(self):
        """ When creating a @is_default filter where an existing filter is
        already @is_default, the flag should be *moved* from the old to
        the new filter
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        Filters.create_or_replace(self.cr, self.USER_ID, {
            'name': 'c',
            'model_id': 'ir.filters',
            'user_id': self.USER_ID,
            'is_default': True,
        })
        filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', user_id=self.USER, is_default=False,
                 domain='[]', context='{}'),
            dict(name='b', user_id=self.USER, is_default=False,
                 domain='[]', context='{}'),
            dict(name='c', user_id=self.USER, is_default=True,
                 domain='[]', context='{}'),
        ])

    def test_update_filter_set_default(self):
        """ When updating an existing filter to @is_default, if an other
        filter already has the flag the flag should be moved
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        Filters.create_or_replace(self.cr, self.USER_ID, {
            'name': 'a',
            'model_id': 'ir.filters',
            'user_id': self.USER_ID,
            'is_default': True,
        })
        filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', user_id=self.USER, is_default=True,
                 domain='[]', context='{}'),
            dict(name='b', user_id=self.USER, is_default=False,
                 domain='[]', context='{}'),
        ])

class TestGlobalDefaults(FiltersCase):
    """is_default semantics for global filters: taking the flag from an
    existing global default is an error, replacing the same filter is not."""

    def setUp(self):
        super(TestGlobalDefaults, self).setUp()
        self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
        self.USER_ID = self.USER[0]

    def test_new_filter_not_default(self):
        """ When creating a @is_default filter with existing non-default
        filters, the new filter gets the flag
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', user_id=False, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        Filters.create_or_replace(self.cr, self.USER_ID, {
            'name': 'c',
            'model_id': 'ir.filters',
            'user_id': False,
            'is_default': True,
        })
        filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', user_id=False, is_default=False,
                 domain='[]', context='{}'),
            dict(name='b', user_id=False, is_default=False,
                 domain='[]', context='{}'),
            dict(name='c', user_id=False, is_default=True,
                 domain='[]', context='{}'),
        ])

    def test_new_filter_existing_default(self):
        """ When creating a @is_default filter where an existing filter is
        already @is_default, an error should be generated
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        with self.assertRaises(exceptions.Warning):
            Filters.create_or_replace(self.cr, self.USER_ID, {
                'name': 'c',
                'model_id': 'ir.filters',
                'user_id': False,
                'is_default': True,
            })

    def test_update_filter_set_default(self):
        """ When updating an existing filter to @is_default, if an other
        filter already has the flag an error should be generated
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        with self.assertRaises(exceptions.Warning):
            Filters.create_or_replace(self.cr, self.USER_ID, {
                'name': 'a',
                'model_id': 'ir.filters',
                'user_id': False,
                'is_default': True,
            })

    def test_update_default_filter(self):
        """ Replacing the current default global filter should not generate
        any error
        """
        self.build(
            'ir.filters',
            dict(name='a', user_id=False, model_id='ir.filters'),
            dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
        )
        Filters = self.registry('ir.filters')
        context_value = "{'some_key': True}"
        Filters.create_or_replace(self.cr, self.USER_ID, {
            'name': 'b',
            'model_id': 'ir.filters',
            'user_id': False,
            'context': context_value,
            'is_default': True,
        })
        filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')

        self.assertItemsEqual(map(noid, filters), [
            dict(name='a', user_id=False, is_default=False,
                 domain='[]', context='{}'),
            dict(name='b', user_id=False, is_default=True,
                 domain='[]', context=context_value),
        ])

from openerp.tests.common import TransactionCase

class TestReadGroup(TransactionCase):
    """Test function read_group with groupby on a many2one field to a model
    (in test, "user_id" to "res.users") which is ordered by an inherited not stored
    field (in test, "name" inherited from "res.partners").
    """

    def setUp(self):
        super(TestReadGroup, self).setUp()
        self.ir_filters_model = self.env['ir.filters']
        self.res_partner_model = self.env['res.partner']
        self.res_users_model = self.env['res.users']

    def test_read_group_1(self):
        # Preconditions the regression depends on: res.users ordered by an
        # inherited, non-stored field.
        self.assertEqual(self.res_users_model._order, "name, login", "Model res.users must be ordered by name, login")
        self.assertFalse(self.res_users_model._fields['name'].store, "Field name is not stored in res.users")

        filter_a = self.ir_filters_model.create(dict(name="Filter_A", model_id="ir.filters"))
        filter_b = self.ir_filters_model.create(dict(name="Filter_B", model_id="ir.filters"))
        filter_b.write(dict(user_id=False))
        res = self.ir_filters_model.read_group([], ['name', 'user_id'], ['user_id'])
        self.assertTrue(any(val['user_id'] == False for val in res), "At least one group must contain val['user_id'] == False.")
agpl-3.0
olavoasantos/FluentDB
lib/FluentDB/core/mysql/queryBuilder.py
1
2785
class MysqlQueryBuilder(object):
    """Fluent builder for simple MySQL statements.

    SELECT queries are assembled via chained calls
    (``table().select().where().orderBy().limitBy().build()``);
    ``create``/``update``/``delete`` return parameterized statements directly.

    Fixes over the original:
      * The ``table = None`` class attribute was clobbered by ``def table``,
        and ``self.table = table`` then shadowed the method on the instance,
        so a second ``.table()`` call raised TypeError and ``build()`` without
        a prior ``.table()`` formatted a bound method into the SQL.  The name
        is now stored in ``self._table``.
      * State was held in shared class attributes; it is now initialized
        per-instance in ``__init__``.
      * ``buildRestrictions`` appended on every call, so calling ``build()``
        twice duplicated the WHERE/ORDER/LIMIT clauses; it now rebuilds the
        suffix from scratch, making ``build()`` idempotent.
    """

    # Template for SELECT statements: selection, table, restriction suffix.
    action = "SELECT {0} FROM {1}{2}"

    def __init__(self):
        self._table = None      # target table name (set via .table())
        self.selection = "*"    # column list for SELECT
        self.restriction = ""   # assembled WHERE/ORDER/LIMIT suffix
        self.Where = None       # accumulated WHERE clause
        self.Order = None       # ORDER BY clause
        self.Limit = None       # LIMIT clause

    def build(self):
        """Assemble and return the SELECT statement."""
        self.buildRestrictions()
        return self.action.format(self.selection, self._table, self.restriction)

    def table(self, table):
        """Set the table to select from; returns self for chaining."""
        self._table = table
        return self

    def create(self, table, values):
        """Return a parameterized INSERT for the keys of ``values``."""
        placeholders = ['%s' for _ in values]
        return "INSERT INTO {0}({1}) VALUES({2})".format(
            table, ", ".join(values.keys()), ", ".join(placeholders))

    def update(self, table, columns, id):
        """Return a parameterized UPDATE of ``columns`` for the row ``id``."""
        formatedColumns = ["{0} = %s".format(column) for column in columns]
        return "UPDATE {0} SET {1} WHERE id = {2}".format(
            table, ", ".join(formatedColumns), id)

    def delete(self, table):
        """Return a parameterized DELETE-by-id statement."""
        return "DELETE FROM {0} WHERE id = %s".format(table)

    def select(self, *args):
        """Set the selected columns (default '*'); returns self."""
        if len(args) > 1:
            self.selection = ",".join(args)
        elif len(args) == 1:
            self.selection = args[0]
        else:
            self.selection = "*"
        return self

    def buildRestrictions(self):
        """Rebuild the restriction suffix from Where/Order/Limit.

        Rebuilt from scratch each call so repeated build() calls do not
        duplicate clauses.
        """
        self.restriction = ""
        if self.Where:
            self.restriction = " ".join((self.restriction, self.Where))
        if self.Order:
            self.restriction = " ".join((self.restriction, self.Order))
        if self.Limit:
            self.restriction = " ".join((self.restriction, self.Limit))

    def where(self, *args):
        """Add a condition; forms: (id), (column, value), (column, op, value).

        WARNING: values are interpolated directly into the SQL text (quoting
        strings with bare single quotes), so this is vulnerable to SQL
        injection -- only use it with trusted values, or prefer the
        parameterized create/update/delete paths.  Output format is kept
        byte-compatible with the original.
        """
        if len(args) == 1:
            __ = "id={0}".format(args[0])
        elif len(args) == 2:
            if isinstance(args[1], str):
                __ = "{0}=\'{1}\'".format(args[0], args[1])
            else:
                __ = "{0}={1}".format(args[0], args[1])
        elif len(args) == 3:
            if isinstance(args[2], str):
                __ = "{0}{1}\'{2}\'".format(args[0], args[1], args[2])
            else:
                __ = "{0}{1}{2}".format(args[0], args[1], args[2])
        else:
            raise ValueError("Missing argument for where clause.")

        # Subsequent conditions are AND-ed onto the existing clause.
        if self.Where:
            self.Where = "{0} AND {1}".format(self.Where, __)
        else:
            self.Where = "WHERE {0}".format(__)
        return self

    def orderBy(self, column, order="ASC"):
        """Set the ORDER BY clause; returns self."""
        self.Order = "ORDER BY {0} {1}".format(column, order)
        return self

    def limitBy(self, count, offset=None):
        """Set the LIMIT clause (MySQL 'LIMIT offset, count' form); returns self."""
        if offset:
            vars = "{0}, {1}".format(offset, count)
        else:
            vars = count
        self.Limit = "LIMIT {0}".format(vars)
        return self
mit
uwdata/termite-visualizations
web2py/gluon/sqlhtml.py
5
130636
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Holds: - SQLFORM: provide a form for a table (with/without record) - SQLTABLE: provides a table for a set of records - form_factory: provides a SQLFORM for an non-db backed table """ import os from gluon.http import HTTP from gluon.html import XmlComponent from gluon.html import XML, SPAN, TAG, A, DIV, CAT, UL, LI, TEXTAREA, BR, IMG, SCRIPT from gluon.html import FORM, INPUT, LABEL, OPTION, SELECT, COL, COLGROUP from gluon.html import TABLE, THEAD, TBODY, TR, TD, TH, STYLE from gluon.html import URL, truncate_string, FIELDSET from gluon.dal import DAL, Field, Table, Row, CALLABLETYPES, smart_query, \ bar_encode, Reference, REGEX_TABLE_DOT_FIELD, Expression, SQLCustomType from gluon.storage import Storage from gluon.utils import md5_hash from gluon.validators import IS_EMPTY_OR, IS_NOT_EMPTY, IS_LIST_OF, IS_DATE, \ IS_DATETIME, IS_INT_IN_RANGE, IS_FLOAT_IN_RANGE, IS_STRONG import gluon.serializers as serializers import datetime import urllib import re import cStringIO from gluon.globals import current from gluon.http import redirect import inspect try: import gluon.settings as settings is_gae = settings.global_settings.web2py_runtime_gae except ImportError: is_gae = False # this is an assumption (if settings missing) table_field = re.compile('[\w_]+\.[\w_]+') widget_class = re.compile('^\w*') def represent(field, value, record): f = field.represent if not callable(f): return str(value) n = f.func_code.co_argcount - len(f.func_defaults or []) if getattr(f, 'im_self', None): n -= 1 if n == 1: return f(value) elif n == 2: return f(value, record) else: raise RuntimeError("field representation must take 1 or 2 args") def safe_int(x): try: return int(x) except ValueError: return 0 def safe_float(x): try: return float(x) except ValueError: return 0 def 
show_if(cond): if not cond: return None base = "%s_%s" % (cond.first.tablename, cond.first.name) if ((cond.op.__name__ == 'EQ' and cond.second == True) or (cond.op.__name__ == 'NE' and cond.second == False)): return base,":checked" if ((cond.op.__name__ == 'EQ' and cond.second == False) or (cond.op.__name__ == 'NE' and cond.second == True)): return base,":not(:checked)" if cond.op.__name__ == 'EQ': return base,"[value='%s']" % cond.second if cond.op.__name__ == 'NE': return base,"[value!='%s']" % cond.second if cond.op.__name__ == 'CONTAINS': return base,"[value~='%s']" % cond.second if cond.op.__name__ == 'BELONGS' and isinstance(cond.second,(list,tuple)): return base,','.join("[value='%s']" % (v) for v in cond.second) raise RuntimeError("Not Implemented Error") class FormWidget(object): """ helper for SQLFORM to generate form input fields (widget), related to the fieldtype """ _class = 'generic-widget' @classmethod def _attributes(cls, field, widget_attributes, **attributes): """ helper to build a common set of attributes :param field: the field involved, some attributes are derived from this :param widget_attributes: widget related attributes :param attributes: any other supplied attributes """ attr = dict( _id='%s_%s' % (field.tablename, field.name), _class=cls._class or widget_class.match(str(field.type)).group(), _name=field.name, requires=field.requires, ) if getattr(field,'show_if',None): trigger, cond = show_if(field.show_if) attr['_data-show-trigger'] = trigger attr['_data-show-if'] = cond attr.update(widget_attributes) attr.update(attributes) return attr @classmethod def widget(cls, field, value, **attributes): """ generates the widget for the field. 
When serialized, will provide an INPUT tag: - id = tablename_fieldname - class = field.type - name = fieldname :param field: the field needing the widget :param value: value :param attributes: any other attributes to be applied """ raise NotImplementedError class StringWidget(FormWidget): _class = 'string' @classmethod def widget(cls, field, value, **attributes): """ generates an INPUT text tag. see also: :meth:`FormWidget.widget` """ default = dict( _type='text', value=(not value is None and str(value)) or '', ) attr = cls._attributes(field, default, **attributes) return INPUT(**attr) class IntegerWidget(StringWidget): _class = 'integer' class DoubleWidget(StringWidget): _class = 'double' class DecimalWidget(StringWidget): _class = 'decimal' class TimeWidget(StringWidget): _class = 'time' class DateWidget(StringWidget): _class = 'date' class DatetimeWidget(StringWidget): _class = 'datetime' class TextWidget(FormWidget): _class = 'text' @classmethod def widget(cls, field, value, **attributes): """ generates a TEXTAREA tag. see also: :meth:`FormWidget.widget` """ default = dict(value=value) attr = cls._attributes(field, default, **attributes) return TEXTAREA(**attr) class JSONWidget(FormWidget): _class = 'json' @classmethod def widget(cls, field, value, **attributes): """ generates a TEXTAREA for JSON notation. see also: :meth:`FormWidget.widget` """ if not isinstance(value, basestring): if value is not None: value = serializers.json(value) default = dict(value=value) attr = cls._attributes(field, default, **attributes) return TEXTAREA(**attr) class BooleanWidget(FormWidget): _class = 'boolean' @classmethod def widget(cls, field, value, **attributes): """ generates an INPUT checkbox tag. 
see also: :meth:`FormWidget.widget` """ default = dict(_type='checkbox', value=value) attr = cls._attributes(field, default, **attributes) return INPUT(**attr) class OptionsWidget(FormWidget): @staticmethod def has_options(field): """ checks if the field has selectable options :param field: the field needing checking :returns: True if the field has options """ return hasattr(field.requires, 'options') @classmethod def widget(cls, field, value, **attributes): """ generates a SELECT tag, including OPTIONs (only 1 option allowed) see also: :meth:`FormWidget.widget` """ default = dict(value=value) attr = cls._attributes(field, default, **attributes) requires = field.requires if not isinstance(requires, (list, tuple)): requires = [requires] if requires: if hasattr(requires[0], 'options'): options = requires[0].options() else: raise SyntaxError( 'widget cannot determine options of %s' % field) opts = [OPTION(v, _value=k) for (k, v) in options] return SELECT(*opts, **attr) class ListWidget(StringWidget): @classmethod def widget(cls, field, value, **attributes): _id = '%s_%s' % (field.tablename, field.name) _name = field.name if field.type == 'list:integer': _class = 'integer' else: _class = 'string' requires = field.requires if isinstance( field.requires, (IS_NOT_EMPTY, IS_LIST_OF)) else None if isinstance(value,str): value = [value] nvalue = value or [''] items = [LI(INPUT(_id=_id, _class=_class, _name=_name, value=v, hideerror=k < len(nvalue) - 1, requires=requires), **attributes) for (k, v) in enumerate(nvalue)] attributes['_id'] = _id + '_grow_input' attributes['_style'] = 'list-style:none' attributes['_class'] = 'w2p_list' return TAG[''](UL(*items, **attributes)) class MultipleOptionsWidget(OptionsWidget): @classmethod def widget(cls, field, value, size=5, **attributes): """ generates a SELECT tag, including OPTIONs (multiple options allowed) see also: :meth:`FormWidget.widget` :param size: optional param (default=5) to indicate how many rows must be shown """ 
attributes.update(_size=size, _multiple=True) return OptionsWidget.widget(field, value, **attributes) class RadioWidget(OptionsWidget): @classmethod def widget(cls, field, value, **attributes): """ generates a TABLE tag, including INPUT radios (only 1 option allowed) see also: :meth:`FormWidget.widget` """ if isinstance(value, (list,tuple)): value = str(value[0]) else: value = str(value) attr = cls._attributes(field, {}, **attributes) attr['_class'] = attr.get('_class', 'web2py_radiowidget') requires = field.requires if not isinstance(requires, (list, tuple)): requires = [requires] if requires: if hasattr(requires[0], 'options'): options = requires[0].options() else: raise SyntaxError('widget cannot determine options of %s' % field) options = [(k, v) for k, v in options if str(v)] opts = [] cols = attributes.get('cols', 1) totals = len(options) mods = totals % cols rows = totals / cols if mods: rows += 1 #widget style wrappers = dict( table=(TABLE, TR, TD), ul=(DIV, UL, LI), divs=(CAT, DIV, DIV) ) parent, child, inner = wrappers[attributes.get('style', 'table')] for r_index in range(rows): tds = [] for k, v in options[r_index * cols:(r_index + 1) * cols]: checked = {'_checked': 'checked'} if k == value else {} tds.append(inner(INPUT(_type='radio', _id='%s%s' % (field.name, k), _name=field.name, requires=attr.get('requires', None), hideerror=True, _value=k, value=value, **checked), LABEL(v, _for='%s%s' % (field.name, k)))) opts.append(child(tds)) if opts: opts[-1][0][0]['hideerror'] = False return parent(*opts, **attr) class CheckboxesWidget(OptionsWidget): @classmethod def widget(cls, field, value, **attributes): """ generates a TABLE tag, including INPUT checkboxes (multiple allowed) see also: :meth:`FormWidget.widget` """ # was values = re.compile('[\w\-:]+').findall(str(value)) if isinstance(value, (list, tuple)): values = [str(v) for v in value] else: values = [str(value)] attr = cls._attributes(field, {}, **attributes) attr['_class'] = attr.get('_class', 
class PasswordWidget(FormWidget):
    _class = 'password'

    DEFAULT_PASSWORD_DISPLAY = 8 * ('*')

    @classmethod
    def widget(cls, field, value, **attributes):
        """
        generates a INPUT password tag.
        If a value is present it will be shown as a number of '*', not
        related to the length of the actual value.

        see also: :meth:`FormWidget.widget`
        """
        # never echo the real value back: mask it, or leave blank when unset
        masked = cls.DEFAULT_PASSWORD_DISPLAY if value else ''
        attr = cls._attributes(
            field, dict(_type='password', _value=masked), **attributes)

        # if an IS_STRONG validator is attached, expose its entropy setting
        # to the client-side strength meter via a data attribute
        validators = field.requires
        if not isinstance(validators, (list, tuple)):
            validators = [validators]
        for validator in validators:
            if isinstance(validator, IS_STRONG):
                attr['_data-w2p_entropy'] = validator.entropy if validator.entropy else "null"
                break
        return INPUT(**attr)
class UploadWidget(FormWidget):
    _class = 'upload'

    DEFAULT_WIDTH = '150px'
    ID_DELETE_SUFFIX = '__delete'
    GENERIC_DESCRIPTION = 'file ## download'
    DELETE_FILE = 'delete'

    @classmethod
    def widget(cls, field, value, download_url=None, **attributes):
        """
        generates a INPUT file tag.

        Optionally provides an A link to the file, including a checkbox so
        the file can be deleted.
        All is wrapped in a DIV.

        see also: :meth:`FormWidget.widget`

        :param download_url: Optional URL to link to the file (default = None)
        """
        default = dict(_type='file',)
        attr = cls._attributes(field, default, **attributes)

        inp = INPUT(**attr)

        if download_url and value:
            if callable(download_url):
                url = download_url(value)
            else:
                url = download_url + '/' + value
            (br, image) = ('', '')
            if UploadWidget.is_image(value):
                br = BR()
                image = IMG(_src=url, _width=cls.DEFAULT_WIDTH)

            requires = attr["requires"]
            # only offer the delete checkbox when the field may be empty
            if requires == [] or isinstance(requires, IS_EMPTY_OR):
                inp = DIV(inp,
                          SPAN('[',
                               A(current.T(
                                   UploadWidget.GENERIC_DESCRIPTION), _href=url),
                               '|',
                               INPUT(_type='checkbox',
                                     _name=field.name + cls.ID_DELETE_SUFFIX,
                                     _id=field.name + cls.ID_DELETE_SUFFIX),
                               LABEL(current.T(cls.DELETE_FILE),
                                     _for=field.name + cls.ID_DELETE_SUFFIX,
                                     _style='display:inline'),
                               ']', _style='white-space:nowrap'),
                          br, image)
            else:
                inp = DIV(inp,
                          SPAN('[',
                               A(current.T(cls.GENERIC_DESCRIPTION), _href=url),
                               ']', _style='white-space:nowrap'),
                          br, image)
        return inp

    @classmethod
    def represent(cls, field, value, download_url=None):
        """
        how to represent the file:

        - with download url and if it is an image: <A href=...><IMG ...></A>
        - otherwise with download url: <A href=...>file</A>
        - otherwise: file

        :param field: the field
        :param value: the field value
        :param download_url: url for the file download (default = None)
        """
        inp = current.T(cls.GENERIC_DESCRIPTION)

        if download_url and value:
            if callable(download_url):
                url = download_url(value)
            else:
                url = download_url + '/' + value
            if cls.is_image(value):
                inp = IMG(_src=url, _width=cls.DEFAULT_WIDTH)
            inp = A(inp, _href=url)

        return inp

    @staticmethod
    def is_image(value):
        """
        Tries to check if the filename provided references to an image

        Checking is based on filename extension. Currently recognized:
           gif, png, jp(e)g, bmp

        :param value: filename
        """
        extension = value.split('.')[-1].lower()
        if extension in ['gif', 'png', 'jpg', 'jpeg', 'bmp']:
            return True
        return False
class AutocompleteWidget(object):
    # renders a text INPUT plus a hidden result DIV; keystrokes trigger an
    # AJAX GET back to the same URL, answered by callback() with a SELECT
    _class = 'string'

    def __init__(self, request, field, id_field=None, db=None,
                 orderby=None, limitby=(0, 10), distinct=False,
                 keyword='_autocomplete_%(tablename)s_%(fieldname)s',
                 min_length=2, help_fields=None, help_string=None):
        """
        :param request: current request (or a plain URL string to query)
        :param field: the field to autocomplete on
        :param id_field: if set, the widget stores this id while displaying
            `field` (reference mode)
        :param min_length: minimum typed chars before querying (0 = onfocus)
        """
        self.help_fields = help_fields or []
        self.help_string = help_string
        if self.help_fields and not self.help_string:
            self.help_string = ' '.join('%%(%s)s'%f.name for f in self.help_fields)

        self.request = request
        self.keyword = keyword % dict(tablename=field.tablename,
                                      fieldname=field.name)
        self.db = db or field._db
        self.orderby = orderby
        self.limitby = limitby
        self.distinct = distinct
        self.min_length = min_length
        self.fields = [field]
        if id_field:
            self.is_reference = True
            self.fields.append(id_field)
        else:
            self.is_reference = False
        if hasattr(request, 'application'):  # a genuine Request object
            self.url = URL(args=request.args)
            self.callback()
        else:  # assume a URL string was passed instead
            self.url = request

    def callback(self):
        # answers the AJAX request by raising HTTP(200, <select>...)
        # which aborts normal page processing
        if self.keyword in self.request.vars:
            field = self.fields[0]
            if is_gae:
                # GAE has no LIKE: emulate prefix match with a range query
                rows = self.db(field.__ge__(self.request.vars[self.keyword]) & field.__lt__(self.request.vars[self.keyword] + u'\ufffd')).select(orderby=self.orderby, limitby=self.limitby, *(self.fields+self.help_fields))
            else:
                rows = self.db(field.like(self.request.vars[self.keyword] + '%')).select(orderby=self.orderby, limitby=self.limitby, distinct=self.distinct, *(self.fields+self.help_fields))
            if rows:
                if self.is_reference:
                    id_field = self.fields[1]
                    if self.help_fields:
                        options = [OPTION(
                            self.help_string % dict([(h.name, s[h.name]) for h in self.fields[:1] + self.help_fields]),
                            _value=s[id_field.name], _selected=(k == 0)) for k, s in enumerate(rows)]
                    else:
                        options = [OPTION(
                            s[field.name], _value=s[id_field.name],
                            _selected=(k == 0)) for k, s in enumerate(rows)]
                    raise HTTP(
                        200, SELECT(_id=self.keyword, _class='autocomplete',
                                    _size=len(rows), _multiple=(len(rows) == 1),
                                    *options).xml())
                else:
                    raise HTTP(
                        200, SELECT(_id=self.keyword, _class='autocomplete',
                                    _size=len(rows), _multiple=(len(rows) == 1),
                                    *[OPTION(s[field.name], _selected=(k == 0))
                                      for k, s in enumerate(rows)]).xml())
            else:
                raise HTTP(200, '')

    def __call__(self, field, value, **attributes):
        default = dict(
            _type='text',
            value=(not value is None and str(value)) or '',
        )
        attr = StringWidget._attributes(field, default, **attributes)
        div_id = self.keyword + '_div'
        attr['_autocomplete'] = 'off'
        if self.is_reference:
            # reference mode: visible input shows the display value,
            # a hidden input (key3) carries the actual id submitted
            key2 = self.keyword + '_aux'
            key3 = self.keyword + '_auto'
            attr['_class'] = 'string'
            name = attr['_name']
            if 'requires' in attr:
                del attr['requires']
            attr['_name'] = key2
            value = attr['value']
            record = self.db(
                self.fields[1] == value).select(self.fields[0]).first()
            attr['value'] = record and record[self.fields[0].name]
            attr['_onblur'] = "jQuery('#%(div_id)s').delay(1000).fadeOut('slow');" % \
                dict(div_id=div_id, u='F' + self.keyword)
            # inline jQuery: arrow-key navigation of the dropdown plus the
            # AJAX fetch once min_length characters have been typed
            attr['_onkeyup'] = "jQuery('#%(key3)s').val('');var e=event.which?event.which:event.keyCode; function %(u)s(){jQuery('#%(id)s').val(jQuery('#%(key)s :selected').text());jQuery('#%(key3)s').val(jQuery('#%(key)s').val())}; if(e==39) %(u)s(); else if(e==40) {if(jQuery('#%(key)s option:selected').next().length)jQuery('#%(key)s option:selected').attr('selected',null).next().attr('selected','selected'); %(u)s();} else if(e==38) {if(jQuery('#%(key)s option:selected').prev().length)jQuery('#%(key)s option:selected').attr('selected',null).prev().attr('selected','selected'); %(u)s();} else if(jQuery('#%(id)s').val().length>=%(min_length)s) jQuery.get('%(url)s?%(key)s='+encodeURIComponent(jQuery('#%(id)s').val()),function(data){if(data=='')jQuery('#%(key3)s').val('');else{jQuery('#%(id)s').next('.error').hide();jQuery('#%(div_id)s').html(data).show().focus();jQuery('#%(div_id)s select').css('width',jQuery('#%(id)s').css('width'));jQuery('#%(key3)s').val(jQuery('#%(key)s').val());jQuery('#%(key)s').change(%(u)s);jQuery('#%(key)s').click(%(u)s);};}); else jQuery('#%(div_id)s').fadeOut('slow');" % \
                dict(url=self.url, min_length=self.min_length,
                     key=self.keyword, id=attr['_id'], key2=key2, key3=key3,
                     name=name, div_id=div_id, u='F' + self.keyword)
            if self.min_length == 0:
                attr['_onfocus'] = attr['_onkeyup']
            return TAG[''](INPUT(**attr),
                           INPUT(_type='hidden', _id=key3, _value=value,
                                 _name=name, requires=field.requires),
                           DIV(_id=div_id, _style='position:absolute;'))
        else:
            attr['_name'] = field.name
            attr['_onblur'] = "jQuery('#%(div_id)s').delay(1000).fadeOut('slow');" % \
                dict(div_id=div_id, u='F' + self.keyword)
            attr['_onkeyup'] = "var e=event.which?event.which:event.keyCode; function %(u)s(){jQuery('#%(id)s').val(jQuery('#%(key)s').val())}; if(e==39) %(u)s(); else if(e==40) {if(jQuery('#%(key)s option:selected').next().length)jQuery('#%(key)s option:selected').attr('selected',null).next().attr('selected','selected'); %(u)s();} else if(e==38) {if(jQuery('#%(key)s option:selected').prev().length)jQuery('#%(key)s option:selected').attr('selected',null).prev().attr('selected','selected'); %(u)s();} else if(jQuery('#%(id)s').val().length>=%(min_length)s) jQuery.get('%(url)s?%(key)s='+encodeURIComponent(jQuery('#%(id)s').val()),function(data){jQuery('#%(id)s').next('.error').hide();jQuery('#%(div_id)s').html(data).show().focus();jQuery('#%(div_id)s select').css('width',jQuery('#%(id)s').css('width'));jQuery('#%(key)s').change(%(u)s);jQuery('#%(key)s').click(%(u)s);}); else jQuery('#%(div_id)s').fadeOut('slow');" % \
                dict(url=self.url, min_length=self.min_length,
                     key=self.keyword, id=attr['_id'],
                     div_id=div_id, u='F' + self.keyword)
            if self.min_length == 0:
                attr['_onfocus'] = attr['_onkeyup']
            return TAG[''](INPUT(**attr),
                           DIV(_id=div_id, _style='position:absolute;'))
def formstyle_table3cols(form, fields):
    ''' 3 column table - default '''
    # one TR per field: label | widget | help
    rows = [TR(TD(label, _class='w2p_fl'),
               TD(controls, _class='w2p_fw'),
               TD(help, _class='w2p_fc'),
               _id=id)
            for id, label, controls, help in fields]
    return TABLE(*rows)


def formstyle_table2cols(form, fields):
    ''' 2 column table '''
    table = TABLE()
    for id, label, controls, help in fields:
        # label + help on one (even) row, widget spanning the next (odd) row
        table.append(TR(TD(label, _class='w2p_fl', _width='50%'),
                        TD(help, _class='w2p_fc', _width='50%'),
                        _id=id + '1', _class='even'))
        table.append(TR(TD(controls, _class='w2p_fw', _colspan='2'),
                        _id=id + '2', _class='odd'))
    return table


def formstyle_divs(form, fields):
    ''' divs only '''
    wrapper = FIELDSET()
    for id, label, controls, help in fields:
        wrapper.append(DIV(DIV(label, _class='w2p_fl'),
                           DIV(controls, _class='w2p_fw'),
                           DIV(help, _class='w2p_fc'),
                           _id=id))
    return wrapper


def formstyle_inline(form, fields):
    ''' divs only '''
    # expects exactly one input row followed by the submit row
    if len(fields) != 2:
        raise RuntimeError("Not possible")
    (id, label, controls, help), (_, _, submit_button, _) = fields
    return CAT(DIV(controls, _style='display:inline'),
               submit_button)


def formstyle_ul(form, fields):
    ''' unordered list '''
    listing = UL()
    for id, label, controls, help in fields:
        listing.append(LI(DIV(label, _class='w2p_fl'),
                          DIV(controls, _class='w2p_fw'),
                          DIV(help, _class='w2p_fc'),
                          _id=id))
    return listing
def formstyle_bootstrap(form, fields):
    ''' bootstrap format form layout (Bootstrap 2.x class names) '''
    form.add_class('form-horizontal')
    parent = FIELDSET()
    for id, label, controls, help in fields:
        # wrappers
        _help = SPAN(help, _class='help-block')
        # embed _help into _controls
        _controls = DIV(controls, _help, _class='controls')
        # submit unflag by default
        _submit = False

        if isinstance(controls, INPUT):
            controls.add_class('span4')
            if controls['_type'] == 'submit':
                # flag submit button
                _submit = True
                controls['_class'] = 'btn btn-primary'
            if controls['_type'] == 'file':
                controls['_class'] = 'input-file'

        # For password fields, which are wrapped in a CAT object.
        if isinstance(controls, CAT) and isinstance(controls[0], INPUT):
            controls[0].add_class('span4')

        if isinstance(controls, SELECT):
            controls.add_class('span4')

        if isinstance(controls, TEXTAREA):
            controls.add_class('span4')

        if isinstance(label, LABEL):
            label['_class'] = 'control-label'

        if _submit:
            # submit button has unwrapped label and controls, different class
            parent.append(DIV(label, controls,
                          _class='form-actions', _id=id))
            # unflag submit (possible side effect)
            _submit = False
        else:
            # unwrapped label
            parent.append(DIV(label, _controls,
                          _class='control-group', _id=id))
    return parent
def formstyle_bootstrap3(form, fields):
    ''' bootstrap 3 format form layout (col-lg-* grid class names) '''
    form.add_class('form-horizontal')
    parent = FIELDSET()
    for id, label, controls, help in fields:
        # wrappers
        _help = SPAN(help, _class='help-block')
        # embed _help into _controls
        _controls = DIV(controls, _help, _class='col-lg-4')
        # submit unflag by default
        _submit = False

        if isinstance(controls, INPUT):
            controls.add_class('col-lg-4')
            if controls['_type'] == 'submit':
                # flag submit button
                _submit = True
                controls['_class'] = 'btn btn-primary'
            if controls['_type'] == 'button':
                controls['_class'] = 'btn btn-default'
            elif controls['_type'] == 'file':
                controls['_class'] = 'input-file'
            elif controls['_type'] == 'text':
                controls['_class'] = 'form-control'
            elif controls['_type'] == 'password':
                controls['_class'] = 'form-control'
            elif controls['_type'] == 'checkbox':
                controls['_class'] = 'checkbox'

        # For password fields, which are wrapped in a CAT object.
        if isinstance(controls, CAT) and isinstance(controls[0], INPUT):
            controls[0].add_class('col-lg-2')

        if isinstance(controls, SELECT):
            controls.add_class('form-control')

        if isinstance(controls, TEXTAREA):
            controls.add_class('form-control')

        if isinstance(label, LABEL):
            label['_class'] = 'col-lg-2 control-label'

        if _submit:
            # submit button has unwrapped label and controls, different class
            parent.append(DIV(label, DIV(controls,
                          _class="col-lg-4 col-lg-offset-2"),
                          _class='form-group', _id=id))
            # unflag submit (possible side effect)
            _submit = False
        else:
            # unwrapped label
            parent.append(DIV(label, _controls,
                          _class='form-group', _id=id))
    return parent
""" # usability improvements proposal by fpp - 4 May 2008 : # - correct labels (for points to field id, not field name) # - add label for delete checkbox # - add translatable label for record ID # - add third column to right of fields, populated from the col3 dict widgets = Storage(dict( string=StringWidget, text=TextWidget, json=JSONWidget, password=PasswordWidget, integer=IntegerWidget, double=DoubleWidget, decimal=DecimalWidget, time=TimeWidget, date=DateWidget, datetime=DatetimeWidget, upload=UploadWidget, boolean=BooleanWidget, blob=None, options=OptionsWidget, multiple=MultipleOptionsWidget, radio=RadioWidget, checkboxes=CheckboxesWidget, autocomplete=AutocompleteWidget, list=ListWidget, )) formstyles = Storage(dict( table3cols=formstyle_table3cols, table2cols=formstyle_table2cols, divs=formstyle_divs, ul=formstyle_ul, bootstrap=formstyle_bootstrap, bootstrap3=formstyle_bootstrap3, inline=formstyle_inline, )) FIELDNAME_REQUEST_DELETE = 'delete_this_record' FIELDKEY_DELETE_RECORD = 'delete_record' ID_LABEL_SUFFIX = '__label' ID_ROW_SUFFIX = '__row' def assert_status(self, status, request_vars): if not status and self.record and self.errors: ### if there are errors in update mode # and some errors refers to an already uploaded file # delete error if # - user not trying to upload a new file # - there is existing file and user is not trying to delete it # this is because removing the file may not pass validation for key in self.errors.keys(): if key in self.table \ and self.table[key].type == 'upload' \ and request_vars.get(key, None) in (None, '') \ and self.record[key] \ and not key + UploadWidget.ID_DELETE_SUFFIX in request_vars: del self.errors[key] if not self.errors: status = True return status def __init__( self, table, record=None, deletable=False, linkto=None, upload=None, fields=None, labels=None, col3={}, submit_button='Submit', delete_label='Check to delete', showid=True, readonly=False, comments=True, keepopts=[], ignore_rw=False, record_id=None, 
formstyle='table3cols', buttons=['submit'], separator=': ', **attributes ): """ SQLFORM(db.table, record=None, fields=['name'], labels={'name': 'Your name'}, linkto=URL(f='table/db/') """ T = current.T self.ignore_rw = ignore_rw self.formstyle = formstyle self.readonly = readonly # Default dbio setting self.detect_record_change = None nbsp = XML('&nbsp;') # Firefox2 does not display fields with blanks FORM.__init__(self, *[], **attributes) ofields = fields keyed = hasattr(table, '_primarykey') # for backward compatibility # if no fields are provided, build it from the provided table # will only use writable or readable fields, unless forced to ignore if fields is None: fields = [f.name for f in table if (ignore_rw or f.writable or f.readable) and (readonly or not f.compute)] self.fields = fields # make sure we have an id if self.fields[0] != table.fields[0] and \ isinstance(table, Table) and not keyed: self.fields.insert(0, table.fields[0]) self.table = table # try to retrieve the indicated record using its id # otherwise ignore it if record and isinstance(record, (int, long, str, unicode)): if not str(record).isdigit(): raise HTTP(404, "Object not found") record = table._db(table._id == record).select().first() if not record: raise HTTP(404, "Object not found") self.record = record self.record_id = record_id if keyed: self.record_id = dict([(k, record and str(record[k]) or None) for k in table._primarykey]) self.field_parent = {} xfields = [] self.fields = fields self.custom = Storage() self.custom.dspval = Storage() self.custom.inpval = Storage() self.custom.label = Storage() self.custom.comment = Storage() self.custom.widget = Storage() self.custom.linkto = Storage() # default id field name if not keyed: self.id_field_name = table._id.name else: self.id_field_name = table._primarykey[0] # only works if one key sep = separator or '' for fieldname in self.fields: if fieldname.find('.') >= 0: continue field = self.table[fieldname] comment = None if comments: 
comment = col3.get(fieldname, field.comment) if comment is None: comment = '' self.custom.comment[fieldname] = comment if not labels is None and fieldname in labels: label = labels[fieldname] else: label = field.label self.custom.label[fieldname] = label field_id = '%s_%s' % (table._tablename, fieldname) label = LABEL(label, label and sep, _for=field_id, _id=field_id + SQLFORM.ID_LABEL_SUFFIX) row_id = field_id + SQLFORM.ID_ROW_SUFFIX if field.type == 'id': self.custom.dspval.id = nbsp self.custom.inpval.id = '' widget = '' # store the id field name (for legacy databases) self.id_field_name = field.name if record: if showid and field.name in record and field.readable: v = record[field.name] widget = SPAN(v, _id=field_id) self.custom.dspval.id = str(v) xfields.append((row_id, label, widget, comment)) self.record_id = str(record[field.name]) self.custom.widget.id = widget continue if readonly and not ignore_rw and not field.readable: continue if record: default = record[fieldname] else: default = field.default if isinstance(default, CALLABLETYPES): default = default() cond = readonly or \ (not ignore_rw and not field.writable and field.readable) if default is not None and not cond: default = field.formatter(default) dspval = default inpval = default if cond: # ## if field.represent is available else # ## ignore blob and preview uploaded images # ## format everything else if field.represent: inp = represent(field, default, record) elif field.type in ['blob']: continue elif field.type == 'upload': inp = UploadWidget.represent(field, default, upload) elif field.type == 'boolean': inp = self.widgets.boolean.widget( field, default, _disabled=True) else: inp = field.formatter(default) elif field.type == 'upload': if field.widget: inp = field.widget(field, default, upload) else: inp = self.widgets.upload.widget(field, default, upload) elif field.widget: inp = field.widget(field, default) elif field.type == 'boolean': inp = self.widgets.boolean.widget(field, default) if 
default: inpval = 'checked' else: inpval = '' elif OptionsWidget.has_options(field): if not field.requires.multiple: inp = self.widgets.options.widget(field, default) else: inp = self.widgets.multiple.widget(field, default) if fieldname in keepopts: inpval = TAG[''](*inp.components) elif field.type.startswith('list:'): inp = self.widgets.list.widget(field, default) elif field.type == 'text': inp = self.widgets.text.widget(field, default) elif field.type == 'password': inp = self.widgets.password.widget(field, default) if self.record: dspval = PasswordWidget.DEFAULT_PASSWORD_DISPLAY else: dspval = '' elif field.type == 'blob': continue else: field_type = widget_class.match(str(field.type)).group() field_type = field_type in self.widgets and field_type or 'string' inp = self.widgets[field_type].widget(field, default) xfields.append((row_id, label, inp, comment)) self.custom.dspval[fieldname] = dspval if (dspval is not None) else nbsp self.custom.inpval[ fieldname] = inpval if not inpval is None else '' self.custom.widget[fieldname] = inp # if a record is provided and found, as is linkto # build a link if record and linkto: db = linkto.split('/')[-1] for rfld in table._referenced_by: if keyed: query = urllib.quote('%s.%s==%s' % ( db, rfld, record[rfld.type[10:].split('.')[1]])) else: query = urllib.quote( '%s.%s==%s' % (db, rfld, record[self.id_field_name])) lname = olname = '%s.%s' % (rfld.tablename, rfld.name) if ofields and not olname in ofields: continue if labels and lname in labels: lname = labels[lname] widget = A(lname, _class='reference', _href='%s/%s?query=%s' % (linkto, rfld.tablename, query)) xfields.append( (olname.replace('.', '__') + SQLFORM.ID_ROW_SUFFIX, '', widget, col3.get(olname, ''))) self.custom.linkto[olname.replace('.', '__')] = widget # </block> # when deletable, add delete? 
checkbox self.custom.delete = self.custom.deletable = '' if record and deletable: #add secondary css class for cascade delete warning css = 'delete' for f in self.table.fields: on_del = self.table[f].ondelete if isinstance(on_del,str) and 'cascade' in on_del.lower(): css += ' cascade_delete' break widget = INPUT(_type='checkbox', _class=css, _id=self.FIELDKEY_DELETE_RECORD, _name=self.FIELDNAME_REQUEST_DELETE, ) xfields.append( (self.FIELDKEY_DELETE_RECORD + SQLFORM.ID_ROW_SUFFIX, LABEL( T(delete_label), separator, _for=self.FIELDKEY_DELETE_RECORD, _id=self.FIELDKEY_DELETE_RECORD + \ SQLFORM.ID_LABEL_SUFFIX), widget, col3.get(self.FIELDKEY_DELETE_RECORD, ''))) self.custom.delete = self.custom.deletable = widget # when writable, add submit button self.custom.submit = '' if not readonly: if 'submit' in buttons: widget = self.custom.submit = INPUT(_type='submit', _value=T(submit_button)) elif buttons: widget = self.custom.submit = DIV(*buttons) if self.custom.submit: xfields.append(('submit_record' + SQLFORM.ID_ROW_SUFFIX, '', widget, col3.get('submit_button', ''))) # if a record is provided and found # make sure it's id is stored in the form if record: if not self['hidden']: self['hidden'] = {} if not keyed: self['hidden']['id'] = record[table._id.name] (begin, end) = self._xml() self.custom.begin = XML("<%s %s>" % (self.tag, begin)) self.custom.end = XML("%s</%s>" % (end, self.tag)) table = self.createform(xfields) self.components = [table] def createform(self, xfields): formstyle = self.formstyle if isinstance(formstyle, basestring): if formstyle in SQLFORM.formstyles: formstyle = SQLFORM.formstyles[formstyle] else: raise RuntimeError('formstyle not found') if callable(formstyle): # backward compatibility, 4 argument function is the old style args, varargs, keywords, defaults = inspect.getargspec(formstyle) if defaults and len(args) - len(defaults) == 4 or len(args) == 4: table = TABLE() for id, a, b, c in xfields: newrows = formstyle(id, a, b, c) 
    def createform(self, xfields):
        """
        renders the (id, label, widget, comment) tuples through the chosen
        formstyle; supports both old 4-argument and new (form, fields)
        style callables. Returns the assembled layout helper.
        """
        formstyle = self.formstyle
        if isinstance(formstyle, basestring):
            if formstyle in SQLFORM.formstyles:
                formstyle = SQLFORM.formstyles[formstyle]
            else:
                raise RuntimeError('formstyle not found')

        if callable(formstyle):
            # backward compatibility, 4 argument function is the old style
            args, varargs, keywords, defaults = inspect.getargspec(formstyle)
            if defaults and len(args) - len(defaults) == 4 or len(args) == 4:
                table = TABLE()
                for id, a, b, c in xfields:
                    newrows = formstyle(id, a, b, c)
                    self.field_parent[id] = getattr(b, 'parent', None) \
                        if isinstance(b, XmlComponent) else None
                    if type(newrows).__name__ != "tuple":
                        newrows = [newrows]
                    for newrow in newrows:
                        table.append(newrow)
            else:
                table = formstyle(self, xfields)
                for id, a, b, c in xfields:
                    self.field_parent[id] = getattr(b, 'parent', None) \
                        if isinstance(b, XmlComponent) else None
        else:
            raise RuntimeError('formstyle not supported')
        return table

    def accepts(
        self,
        request_vars,
        session=None,
        formname='%(tablename)s/%(record_id)s',
        keepvalues=None,
        onvalidation=None,
        dbio=True,
        hideerror=False,
        detect_record_change=False,
        **kwargs
    ):
        """
        similar FORM.accepts but also does insert, update or delete in DAL.
        but if detect_record_change == True than:
          form.record_changed = False (record is properly validated/submitted)
          form.record_changed = True (record cannot be submitted because changed)
        elseif detect_record_change == False than:
          form.record_changed = None
        """
        if keepvalues is None:
            keepvalues = True if self.record else False
        if self.readonly:
            return False
        if request_vars.__class__.__name__ == 'Request':
            request_vars = request_vars.post_vars

        keyed = hasattr(self.table, '_primarykey')

        # implement logic to detect whether record exist but has been modified
        # server side
        self.record_changed = None
        self.detect_record_change = detect_record_change
        if self.detect_record_change:
            if self.record:
                self.record_changed = False
                serialized = '|'.join(
                    str(self.record[k]) for k in self.table.fields())
                self.record_hash = md5_hash(serialized)

        # logic to deal with record_id for keyed tables
        if self.record:
            if keyed:
                formname_id = '.'.join(str(self.record[k])
                                       for k in self.table._primarykey
                                       if hasattr(self.record, k))
                record_id = dict((k, request_vars.get(k, None))
                                 for k in self.table._primarykey)
            else:
                (formname_id, record_id) = (self.record[self.id_field_name],
                                            request_vars.get('id', None))
            keepvalues = True
        else:
            if keyed:
                formname_id = 'create'
                record_id = dict([(k, None) for k in self.table._primarykey])
            else:
                (formname_id, record_id) = ('create', None)

        if not keyed and isinstance(record_id, (list, tuple)):
            record_id = record_id[0]

        if formname:
            formname = formname % dict(tablename=self.table._tablename,
                                       record_id=formname_id)

        # ## THIS IS FOR UNIQUE RECORDS, read IS_NOT_IN_DB

        for fieldname in self.fields:
            field = self.table[fieldname]
            requires = field.requires or []
            if not isinstance(requires, (list, tuple)):
                requires = [requires]
            [item.set_self_id(self.record_id) for item in requires
             if hasattr(item, 'set_self_id') and self.record_id]

        # ## END

        fields = {}
        for key in self.vars:
            fields[key] = self.vars[key]

        ret = FORM.accepts(
            self,
            request_vars,
            session,
            formname,
            keepvalues,
            onvalidation,
            hideerror=hideerror,
            **kwargs
        )

        self.deleted = \
            request_vars.get(self.FIELDNAME_REQUEST_DELETE, False)

        self.custom.end = TAG[''](self.hidden_fields(), self.custom.end)

        auch = record_id and self.errors and self.deleted

        if self.record_changed and self.detect_record_change:
            message_onchange = \
                kwargs.setdefault("message_onchange",
                                  current.T("A record change was detected. " +
                                            "Consecutive update self-submissions " +
                                            "are not allowed. Try re-submitting or " +
                                            "refreshing the form page."))
            if message_onchange is not None:
                current.response.flash = message_onchange
            return ret
        elif (not ret) and (not auch):
            # auch is true when user tries to delete a record
            # that does not pass validation, yet it should be deleted
            # on validation failure, re-render each widget with the
            # submitted value so the user does not lose their input
            for fieldname in self.fields:
                field = self.table[fieldname]
                ### this is a workaround! widgets should always have default not None!
                if not field.widget and field.type.startswith('list:') and \
                        not OptionsWidget.has_options(field):
                    field.widget = self.widgets.list.widget
                if field.widget and fieldname in request_vars:
                    if fieldname in self.request_vars:
                        value = self.request_vars[fieldname]
                    elif self.record:
                        value = self.record[fieldname]
                    else:
                        value = self.table[fieldname].default
                    row_id = '%s_%s%s' % (
                        self.table, fieldname, SQLFORM.ID_ROW_SUFFIX)
                    widget = field.widget(field, value)
                    parent = self.field_parent[row_id]
                    if parent:
                        parent.components = [widget]
                        if self.errors.get(fieldname):
                            parent._traverse(False, hideerror)
                    self.custom.widget[fieldname] = widget
            self.accepted = ret
            return ret

        if record_id and str(record_id) != str(self.record_id):
            raise SyntaxError('user is tampering with form\'s record_id: '
                              '%s != %s' % (record_id, self.record_id))

        if record_id and dbio and not keyed:
            self.vars.id = self.record[self.id_field_name]

        if self.deleted and self.custom.deletable:
            if dbio:
                if keyed:
                    qry = reduce(lambda x, y: x & y,
                                 [self.table[k] == record_id[k]
                                  for k in self.table._primarykey])
                else:
                    qry = self.table._id == self.record[self.id_field_name]
                self.table._db(qry).delete()
            self.errors.clear()
            for component in self.elements('input, select, textarea'):
                component['_disabled'] = True
            self.accepted = True
            return True

        for fieldname in self.fields:
            if not fieldname in self.table.fields:
                continue

            if not self.ignore_rw and not self.table[fieldname].writable:
                ### this happens because FORM has no knowledge of writable
                ### and thinks that a missing boolean field is a None
                if self.table[fieldname].type == 'boolean' and \
                        self.vars.get(fieldname, True) is None:
                    del self.vars[fieldname]
                continue

            field = self.table[fieldname]
            if field.type == 'id':
                continue
            if field.type == 'boolean':
                if self.vars.get(fieldname, False):
                    self.vars[fieldname] = fields[fieldname] = True
                else:
                    self.vars[fieldname] = fields[fieldname] = False
            elif field.type == 'password' and self.record\
                    and request_vars.get(fieldname, None) == \
                    PasswordWidget.DEFAULT_PASSWORD_DISPLAY:
                continue  # do not update if password was not changed
            elif field.type == 'upload':
                f = self.vars[fieldname]
                fd = '%s__delete' % fieldname
                if f == '' or f is None:
                    # no new upload: honour delete request, else keep the
                    # previously stored file (or the field default)
                    if self.vars.get(fd, False):
                        f = self.table[fieldname].default or ''
                        fields[fieldname] = f
                    elif self.record:
                        if self.record[fieldname]:
                            fields[fieldname] = self.record[fieldname]
                        else:
                            f = self.table[fieldname].default or ''
                            fields[fieldname] = f
                    else:
                        f = self.table[fieldname].default or ''
                        fields[fieldname] = f
                    self.vars[fieldname] = fields[fieldname]
                    if not f:
                        continue
                    else:
                        f = os.path.join(
                            current.request.folder,
                            os.path.normpath(f))
                        source_file = open(f, 'rb')
                        original_filename = os.path.split(f)[1]
                elif hasattr(f, 'file'):
                    (source_file, original_filename) = (f.file, f.filename)
                elif isinstance(f, (str, unicode)):
                    ### do not know why this happens, it should not
                    (source_file, original_filename) = \
                        (cStringIO.StringIO(f), 'file.txt')
                else:
                    # this should never happen, why does it happen?
                    # print 'f=',repr(f)
                    continue
                newfilename = field.store(source_file, original_filename,
                                          field.uploadfolder)
                # this line was for backward compatibility but problematic
                # self.vars['%s_newfilename' % fieldname] = newfilename
                fields[fieldname] = newfilename
                if isinstance(field.uploadfield, str):
                    fields[field.uploadfield] = source_file.read()
                # proposed by Hamdy (accept?) do we need fields at this point?
                self.vars[fieldname] = fields[fieldname]
                continue
            elif fieldname in self.vars:
                fields[fieldname] = self.vars[fieldname]
            elif field.default is None and field.type != 'blob':
                self.errors[fieldname] = 'no data'
                self.accepted = False
                return False
            value = fields.get(fieldname, None)
            if field.type == 'list:string':
                if not isinstance(value, (tuple, list)):
                    fields[fieldname] = value and [value] or []
            elif isinstance(field.type, str) and field.type.startswith('list:'):
                if not isinstance(value, list):
                    fields[fieldname] = [safe_int(
                        x) for x in (value and [value] or [])]
            elif field.type == 'integer':
                if not value is None:
                    fields[fieldname] = safe_int(value)
            elif field.type.startswith('reference'):
                if not value is None and isinstance(self.table, Table) and not keyed:
                    fields[fieldname] = safe_int(value)
            elif field.type == 'double':
                if not value is None:
                    fields[fieldname] = safe_float(value)

        for fieldname in self.vars:
            if fieldname != 'id' and fieldname in self.table.fields\
                    and not fieldname in fields and not fieldname\
                    in request_vars:
                fields[fieldname] = self.vars[fieldname]

        if dbio:
            if 'delete_this_record' in fields:
                # this should never happen but seems to happen to some
                del fields['delete_this_record']
            for field in self.table:
                if not field.name in fields and field.writable is False \
                        and field.update is None and field.compute is None:
                    if record_id and self.record:
                        fields[field.name] = self.record[field.name]
                    elif not self.table[field.name].default is None:
                        fields[field.name] = self.table[field.name].default
            if keyed:
                if reduce(lambda x, y: x and y, record_id.values()):  # if record_id
                    if fields:
                        qry = reduce(lambda x, y: x & y,
                                     [self.table[k] == self.record[k]
                                      for k in self.table._primarykey])
                        self.table._db(qry).update(**fields)
                else:
                    pk = self.table.insert(**fields)
                    if pk:
                        self.vars.update(pk)
                    else:
                        ret = False
            else:
                if record_id:
                    self.vars.id = self.record[self.id_field_name]
                    if fields:
                        self.table._db(self.table._id == self.record[
                            self.id_field_name]).update(**fields)
                else:
                    self.vars.id = self.table.insert(**fields)
        self.accepted = ret
        return ret
else: self.vars.id = self.table.insert(**fields) self.accepted = ret return ret AUTOTYPES = { type(''): ('string', None), type(True): ('boolean', None), type(1): ('integer', IS_INT_IN_RANGE(-1e12, +1e12)), type(1.0): ('double', IS_FLOAT_IN_RANGE()), type([]): ('list:string', None), type(datetime.date.today()): ('date', IS_DATE()), type(datetime.datetime.today()): ('datetime', IS_DATETIME()) } @staticmethod def dictform(dictionary, **kwargs): fields = [] for key, value in sorted(dictionary.items()): t, requires = SQLFORM.AUTOTYPES.get(type(value), (None, None)) if t: fields.append(Field(key, t, requires=requires, default=value)) return SQLFORM.factory(*fields, **kwargs) @staticmethod def smartdictform(session, name, filename=None, query=None, **kwargs): import os if query: session[name] = query.db(query).select().first().as_dict() elif os.path.exists(filename): env = {'datetime': datetime} session[name] = eval(open(filename).read(), {}, env) form = SQLFORM.dictform(session[name]) if form.process().accepted: session[name].update(form.vars) if query: query.db(query).update(**form.vars) else: open(filename, 'w').write(repr(session[name])) return form @staticmethod def factory(*fields, **attributes): """ generates a SQLFORM for the given fields. Internally will build a non-database based data model to hold the fields. """ # Define a table name, this way it can be logical to our CSS. # And if you switch from using SQLFORM to SQLFORM.factory # your same css definitions will still apply. 
table_name = attributes.get('table_name', 'no_table') # So it won't interfere with SQLDB.define_table if 'table_name' in attributes: del attributes['table_name'] return SQLFORM(DAL(None).define_table(table_name, *fields), **attributes) @staticmethod def build_query(fields, keywords): request = current.request if isinstance(keywords, (tuple, list)): keywords = keywords[0] request.vars.keywords = keywords key = keywords.strip() if key and not ' ' in key and not '"' in key and not "'" in key: SEARCHABLE_TYPES = ('string', 'text', 'list:string') parts = [field.contains( key) for field in fields if field.type in SEARCHABLE_TYPES] else: parts = None if parts: return reduce(lambda a, b: a | b, parts) else: return smart_query(fields, key) @staticmethod def search_menu(fields, search_options=None, prefix='w2p' ): T = current.T panel_id='%s_query_panel' % prefix fields_id='%s_query_fields' % prefix keywords_id='%s_keywords' % prefix field_id='%s_field' % prefix value_id='%s_value' % prefix search_options = search_options or { 'string': ['=', '!=', '<', '>', '<=', '>=', 'starts with', 'contains', 'in', 'not in'], 'text': ['=', '!=', '<', '>', '<=', '>=', 'starts with', 'contains', 'in', 'not in'], 'date': ['=', '!=', '<', '>', '<=', '>='], 'time': ['=', '!=', '<', '>', '<=', '>='], 'datetime': ['=', '!=', '<', '>', '<=', '>='], 'integer': ['=', '!=', '<', '>', '<=', '>=', 'in', 'not in'], 'double': ['=', '!=', '<', '>', '<=', '>='], 'id': ['=', '!=', '<', '>', '<=', '>=', 'in', 'not in'], 'reference': ['=', '!='], 'boolean': ['=', '!=']} if fields[0]._db._adapter.dbengine == 'google:datastore': search_options['string'] = ['=', '!=', '<', '>', '<=', '>='] search_options['text'] = ['=', '!=', '<', '>', '<=', '>='] search_options['list:string'] = ['contains'] search_options['list:integer'] = ['contains'] search_options['list:reference'] = ['contains'] criteria = [] selectfields = [] for field in fields: name = str(field).replace('.', '-') # treat ftype 'decimal' as 'double' # 
(this fixes problems but needs refactoring! if isinstance(field.type, SQLCustomType): ftype = field.type.type.split(' ')[0] else: ftype = field.type.split(' ')[0] if ftype.startswith('decimal'): ftype = 'double' elif ftype=='bigint': ftype = 'integer' elif ftype.startswith('big-'): ftype = ftype[4:] # end options = search_options.get(ftype, None) if options: label = isinstance( field.label, str) and T(field.label) or field.label selectfields.append(OPTION(label, _value=str(field))) operators = SELECT(*[OPTION(T(option), _value=option) for option in options]) _id = "%s_%s" % (value_id,name) if field.type == 'boolean': value_input = SQLFORM.widgets.boolean.widget(field,field.default,_id=_id) elif field.type == 'double': value_input = SQLFORM.widgets.double.widget(field,field.default,_id=_id) elif field.type == 'time': value_input = SQLFORM.widgets.time.widget(field,field.default,_id=_id) elif field.type == 'date': iso_format = {'_data-w2p_date_format' : '%Y-%m-%d'} value_input = SQLFORM.widgets.date.widget(field,field.default,_id=_id, **iso_format) elif field.type == 'datetime': iso_format = iso_format = {'_data-w2p_datetime_format' : '%Y-%m-%d %H:%M:%S'} value_input = SQLFORM.widgets.datetime.widget(field,field.default,_id=_id, **iso_format) elif (field.type.startswith('reference ') or field.type.startswith('list:reference ')) and \ hasattr(field.requires,'options'): value_input = SELECT( *[OPTION(v, _value=k) for k,v in field.requires.options()], **dict(_id=_id)) elif field.type == 'integer' or \ field.type.startswith('reference ') or \ field.type.startswith('list:integer') or \ field.type.startswith('list:reference '): value_input = SQLFORM.widgets.integer.widget(field,field.default,_id=_id) else: value_input = INPUT( _type='text', _id=_id, _class=field.type) new_button = INPUT( _type="button", _value=T('New'), _class="btn", _onclick="%s_build_query('new','%s')" % (prefix,field)) and_button = INPUT( _type="button", _value=T('And'), _class="btn", 
_onclick="%s_build_query('and','%s')" % (prefix, field)) or_button = INPUT( _type="button", _value=T('Or'), _class="btn", _onclick="%s_build_query('or','%s')" % (prefix, field)) close_button = INPUT( _type="button", _value=T('Close'), _class="btn", _onclick="jQuery('#%s').slideUp()" % panel_id) criteria.append(DIV( operators, value_input, new_button, and_button, or_button, close_button, _id='%s_%s' % (field_id, name), _class='w2p_query_row hidden', _style='display:inline')) criteria.insert(0, SELECT( _id=fields_id, _onchange="jQuery('.w2p_query_row').hide();jQuery('#%s_'+jQuery('#%s').val().replace('.','-')).show();" % (field_id,fields_id), _style='float:left', *selectfields)) fadd = SCRIPT(""" jQuery('#%(fields_id)s input,#%(fields_id)s select').css( 'width','auto'); jQuery(function(){web2py_ajax_fields('#%(fields_id)s');}); function %(prefix)s_build_query(aggregator,a) { var b=a.replace('.','-'); var option = jQuery('#%(field_id)s_'+b+' select').val(); var value = jQuery('#%(value_id)s_'+b).val().replace('"','\\\\"'); var s=a+' '+option+' "'+value+'"'; var k=jQuery('#%(keywords_id)s'); var v=k.val(); if(aggregator=='new') k.val(s); else k.val((v?(v+' '+ aggregator +' '):'')+s); } """ % dict( prefix=prefix,fields_id=fields_id,keywords_id=keywords_id, field_id=field_id,value_id=value_id ) ) return CAT( DIV(_id=panel_id, _style="display:none;", *criteria), fadd) @staticmethod def grid(query, fields=None, field_id=None, left=None, headers={}, orderby=None, groupby=None, searchable=True, sortable=True, paginate=20, deletable=True, editable=True, details=True, selectable=None, create=True, csv=True, links=None, links_in_grid=True, upload='<default>', args=[], user_signature=True, maxtextlengths={}, maxtextlength=20, onvalidation=None, onfailure=None, oncreate=None, onupdate=None, ondelete=None, sorter_icons=(XML('&#x25B2;'), XML('&#x25BC;')), ui = 'web2py', showbuttontext=True, _class="web2py_grid", formname='web2py_grid', search_widget='default', ignore_rw = False, 
formstyle = 'table3cols', exportclasses = None, formargs={}, createargs={}, editargs={}, viewargs={}, selectable_submit_button='Submit', buttons_placement = 'right', links_placement = 'right', noconfirm=False, cache_count=None, client_side_delete=False, ignore_common_filters=None, ): # jQuery UI ThemeRoller classes (empty if ui is disabled) if ui == 'jquery-ui': ui = dict(widget='ui-widget', header='ui-widget-header', content='ui-widget-content', default='ui-state-default', cornerall='ui-corner-all', cornertop='ui-corner-top', cornerbottom='ui-corner-bottom', button='ui-button-text-icon-primary', buttontext='ui-button-text', buttonadd='ui-icon ui-icon-plusthick', buttonback='ui-icon ui-icon-arrowreturnthick-1-w', buttonexport='ui-icon ui-icon-transferthick-e-w', buttondelete='ui-icon ui-icon-trash', buttonedit='ui-icon ui-icon-pencil', buttontable='ui-icon ui-icon-triangle-1-e', buttonview='ui-icon ui-icon-zoomin', ) elif ui == 'web2py': ui = dict(widget='', header='', content='', default='', cornerall='', cornertop='', cornerbottom='', button='button btn', buttontext='buttontext button', buttonadd='icon plus icon-plus', buttonback='icon leftarrow icon-arrow-left', buttonexport='icon downarrow icon-download', buttondelete='icon trash icon-trash', buttonedit='icon pen icon-pencil', buttontable='icon rightarrow icon-arrow-right', buttonview='icon magnifier icon-zoom-in', ) elif not isinstance(ui, dict): raise RuntimeError('SQLFORM.grid ui argument must be a dictionary') db = query._db T = current.T request = current.request session = current.session response = current.response logged = session.auth and session.auth.user wenabled = (not user_signature or logged) and not groupby create = wenabled and create editable = wenabled and editable deletable = wenabled and deletable details = details and not groupby rows = None def fetch_count(dbset): ##FIXME for google:datastore cache_count is ignored ## if it's not an integer if cache_count is None or isinstance(cache_count, 
tuple): if groupby: c = 'count(*)' nrows = db.executesql( 'select count(*) from (%s) _tmp;' % dbset._select(c, left=left, cacheable=True, groupby=groupby, cache=cache_count)[:-1])[0][0] elif left: c = 'count(*)' nrows = dbset.select(c, left=left, cacheable=True, cache=cache_count).first()[c] elif dbset._db._adapter.dbengine=='google:datastore': #if we don't set a limit, this can timeout for a large table nrows = dbset.db._adapter.count(dbset.query, limit=1000) else: nrows = dbset.count(cache=cache_count) elif isinstance(cache_count, (int, long)): nrows = cache_count elif callable(cache_count): nrows = cache_count(dbset, request.vars) else: nrows = 0 return nrows def url(**b): b['args'] = args + b.get('args', []) localvars = request.get_vars.copy() localvars.update(b.get('vars', {})) b['vars'] = localvars b['hash_vars'] = False b['user_signature'] = user_signature return URL(**b) def url2(**b): b['args'] = request.args + b.get('args', []) localvars = request.get_vars.copy() localvars.update(b.get('vars', {})) b['vars'] = localvars b['hash_vars'] = False b['user_signature'] = user_signature return URL(**b) referrer = session.get('_web2py_grid_referrer_' + formname, url()) # if not user_signature every action is accessible # else forbid access unless # - url is based url # - url has valid signature (vars are not signed, only path_info) # = url does not contain 'create','delete','edit' (readonly) if user_signature: if not ( '/'.join(str(a) for a in args) == '/'.join(request.args) or URL.verify(request,user_signature=user_signature, hash_vars=False) or (request.args(len(args))=='view' and not logged)): session.flash = T('not authorized') redirect(referrer) def gridbutton(buttonclass='buttonadd', buttontext=T('Add'), buttonurl=url(args=[]), callback=None, delete=None, trap=True, noconfirm=None): if showbuttontext: return A(SPAN(_class=ui.get(buttonclass)), SPAN(T(buttontext), _title=T(buttontext), _class=ui.get('buttontext')), _href=buttonurl, callback=callback, 
delete=delete, noconfirm=noconfirm, _class=ui.get('button'), cid=request.cid) else: return A(SPAN(_class=ui.get(buttonclass)), _href=buttonurl, callback=callback, delete=delete, noconfirm=noconfirm, _title=T(buttontext), _class=ui.get('buttontext'), cid=request.cid) dbset = db(query,ignore_common_filters=ignore_common_filters) tablenames = db._adapter.tables(dbset.query) if left is not None: if not isinstance(left, (list, tuple)): left = [left] for join in left: tablenames += db._adapter.tables(join) tables = [db[tablename] for tablename in tablenames] if fields: #add missing tablename to virtual fields for table in tables: for k,f in table.iteritems(): if isinstance(f,Field.Virtual): f.tablename = table._tablename columns = [f for f in fields if f.tablename in tablenames] else: fields = [] columns = [] filter1 = lambda f:isinstance(f,Field) filter2 = lambda f:isinstance(f,Field) and f.readable for table in tables: fields += filter(filter1, table) columns += filter(filter2, table) for k,f in table.iteritems(): if not k.startswith('_'): if isinstance(f,Field.Virtual) and f.readable: f.tablename = table._tablename fields.append(f) columns.append(f) if not field_id: if groupby is None: field_id = tables[0]._id elif groupby and isinstance(groupby, Field): field_id = groupby #take the field passed as groupby elif groupby and isinstance(groupby, Expression): field_id = groupby.first #take the first groupby field table = field_id.table tablename = table._tablename if not any(str(f)==str(field_id) for f in fields): fields = [f for f in fields]+[field_id] if upload == '<default>': upload = lambda filename: url(args=['download', filename]) if request.args(-2) == 'download': stream = response.download(request, db) raise HTTP(200, stream, **response.headers) def buttons(edit=False, view=False, record=None): buttons = DIV(gridbutton('buttonback', 'Back', referrer), _class='form_header row_buttons %(header)s %(cornertop)s' % ui) if edit and (not callable(edit) or edit(record)): 
args = ['edit', table._tablename, request.args[-1]] buttons.append(gridbutton('buttonedit', 'Edit', url(args=args))) if view: args = ['view', table._tablename, request.args[-1]] buttons.append(gridbutton('buttonview', 'View', url(args=args))) if record and links: for link in links: if isinstance(link, dict): buttons.append(link['body'](record)) elif link(record): buttons.append(link(record)) return buttons def linsert(lst, i, x): """ a = [1,2] linsert(a, 1, [0,3]) a = [1, 0, 3, 2] """ lst[i:i] = x formfooter = DIV( _class='form_footer row_buttons %(header)s %(cornerbottom)s' % ui) create_form = update_form = view_form = search_form = None if create and request.args(-2) == 'new': table = db[request.args[-1]] sqlformargs = dict(ignore_rw=ignore_rw, formstyle=formstyle, _class='web2py_form') sqlformargs.update(formargs) sqlformargs.update(createargs) create_form = SQLFORM(table, **sqlformargs) create_form.process(formname=formname, next=referrer, onvalidation=onvalidation, onfailure=onfailure, onsuccess=oncreate) res = DIV(buttons(), create_form, formfooter, _class=_class) res.create_form = create_form res.update_form = update_form res.view_form = view_form res.search_form = search_form res.rows = None return res elif details and request.args(-3) == 'view': table = db[request.args[-2]] record = table(request.args[-1]) or redirect(referrer) sqlformargs = dict(upload=upload, ignore_rw=ignore_rw, formstyle=formstyle, readonly=True, _class='web2py_form') sqlformargs.update(formargs) sqlformargs.update(viewargs) view_form = SQLFORM(table, record, **sqlformargs) res = DIV(buttons(edit=editable, record=record), view_form, formfooter, _class=_class) res.create_form = create_form res.update_form = update_form res.view_form = view_form res.search_form = search_form res.rows = None return res elif editable and request.args(-3) == 'edit': table = db[request.args[-2]] record = table(request.args[-1]) or redirect(URL('error')) deletable_ = deletable(record) \ if callable(deletable) 
else deletable sqlformargs = dict(upload=upload, ignore_rw=ignore_rw, formstyle=formstyle, deletable=deletable_, _class='web2py_form', submit_button=T('Submit'), delete_label=T('Check to delete')) sqlformargs.update(formargs) sqlformargs.update(editargs) update_form = SQLFORM(table, record, **sqlformargs) update_form.process( formname=formname, onvalidation=onvalidation, onfailure=onfailure, onsuccess=onupdate, next=referrer) res = DIV(buttons(view=details, record=record), update_form, formfooter, _class=_class) res.create_form = create_form res.update_form = update_form res.view_form = view_form res.search_form = search_form res.rows = None return res elif deletable and request.args(-3) == 'delete': table = db[request.args[-2]] if not callable(deletable): if ondelete: ondelete(table, request.args[-1]) db(table[table._id.name] == request.args[-1]).delete() else: record = table(request.args[-1]) or redirect(URL('error')) if deletable(record): if ondelete: ondelete(table, request.args[-1]) record.delete_record() if request.ajax: #this means javascript is enabled, so we don't need to do #a redirect if not client_side_delete: #if it's an ajax request and we don't need to reload the #entire page, let's just inform that there have been no #exceptions and don't regenerate the grid raise HTTP(200) else: #if it's requested that the grid gets reloaded on delete #on ajax, the redirect should be on the original location newloc = request.env.http_web2py_component_location redirect(newloc, client_side=client_side_delete) else: #we need to do a redirect because javascript is not enabled redirect(referrer, client_side=client_side_delete) exportManager = dict( csv_with_hidden_cols=(ExporterCSV, 'CSV (hidden cols)'), csv=(ExporterCSV, 'CSV'), xml=(ExporterXML, 'XML'), html=(ExporterHTML, 'HTML'), json=(ExporterJSON, 'JSON'), tsv_with_hidden_cols= (ExporterTSV, 'TSV (Excel compatible, hidden cols)'), tsv=(ExporterTSV, 'TSV (Excel compatible)')) if not exportclasses is None: """ 
remember: allow to set exportclasses=dict(csv=False) to disable the csv format """ exportManager.update(exportclasses) export_type = request.vars._export_type if export_type: order = request.vars.order or '' if sortable: if order and not order == 'None': otablename, ofieldname = order.split('~')[-1].split('.', 1) sort_field = db[otablename][ofieldname] exception = sort_field.type in ('date', 'datetime', 'time') if exception: orderby = (order[:1] == '~' and sort_field) or ~sort_field else: orderby = (order[:1] == '~' and ~sort_field) or sort_field expcolumns = [str(f) for f in columns] if export_type.endswith('with_hidden_cols'): expcolumns = [] for table in tables: for field in table: if field.readable and field.tablename in tablenames: expcolumns.append(field) if export_type in exportManager and exportManager[export_type]: if request.vars.keywords: try: dbset = dbset(SQLFORM.build_query( fields, request.vars.get('keywords', ''))) rows = dbset.select(left=left, orderby=orderby, cacheable=True, *expcolumns) except Exception, e: response.flash = T('Internal Error') rows = [] else: rows = dbset.select(left=left, orderby=orderby, cacheable=True, *expcolumns) value = exportManager[export_type] clazz = value[0] if hasattr(value, '__getitem__') else value oExp = clazz(rows) filename = '.'.join(('rows', oExp.file_ext)) response.headers['Content-Type'] = oExp.content_type response.headers['Content-Disposition'] = \ 'attachment;filename=' + filename + ';' raise HTTP(200, oExp.export(), **response.headers) elif request.vars.records and not isinstance( request.vars.records, list): request.vars.records = [request.vars.records] elif not request.vars.records: request.vars.records = [] session['_web2py_grid_referrer_' + formname] = \ url2(vars=request.get_vars) console = DIV(_class='web2py_console %(header)s %(cornertop)s' % ui) error = None if create: add = gridbutton( buttonclass='buttonadd', buttontext=T('Add'), buttonurl=url(args=['new', tablename])) if not searchable: 
console.append(add) else: add = '' if searchable: sfields = reduce(lambda a, b: a + b, [[f for f in t if f.readable] for t in tables]) if isinstance(search_widget, dict): search_widget = search_widget[tablename] if search_widget == 'default': prefix = formname == 'web2py_grid' and 'w2p' or 'w2p_%s' % formname search_menu = SQLFORM.search_menu(sfields, prefix=prefix) spanel_id = '%s_query_fields' % prefix sfields_id = '%s_query_panel' % prefix skeywords_id = '%s_keywords' % prefix search_widget = lambda sfield, url: CAT(FORM( INPUT(_name='keywords', _value=request.vars.keywords, _id=skeywords_id, _onfocus="jQuery('#%s').change();jQuery('#%s').slideDown();" % (spanel_id, sfields_id)), INPUT(_type='submit', _value=T('Search'), _class="btn"), INPUT(_type='submit', _value=T('Clear'), _class="btn", _onclick="jQuery('#%s').val('');" % skeywords_id), _method="GET", _action=url), search_menu) form = search_widget and search_widget(sfields, url()) or '' console.append(add) console.append(form) keywords = request.vars.get('keywords', '') try: if callable(searchable): subquery = searchable(sfields, keywords) else: subquery = SQLFORM.build_query(sfields, keywords) except RuntimeError: subquery = None error = T('Invalid query') else: subquery = None if subquery: dbset = dbset(subquery) try: nrows = fetch_count(dbset) except: nrows = 0 error = T('Unsupported query') order = request.vars.order or '' if sortable: if order and not order == 'None': otablename, ofieldname = order.split('~')[-1].split('.', 1) sort_field = db[otablename][ofieldname] exception = sort_field.type in ('date', 'datetime', 'time') if exception: orderby = (order[:1] == '~' and sort_field) or ~sort_field else: orderby = (order[:1] == '~' and ~sort_field) or sort_field headcols = [] if selectable: headcols.append(TH(_class=ui.get('default'))) ordermatch, marker = orderby, '' if orderby: #if orderby is a single column, remember to put the marker if isinstance(orderby, Expression): if orderby.first and not 
orderby.second: ordermatch, marker = orderby.first, '~' ordermatch = marker + str(ordermatch) for field in columns: if not field.readable: continue key = str(field) header = headers.get(str(field), field.label or key) if sortable and not isinstance(field, Field.Virtual): marker = '' if order: if key == order: key, marker = '~' + order, sorter_icons[0] elif key == order[1:]: marker = sorter_icons[1] else: if key == ordermatch: key, marker = '~' + ordermatch, sorter_icons[0] elif key == ordermatch[1:]: marker = sorter_icons[1] header = A(header, marker, _href=url(vars=dict( keywords=request.vars.keywords or '', order=key)), cid=request.cid) headcols.append(TH(header, _class=ui.get('default'))) toadd = [] left_cols = 0 right_cols = 0 if links and links_in_grid: for link in links: if isinstance(link, dict): toadd.append(TH(link['header'], _class=ui.get('default'))) if links_placement in ['right', 'both']: headcols.extend(toadd) right_cols += len(toadd) if links_placement in ['left', 'both']: linsert(headcols, 0, toadd) left_cols += len(toadd) # Include extra column for buttons if needed. 
include_buttons_column = (details or editable or deletable or (links and links_in_grid and not all([isinstance(link, dict) for link in links]))) if include_buttons_column: if buttons_placement in ['right', 'both']: headcols.append(TH(_class=ui.get('default',''))) right_cols += 1 if buttons_placement in ['left', 'both']: headcols.insert(0, TH(_class=ui.get('default',''))) left_cols += 1 head = TR(*headcols, **dict(_class=ui.get('header'))) cursor = True #figure out what page we are one to setup the limitby if paginate and dbset._db._adapter.dbengine=='google:datastore': cursor = request.vars.cursor or True limitby = (0, paginate) try: page = int(request.vars.page or 1)-1 except ValueError: page = 0 elif paginate and paginate<nrows: try: page = int(request.vars.page or 1)-1 except ValueError: page = 0 limitby = (paginate*page,paginate*(page+1)) else: limitby = None try: table_fields = [field for field in fields if (field.tablename in tablenames and not(isinstance(field,Field.Virtual)))] if dbset._db._adapter.dbengine=='google:datastore': rows = dbset.select(left=left,orderby=orderby, groupby=groupby,limitby=limitby, reusecursor=cursor, cacheable=True,*table_fields) next_cursor = dbset._db.get('_lastcursor', None) else: rows = dbset.select(left=left,orderby=orderby, groupby=groupby,limitby=limitby, cacheable=True,*table_fields) except SyntaxError: rows = None next_cursor = None error = T("Query Not Supported") except Exception, e: rows = None next_cursor = None error = T("Query Not Supported: %s")%e message = error if not message and nrows: if dbset._db._adapter.dbengine=='google:datastore' and nrows>=1000: message = T('at least %(nrows)s records found') % dict(nrows=nrows) else: message = T('%(nrows)s records found') % dict(nrows=nrows) console.append(DIV(message or T('None'),_class='web2py_counter')) paginator = UL() if paginate and dbset._db._adapter.dbengine=='google:datastore': #this means we may have a large table with an unknown number of rows. 
try: page = int(request.vars.page or 1)-1 except ValueError: page = 0 paginator.append(LI('page %s'%(page+1))) if next_cursor: d = dict(page=page+2, cursor=next_cursor) if order: d['order']=order if request.vars.keywords: d['keywords']=request.vars.keywords paginator.append(LI( A('next',_href=url(vars=d),cid=request.cid))) elif paginate and paginate<nrows: npages, reminder = divmod(nrows, paginate) if reminder: npages += 1 try: page = int(request.vars.page or 1) - 1 except ValueError: page = 0 def self_link(name, p): d = dict(page=p + 1) if order: d['order'] = order if request.vars.keywords: d['keywords'] = request.vars.keywords return A(name, _href=url(vars=d), cid=request.cid) NPAGES = 5 # window is 2*NPAGES if page > NPAGES + 1: paginator.append(LI(self_link('<<', 0))) if page > NPAGES: paginator.append(LI(self_link('<', page - 1))) pages = range(max(0, page - NPAGES), min(page + NPAGES, npages)) for p in pages: if p == page: paginator.append(LI(A(p + 1, _onclick='return false'), _class='current')) else: paginator.append(LI(self_link(p + 1, p))) if page < npages - NPAGES: paginator.append(LI(self_link('>', page + 1))) if page < npages - NPAGES - 1: paginator.append(LI(self_link('>>', npages - 1))) else: limitby = None if rows: cols = [COL(_id=str(c).replace('.', '-'), data={'column': left_cols + i + 1}) for i,c in enumerate(columns)] n = len(head.components) cols = [COL(data={'column': i + 1}) for i in range(left_cols)] + \ cols + \ [COL(data={'column': left_cols + len(cols) + i + 1}) for i in range(right_cols)] htmltable = TABLE(COLGROUP(*cols),THEAD(head)) tbody = TBODY() numrec = 0 for row in rows: trcols = [] id = row[field_id] if selectable: trcols.append( INPUT(_type="checkbox", _name="records", _value=id, value=request.vars.records)) for field in columns: if not field.readable: continue if field.type == 'blob': continue value = row[str(field)] maxlength = maxtextlengths.get(str(field), maxtextlength) if field.represent: try: value = field.represent(value, 
row) except KeyError: try: value = field.represent( value, row[field.tablename]) except KeyError: pass elif field.type == 'boolean': value = INPUT(_type="checkbox", _checked=value, _disabled=True) elif field.type == 'upload': if value: if callable(upload): value = A( T('file'), _href=upload(value)) elif upload: value = A(T('file'), _href='%s/%s' % (upload, value)) else: value = '' if isinstance(value, str): value = truncate_string(value, maxlength) elif not isinstance(value, DIV): value = field.formatter(value) trcols.append(TD(value)) row_buttons = TD(_class='row_buttons',_nowrap=True) if links and links_in_grid: toadd = [] for link in links: if isinstance(link, dict): toadd.append(TD(link['body'](row))) else: if link(row): row_buttons.append(link(row)) if links_placement in ['right', 'both']: trcols.extend(toadd) if links_placement in ['left', 'both']: linsert(trcols, 0, toadd) if include_buttons_column: if details and (not callable(details) or details(row)): row_buttons.append(gridbutton( 'buttonview', 'View', url(args=['view', tablename, id]))) if editable and (not callable(editable) or editable(row)): row_buttons.append(gridbutton( 'buttonedit', 'Edit', url(args=['edit', tablename, id]))) if deletable and (not callable(deletable) or deletable(row)): row_buttons.append(gridbutton( 'buttondelete', 'Delete', url(args=['delete', tablename, id]), callback=url(args=['delete', tablename, id]), noconfirm=noconfirm, delete='tr')) if buttons_placement in ['right', 'both']: trcols.append(row_buttons) if buttons_placement in ['left', 'both']: trcols.insert(0, row_buttons) if numrec % 2 == 0: classtr = 'even' else: classtr = 'odd' numrec += 1 if id: rid = id if callable(rid): # can this ever be callable? 
rid = rid(row) tr = TR(*trcols, **dict( _id=rid, _class='%s %s' % (classtr, 'with_id'))) else: tr = TR(*trcols, **dict(_class=classtr)) tbody.append(tr) htmltable.append(tbody) htmltable = DIV( htmltable, _class='web2py_htmltable', _style='width:100%;overflow-x:auto;-ms-overflow-x:scroll') if selectable: if not callable(selectable): #now expect that selectable and related parameters are iterator (list, tuple, etc) inputs = [] for i, submit_info in enumerate(selectable): submit_text = submit_info[0] submit_class = submit_info[2] if len(submit_info) > 2 else '' input_ctrl = INPUT(_type="submit", _name='submit_%d' % i, _value=T(submit_text)) input_ctrl.add_class(submit_class) inputs.append(input_ctrl) else: inputs = [INPUT(_type="submit", _value=T(selectable_submit_button))] if formstyle == 'bootstrap': # add space between buttons #inputs = sum([[inp, ' '] for inp in inputs], [])[:-1] htmltable = FORM(htmltable, DIV(_class='form-actions', *inputs)) else: htmltable = FORM(htmltable, *inputs) if htmltable.process(formname=formname).accepted: htmltable.vars.records = htmltable.vars.records or [] htmltable.vars.records = htmltable.vars.records if type(htmltable.vars.records) == list else [htmltable.vars.records] records = [int(r) for r in htmltable.vars.records] if not callable(selectable): for i, submit_info in enumerate(selectable): submit_callback = submit_info[1] if htmltable.vars.get('submit_%d' % i, False): submit_callback(records) break else: selectable(records) redirect(referrer) else: htmltable = DIV(T('No records found')) if csv and nrows: export_links = [] for k, v in sorted(exportManager.items()): if not v: continue label = v[1] if hasattr(v, "__getitem__") else k link = url2(vars=dict( order=request.vars.order or '', _export_type=k, keywords=request.vars.keywords or '')) export_links.append(A(T(label), _href=link)) export_menu = \ DIV(T('Export:'), _class="w2p_export_menu", *export_links) else: export_menu = None res = DIV(console, DIV(htmltable, 
_class="web2py_table"), _class='%s %s' % (_class, ui.get('widget'))) if paginator.components: res.append( DIV(paginator, _class="web2py_paginator %(header)s %(cornerbottom)s" % ui)) if export_menu: res.append(export_menu) res.create_form = create_form res.update_form = update_form res.view_form = view_form res.search_form = search_form res.rows = rows return res @staticmethod def smartgrid(table, constraints=None, linked_tables=None, links=None, links_in_grid=True, args=None, user_signature=True, divider='>', breadcrumbs_class='', **kwargs): """ @auth.requires_login() def index(): db.define_table('person',Field('name'),format='%(name)s') db.define_table('dog', Field('name'),Field('owner',db.person),format='%(name)s') db.define_table('comment',Field('body'),Field('dog',db.dog)) if db(db.person).isempty(): from gluon.contrib.populate import populate populate(db.person,300) populate(db.dog,300) populate(db.comment,1000) db.commit() form=SQLFORM.smartgrid(db[request.args(0) or 'person']) #*** return dict(form=form) *** builds a complete interface to navigate all tables links to the request.args(0) table: pagination, search, view, edit, delete, children, parent, etc. 
constraints is a dict {'table':query} that limits which records can be accessible links is a dict like {'tablename':[lambda row: A(....), ...]} that will add buttons when table tablename is displayed linked_tables is a optional list of tablenames of tables to be linked """ request, T = current.request, current.T if args is None: args = [] def url(**b): b['args'] = request.args[:nargs] + b.get('args', []) b['hash_vars'] = False b['user_signature'] = user_signature return URL(**b) db = table._db breadcrumbs = [] if request.args(len(args)) != table._tablename: request.args[:] = args + [table._tablename] if links is None: links = {} if constraints is None: constraints = {} field = None name = None def format(table,row): if not row: return T('Unknown') elif isinstance(table._format,str): return table._format % row elif callable(table._format): return table._format(row) else: return '#'+str(row.id) try: nargs = len(args) + 1 previous_tablename, previous_fieldname, previous_id = \ table._tablename, None, None while len(request.args) > nargs: key = request.args(nargs) if '.' 
in key: id = request.args(nargs + 1) tablename, fieldname = key.split('.', 1) table = db[tablename] field = table[fieldname] field.default = id referee = field.type[10:] if referee != previous_tablename: raise HTTP(400) cond = constraints.get(referee, None) if cond: record = db( db[referee]._id == id)(cond).select().first() else: record = db[referee](id) if previous_id: if record[previous_fieldname] != int(previous_id): raise HTTP(400) previous_tablename, previous_fieldname, previous_id = \ tablename, fieldname, id name = format(db[referee],record) breadcrumbs.append( LI(A(T(db[referee]._plural), cid=request.cid, _href=url()), SPAN(divider, _class='divider'), _class='w2p_grid_breadcrumb_elem')) if kwargs.get('details', True): breadcrumbs.append( LI(A(name, cid=request.cid, _href=url(args=['view', referee, id])), SPAN(divider, _class='divider'), _class='w2p_grid_breadcrumb_elem')) nargs += 2 else: break if nargs > len(args) + 1: query = (field == id) # cjk # if isinstance(linked_tables, dict): # linked_tables = linked_tables.get(table._tablename, []) if linked_tables is None or referee in linked_tables: field.represent = lambda id, r=None, referee=referee, rep=field.represent: A(callable(rep) and rep(id) or id, cid=request.cid, _href=url(args=['view', referee, id])) except (KeyError, ValueError, TypeError): redirect(URL(args=table._tablename)) if nargs == len(args) + 1: query = table._db._adapter.id_query(table) # filter out data info for displayed table if table._tablename in constraints: query = query & constraints[table._tablename] if isinstance(links, dict): links = links.get(table._tablename, []) for key in 'columns,orderby,searchable,sortable,paginate,deletable,editable,details,selectable,create,fields'.split(','): if isinstance(kwargs.get(key, None), dict): if table._tablename in kwargs[key]: kwargs[key] = kwargs[key][table._tablename] else: del kwargs[key] check = {} id_field_name = table._id.name for rfield in table._referenced_by: check[rfield.tablename] = 
\ check.get(rfield.tablename, []) + [rfield.name] if linked_tables is None: linked_tables = db.tables() if isinstance(linked_tables, dict): linked_tables = linked_tables.get(table._tablename,[]) if linked_tables: for item in linked_tables: tb = None if isinstance(item,Table) and item._tablename in check: tablename = item._tablename linked_fieldnames = check[tablename] td = item elif isinstance(item,str) and item in check: tablename = item linked_fieldnames = check[item] tb = db[item] elif isinstance(item,Field) and item.name in check.get(item._tablename,[]): tablename = item._tablename linked_fieldnames = [item.name] tb = item.table else: linked_fieldnames = [] if tb: multiple_links = len(linked_fieldnames) > 1 for fieldname in linked_fieldnames: t = T(tb._plural) if not multiple_links else \ T(tb._plural + '(' + fieldname + ')') args0 = tablename + '.' + fieldname links.append( lambda row, t=t, nargs=nargs, args0=args0: A(SPAN(t), cid=request.cid, _href=url( args=[args0, row[id_field_name]]))) grid = SQLFORM.grid(query, args=request.args[:nargs], links=links, links_in_grid=links_in_grid, user_signature=user_signature, **kwargs) if isinstance(grid, DIV): header = table._plural next = grid.create_form or grid.update_form or grid.view_form breadcrumbs.append(LI( A(T(header), cid=request.cid,_href=url()), SPAN(divider, _class='divider') if next else '', _class='active w2p_grid_breadcrumb_elem')) if grid.create_form: header = T('New %(entity)s') % dict(entity=table._singular) elif grid.update_form: header = T('Edit %(entity)s') % dict( entity=format(grid.update_form.table, grid.update_form.record)) elif grid.view_form: header = T('View %(entity)s') % dict( entity=format(grid.view_form.table, grid.view_form.record)) if next: breadcrumbs.append(LI( A(T(header), cid=request.cid,_href=url()), _class='active w2p_grid_breadcrumb_elem')) grid.insert( 0, DIV(UL(*breadcrumbs, **{'_class': breadcrumbs_class}), _class='web2py_breadcrumbs')) return grid class SQLTABLE(TABLE): """ 
given a Rows object, as returned by a db().select(), generates an html table with the rows. optional arguments: :param linkto: URL (or lambda to generate a URL) to edit individual records :param upload: URL to download uploaded files :param orderby: Add an orderby link to column headers. :param headers: dictionary of headers to headers redefinions headers can also be a string to gerenare the headers from data for now only headers="fieldname:capitalize", headers="labels" and headers=None are supported :param truncate: length at which to truncate text in table cells. Defaults to 16 characters. :param columns: a list or dict contaning the names of the columns to be shown Defaults to all Optional names attributes for passed to the <table> tag The keys of headers and columns must be of the form "tablename.fieldname" Simple linkto example:: rows = db.select(db.sometable.ALL) table = SQLTABLE(rows, linkto='someurl') This will link rows[id] to .../sometable/value_of_id More advanced linkto example:: def mylink(field, type, ref): return URL(args=[field]) rows = db.select(db.sometable.ALL) table = SQLTABLE(rows, linkto=mylink) This will link rows[id] to current_app/current_controlle/current_function/value_of_id New Implements: 24 June 2011: ----------------------------- :param selectid: The id you want to select :param renderstyle: Boolean render the style with the table :param extracolumns = [{'label':A('Extra',_href='#'), 'class': '', #class name of the header 'width':'', #width in pixels or % 'content':lambda row, rc: A('Edit',_href='edit/%s'%row.id), 'selected': False #agregate class selected to this column }] :param headers = {'table.id':{'label':'Id', 'class':'', #class name of the header 'width':'', #width in pixels or % 'truncate': 16, #truncate the content to... 
'selected': False #agregate class selected to this column }, 'table.myfield':{'label':'My field', 'class':'', #class name of the header 'width':'', #width in pixels or % 'truncate': 16, #truncate the content to... 'selected': False #agregate class selected to this column }, } table = SQLTABLE(rows, headers=headers, extracolumns=extracolumns) `< """ def __init__( self, sqlrows, linkto=None, upload=None, orderby=None, headers={}, truncate=16, columns=None, th_link='', extracolumns=None, selectid=None, renderstyle=False, cid=None, colgroup=False, **attributes ): TABLE.__init__(self, **attributes) self.components = [] self.attributes = attributes self.sqlrows = sqlrows (components, row) = (self.components, []) if not sqlrows: return if not columns: columns = sqlrows.colnames if headers == 'fieldname:capitalize': headers = {} for c in columns: headers[c] = c.split('.')[-1].replace('_', ' ').title() elif headers == 'labels': headers = {} for c in columns: (t, f) = c.split('.') field = sqlrows.db[t][f] headers[c] = field.label if colgroup: cols = [COL(_id=c.replace('.', '-'), data={'column': i + 1}) for i, c in enumerate(columns)] if extracolumns: cols += [COL(data={'column': len(cols) + i + 1}) for i, c in enumerate(extracolumns)] components.append(COLGROUP(*cols)) if headers is None: headers = {} else: for c in columns: # new implement dict if isinstance(headers.get(c, c), dict): coldict = headers.get(c, c) attrcol = dict() if coldict['width'] != "": attrcol.update(_width=coldict['width']) if coldict['class'] != "": attrcol.update(_class=coldict['class']) row.append(TH(coldict['label'], **attrcol)) elif orderby: row.append(TH(A(headers.get(c, c), _href=th_link + '?orderby=' + c, cid=cid))) else: row.append(TH(headers.get(c, c))) if extracolumns: # new implement dict for c in extracolumns: attrcol = dict() if c['width'] != "": attrcol.update(_width=c['width']) if c['class'] != "": attrcol.update(_class=c['class']) row.append(TH(c['label'], **attrcol)) 
components.append(THEAD(TR(*row))) tbody = [] for (rc, record) in enumerate(sqlrows): row = [] if rc % 2 == 0: _class = 'even' else: _class = 'odd' if not selectid is None: # new implement if record.get('id') == selectid: _class += ' rowselected' for colname in columns: if not table_field.match(colname): if "_extra" in record and colname in record._extra: r = record._extra[colname] row.append(TD(r)) continue else: raise KeyError( "Column %s not found (SQLTABLE)" % colname) (tablename, fieldname) = colname.split('.') try: field = sqlrows.db[tablename][fieldname] except (KeyError, AttributeError): field = None if tablename in record \ and isinstance(record, Row) \ and isinstance(record[tablename], Row): r = record[tablename][fieldname] elif fieldname in record: r = record[fieldname] else: raise SyntaxError('something wrong in Rows object') r_old = r if not field or isinstance(field, (Field.Virtual, Field.Lazy)): pass elif linkto and field.type == 'id': try: href = linkto(r, 'table', tablename) except TypeError: href = '%s/%s/%s' % (linkto, tablename, r_old) r = A(r, _href=href) elif isinstance(field.type, str) and field.type.startswith('reference'): if linkto: ref = field.type[10:] try: href = linkto(r, 'reference', ref) except TypeError: href = '%s/%s/%s' % (linkto, ref, r_old) if ref.find('.') >= 0: tref, fref = ref.split('.') if hasattr(sqlrows.db[tref], '_primarykey'): href = '%s/%s?%s' % (linkto, tref, urllib.urlencode({fref: r})) r = A(represent(field, r, record), _href=str(href)) elif field.represent: r = represent(field, r, record) elif linkto and hasattr(field._table, '_primarykey')\ and fieldname in field._table._primarykey: # have to test this with multi-key tables key = urllib.urlencode(dict([ ((tablename in record and isinstance(record, Row) and isinstance(record[tablename], Row)) and (k, record[tablename][k])) or (k, record[k]) for k in field._table._primarykey])) r = A(r, _href='%s/%s?%s' % (linkto, tablename, key)) elif isinstance(field.type, str) and 
field.type.startswith('list:'): r = represent(field, r or [], record) elif field.represent: r = represent(field, r, record) elif field.type == 'blob' and r: r = 'DATA' elif field.type == 'upload': if upload and r: r = A(current.T('file'), _href='%s/%s' % (upload, r)) elif r: r = current.T('file') else: r = '' elif field.type in ['string', 'text']: r = str(field.formatter(r)) if headers != {}: # new implement dict if isinstance(headers[colname], dict): if isinstance(headers[colname]['truncate'], int): r = truncate_string( r, headers[colname]['truncate']) elif not truncate is None: r = truncate_string(r, truncate) attrcol = dict() # new implement dict if headers != {}: if isinstance(headers[colname], dict): colclass = headers[colname]['class'] if headers[colname]['selected']: colclass = str(headers[colname] ['class'] + " colselected").strip() if colclass != "": attrcol.update(_class=colclass) row.append(TD(r, **attrcol)) if extracolumns: # new implement dict for c in extracolumns: attrcol = dict() colclass = c['class'] if c['selected']: colclass = str(c['class'] + " colselected").strip() if colclass != "": attrcol.update(_class=colclass) contentfunc = c['content'] row.append(TD(contentfunc(record, rc), **attrcol)) tbody.append(TR(_class=_class, *row)) if renderstyle: components.append(STYLE(self.style())) components.append(TBODY(*tbody)) def style(self): css = ''' table tbody tr.odd { background-color: #DFD; } table tbody tr.even { background-color: #EFE; } table tbody tr.rowselected { background-color: #FDD; } table tbody tr td.colselected { background-color: #FDD; } table tbody tr:hover { background: #DDF; } ''' return css form_factory = SQLFORM.factory # for backward compatibility, deprecated class ExportClass(object): label = None file_ext = None content_type = None def __init__(self, rows): self.rows = rows def represented(self): def none_exception(value): """ returns a cleaned up value that can be used for csv export: - unicode text is encoded as such - None 
values are replaced with the given representation (default <NULL>) """ if value is None: return '<NULL>' elif isinstance(value, unicode): return value.encode('utf8') elif isinstance(value, Reference): return int(value) elif hasattr(value, 'isoformat'): return value.isoformat()[:19].replace('T', ' ') elif isinstance(value, (list, tuple)): # for type='list:..' return bar_encode(value) return value represented = [] for record in self.rows: row = [] for col in self.rows.colnames: if not REGEX_TABLE_DOT_FIELD.match(col): row.append(record._extra[col]) else: (t, f) = col.split('.') field = self.rows.db[t][f] if isinstance(record.get(t, None), (Row, dict)): value = record[t][f] else: value = record[f] if field.type == 'blob' and not value is None: value = '' elif field.represent: value = field.represent(value, record) row.append(none_exception(value)) represented.append(row) return represented def export(self): raise NotImplementedError class ExporterTSV(ExportClass): label = 'TSV' file_ext = "csv" content_type = "text/tab-separated-values" def __init__(self, rows): ExportClass.__init__(self, rows) def export(self): out = cStringIO.StringIO() final = cStringIO.StringIO() import csv writer = csv.writer(out, delimiter='\t') if self.rows: import codecs final.write(codecs.BOM_UTF16) writer.writerow( [unicode(col).encode("utf8") for col in self.rows.colnames]) data = out.getvalue().decode("utf8") data = data.encode("utf-16") data = data[2:] final.write(data) out.truncate(0) records = self.represented() for row in records: writer.writerow( [str(col).decode('utf8').encode("utf-8") for col in row]) data = out.getvalue().decode("utf8") data = data.encode("utf-16") data = data[2:] final.write(data) out.truncate(0) return str(final.getvalue()) class ExporterCSV(ExportClass): label = 'CSV' file_ext = "csv" content_type = "text/csv" def __init__(self, rows): ExportClass.__init__(self, rows) def export(self): if self.rows: return self.rows.as_csv() else: return '' class 
ExporterHTML(ExportClass): label = 'HTML' file_ext = "html" content_type = "text/html" def __init__(self, rows): ExportClass.__init__(self, rows) def export(self): return '<html>\n<head>\n<meta http-equiv="content-type" content="text/html; charset=UTF-8" />\n</head>\n<body>\n%s\n</body>\n</html>' % (self.rows.xml() or '') class ExporterXML(ExportClass): label = 'XML' file_ext = "xml" content_type = "text/xml" def __init__(self, rows): ExportClass.__init__(self, rows) def export(self): if self.rows: return self.rows.as_xml() else: return '<rows></rows>' class ExporterJSON(ExportClass): label = 'JSON' file_ext = "json" content_type = "application/json" def __init__(self, rows): ExportClass.__init__(self, rows) def export(self): if self.rows: return self.rows.as_json() else: return 'null'
bsd-3-clause
egelmex/ajenti
plugins/notepad/main.py
17
6249
import os from ajenti.ui import * from ajenti.com import implements from ajenti.api import * from ajenti.utils import shell, enquote, BackgroundProcess from ajenti.plugins.core.api import * from ajenti.utils import * class NotepadPlugin(CategoryPlugin): text = 'Notepad' icon = '/dl/notepad/icon.png' folder = 'tools' def on_session_start(self): self._roots = [] self._files = [] self._data = [] self.add_tab() self._favs = [] if self.app.config.has_option('notepad', 'favs'): self._favs = self.app.config.get('notepad', 'favs').split('|') def add_tab(self): self._tab = len(self._roots) self._roots.append(self.app.get_config(self).dir) self._files.append(None) self._data.append(None) def get_ui(self): mui = self.app.inflate('notepad:main') tabs = UI.TabControl(active=self._tab,test='test') mui.append('main', tabs) idx = 0 for root in self._roots: file = self._files[idx] data = self._data[idx] ui = self.app.inflate('notepad:tab') tabs.add(file or root, ui, id=str(idx)) favs = ui.find('favs') files = ui.find('files') for f in self._favs: files.append( UI.ListItem( UI.HContainer( UI.Image(file='/dl/core/ui/stock/bookmark.png'), UI.Label(text=f), ), id='*'+str(self._favs.index(f))+'/%i'%idx, active=f==file ) ) if root != '/': files.append( UI.ListItem( UI.HContainer( UI.Image(file='/dl/core/ui/stock/folder.png'), UI.Label(text='..'), ), id='<back>/%i'%idx, active=False, ) ) for p in sorted(os.listdir(root)): path = os.path.join(root, p) if os.path.isdir(path): files.append( UI.ListItem( UI.HContainer( UI.Image(file='/dl/core/ui/stock/folder.png'), UI.Label(text=p), ), id=p+'/%i'%idx ) ) for p in sorted(os.listdir(root)): path = os.path.join(root, p) if not os.path.isdir(path): files.append( UI.ListItem( UI.Image(file='/dl/core/ui/stock/file.png'), UI.Label(text=p), id=p+'/%i'%idx, active=path==file ) ) ui.find('data').set('name', 'data/%i'%idx) if file is not None: ui.find('data').set('value', data) ui.find('data').set('id', 'data%i'%idx) fbtn = ui.find('btnFav') 
ui.find('btnSave').set('action', 'save/%i'%idx) ui.find('btnClose').set('action', 'close/%i'%idx) if file is not None: if not file in self._favs: fbtn.set('text', 'Bookmark') fbtn.set('action', 'fav/%i'%idx) fbtn.set('icon', '/dl/core/ui/stock/bookmark-add.png') else: fbtn.set('text', 'Unbookmark') fbtn.set('action', 'unfav/%i'%idx) fbtn.set('icon', '/dl/core/ui/stock/bookmark-remove.png') else: ui.remove('btnSave') ui.remove('btnFav') if len(self._roots) == 1: ui.remove('btnClose') idx += 1 tabs.add("+", None, id='newtab', form='frmEdit') return mui @event('listitem/click') def on_list_click(self, event, params, vars=None): self._tab = int(params[1]) if params[0] == '<back>': params[0] = '..' if params[0].startswith('*'): params[0] = self._favs[int(params[0][1:])] p = os.path.abspath(os.path.join(self._roots[self._tab], params[0])) if os.path.isdir(p): self._roots[self._tab] = p else: try: data = open(p).read() self._files[self._tab] = p self._data[self._tab] = data except: self.put_message('warn', 'Cannot open %s'%p) @event('button/click') def on_button(self, event, params, vars=None): if params[0] == 'btnClose': self._file = None @event('form/submit') def on_submit(self, event, params, vars=None): if vars.getvalue('action', None) == 'newtab': self.add_tab() for idx in range(0,len(self._roots)): if idx >= len(self._roots): # closed break self._data[idx] = vars.getvalue('data/%i'%idx, None) if vars.getvalue('action', None) == 'save/%i'%idx: self._tab = idx if self._files[idx] is not None: open(self._files[idx], 'w').write(self._data[idx]) self.put_message('info', 'Saved') if vars.getvalue('action', '') == 'fav/%i'%idx: self._tab = idx self._favs.append(self._files[idx]) if vars.getvalue('action', '') == 'unfav/%i'%idx: self._tab = idx self._favs.remove(self._files[idx]) if vars.getvalue('action', '') == 'close/%i'%idx: self._tab = 0 del self._roots[idx] del self._files[idx] del self._data[idx] if len(self._roots) == 0: self.add_tab() self.app.config.set('notepad', 
'favs', '|'.join(self._favs)) self.app.config.save()
lgpl-3.0
dcosentino/edx-platform
common/lib/chem/chem/chemcalc.py
14
14759
from __future__ import division from fractions import Fraction from pyparsing import (Literal, StringEnd, OneOrMore, ParseException) import nltk from nltk.tree import Tree ARROWS = ('<->', '->') ## Defines a simple pyparsing tokenizer for chemical equations elements = ['Ac', 'Ag', 'Al', 'Am', 'Ar', 'As', 'At', 'Au', 'B', 'Ba', 'Be', 'Bh', 'Bi', 'Bk', 'Br', 'C', 'Ca', 'Cd', 'Ce', 'Cf', 'Cl', 'Cm', 'Cn', 'Co', 'Cr', 'Cs', 'Cu', 'Db', 'Ds', 'Dy', 'Er', 'Es', 'Eu', 'F', 'Fe', 'Fl', 'Fm', 'Fr', 'Ga', 'Gd', 'Ge', 'H', 'He', 'Hf', 'Hg', 'Ho', 'Hs', 'I', 'In', 'Ir', 'K', 'Kr', 'La', 'Li', 'Lr', 'Lu', 'Lv', 'Md', 'Mg', 'Mn', 'Mo', 'Mt', 'N', 'Na', 'Nb', 'Nd', 'Ne', 'Ni', 'No', 'Np', 'O', 'Os', 'P', 'Pa', 'Pb', 'Pd', 'Pm', 'Po', 'Pr', 'Pt', 'Pu', 'Ra', 'Rb', 'Re', 'Rf', 'Rg', 'Rh', 'Rn', 'Ru', 'S', 'Sb', 'Sc', 'Se', 'Sg', 'Si', 'Sm', 'Sn', 'Sr', 'Ta', 'Tb', 'Tc', 'Te', 'Th', 'Ti', 'Tl', 'Tm', 'U', 'Uuo', 'Uup', 'Uus', 'Uut', 'V', 'W', 'Xe', 'Y', 'Yb', 'Zn', 'Zr'] digits = map(str, range(10)) symbols = list("[](){}^+-/") phases = ["(s)", "(l)", "(g)", "(aq)"] tokens = reduce(lambda a, b: a ^ b, map(Literal, elements + digits + symbols + phases)) tokenizer = OneOrMore(tokens) + StringEnd() def _orjoin(l): return "'" + "' | '".join(l) + "'" ## Defines an NLTK parser for tokenized expressions grammar = """ S -> multimolecule | multimolecule '+' S multimolecule -> count molecule | molecule count -> number | number '/' number molecule -> unphased | unphased phase unphased -> group | paren_group_round | paren_group_square element -> """ + _orjoin(elements) + """ digit -> """ + _orjoin(digits) + """ phase -> """ + _orjoin(phases) + """ number -> digit | digit number group -> suffixed | suffixed group paren_group_round -> '(' group ')' paren_group_square -> '[' group ']' plus_minus -> '+' | '-' number_suffix -> number ion_suffix -> '^' number plus_minus | '^' plus_minus suffix -> number_suffix | number_suffix ion_suffix | ion_suffix unsuffixed -> element | paren_group_round | 
paren_group_square suffixed -> unsuffixed | unsuffixed suffix """ parser = nltk.ChartParser(nltk.parse_cfg(grammar)) def _clean_parse_tree(tree): ''' The parse tree contains a lot of redundant nodes. E.g. paren_groups have groups as children, etc. This will clean up the tree. ''' def unparse_number(n): ''' Go from a number parse tree to a number ''' if len(n) == 1: rv = n[0][0] else: rv = n[0][0] + unparse_number(n[1]) return rv def null_tag(n): ''' Remove a tag ''' return n[0] def ion_suffix(n): '''1. "if" part handles special case 2. "else" part is general behaviour ''' if n[1:][0].node == 'number' and n[1:][0][0][0] == '1': # if suffix is explicitly 1, like ^1- # strip 1, leave only sign: ^- return nltk.tree.Tree(n.node, n[2:]) else: return nltk.tree.Tree(n.node, n[1:]) dispatch = {'number': lambda x: nltk.tree.Tree("number", [unparse_number(x)]), 'unphased': null_tag, 'unsuffixed': null_tag, 'number_suffix': lambda x: nltk.tree.Tree('number_suffix', [unparse_number(x[0])]), 'suffixed': lambda x: len(x) > 1 and x or x[0], 'ion_suffix': ion_suffix, 'paren_group_square': lambda x: nltk.tree.Tree(x.node, x[1]), 'paren_group_round': lambda x: nltk.tree.Tree(x.node, x[1])} if type(tree) == str: return tree old_node = None ## This loop means that if a node is processed, and returns a child, ## the child will be processed. while tree.node in dispatch and tree.node != old_node: old_node = tree.node tree = dispatch[tree.node](tree) children = [] for child in tree: child = _clean_parse_tree(child) children.append(child) tree = nltk.tree.Tree(tree.node, children) return tree def _merge_children(tree, tags): ''' nltk, by documentation, cannot do arbitrary length groups. Instead of: (group 1 2 3 4) It has to handle this recursively: (group 1 (group 2 (group 3 (group 4)))) We do the cleanup of converting from the latter to the former. ''' if tree is None: # There was a problem--shouldn't have empty trees (NOTE: see this with input e.g. 'H2O(', or 'Xe+'). 
# Haven't grokked the code to tell if this is indeed the right thing to do. raise ParseException("Shouldn't have empty trees") if type(tree) == str: return tree merged_children = [] done = False #print '00000', tree ## Merge current tag while not done: done = True for child in tree: if type(child) == nltk.tree.Tree and child.node == tree.node and tree.node in tags: merged_children = merged_children + list(child) done = False else: merged_children = merged_children + [child] tree = nltk.tree.Tree(tree.node, merged_children) merged_children = [] #print '======',tree # And recurse children = [] for child in tree: children.append(_merge_children(child, tags)) #return tree return nltk.tree.Tree(tree.node, children) def _render_to_html(tree): ''' Renders a cleaned tree to HTML ''' def molecule_count(tree, children): # If an integer, return that integer if len(tree) == 1: return tree[0][0] # If a fraction, return the fraction if len(tree) == 3: return " <sup>{num}</sup>&frasl;<sub>{den}</sub> ".format(num=tree[0][0], den=tree[2][0]) return "Error" def subscript(tree, children): return "<sub>{sub}</sub>".format(sub=children) def superscript(tree, children): return "<sup>{sup}</sup>".format(sup=children) def round_brackets(tree, children): return "({insider})".format(insider=children) def square_brackets(tree, children): return "[{insider}]".format(insider=children) dispatch = {'count': molecule_count, 'number_suffix': subscript, 'ion_suffix': superscript, 'paren_group_round': round_brackets, 'paren_group_square': square_brackets} if type(tree) == str: return tree else: children = "".join(map(_render_to_html, tree)) if tree.node in dispatch: return dispatch[tree.node](tree, children) else: return children.replace(' ', '') def render_to_html(eq): ''' Render a chemical equation string to html. Renders each molecule separately, and returns invalid input wrapped in a <span>. 
''' def err(s): "Render as an error span" return '<span class="inline-error inline">{0}</span>'.format(s) def render_arrow(arrow): """Turn text arrows into pretty ones""" if arrow == '->': return u'\u2192' if arrow == '<->': return u'\u2194' # this won't be reached unless we add more arrow types, but keep it to avoid explosions when # that happens. return arrow def render_expression(ex): """ Render a chemical expression--no arrows. """ try: return _render_to_html(_get_final_tree(ex)) except ParseException: return err(ex) def spanify(s): return u'<span class="math">{0}</span>'.format(s) left, arrow, right = split_on_arrow(eq) if arrow == '': # only one side return spanify(render_expression(left)) return spanify(render_expression(left) + render_arrow(arrow) + render_expression(right)) def _get_final_tree(s): ''' Return final tree after merge and clean. Raises pyparsing.ParseException if s is invalid. ''' tokenized = tokenizer.parseString(s) parsed = parser.parse(tokenized) merged = _merge_children(parsed, {'S', 'group'}) final = _clean_parse_tree(merged) return final def _check_equality(tuple1, tuple2): ''' return True if tuples of multimolecules are equal ''' list1 = list(tuple1) list2 = list(tuple2) # Hypo: trees where are levels count+molecule vs just molecule # cannot be sorted properly (tested on test_complex_additivity) # But without factors and phases sorting seems to work. # Also for lists of multimolecules without factors and phases # sorting seems to work fine. list1.sort() list2.sort() return list1 == list2 def compare_chemical_expression(s1, s2, ignore_state=False): ''' It does comparison between two expressions. It uses divide_chemical_expression and check if division is 1 ''' return divide_chemical_expression(s1, s2, ignore_state) == 1 def divide_chemical_expression(s1, s2, ignore_state=False): '''Compare two chemical expressions for equivalence up to a multiplicative factor: - If they are not the same chemicals, returns False. 
- If they are the same, "divide" s1 by s2 to returns a factor x such that s1 / s2 == x as a Fraction object. - if ignore_state is True, ignores phases when doing the comparison. Examples: divide_chemical_expression("H2O", "3H2O") -> Fraction(1,3) divide_chemical_expression("3H2O", "H2O") -> 3 # actually Fraction(3, 1), but compares == to 3. divide_chemical_expression("2H2O(s) + 2CO2", "H2O(s)+CO2") -> 2 divide_chemical_expression("H2O(s) + CO2", "3H2O(s)+2CO2") -> False Implementation sketch: - extract factors and phases to standalone lists, - compare expressions without factors and phases, - divide lists of factors for each other and check for equality of every element in list, - return result of factor division ''' # parsed final trees treedic = {} treedic['1'] = _get_final_tree(s1) treedic['2'] = _get_final_tree(s2) # strip phases and factors # collect factors in list for i in ('1', '2'): treedic[i + ' cleaned_mm_list'] = [] treedic[i + ' factors'] = [] treedic[i + ' phases'] = [] for el in treedic[i].subtrees(filter=lambda t: t.node == 'multimolecule'): count_subtree = [t for t in el.subtrees() if t.node == 'count'] group_subtree = [t for t in el.subtrees() if t.node == 'group'] phase_subtree = [t for t in el.subtrees() if t.node == 'phase'] if count_subtree: if len(count_subtree[0]) > 1: treedic[i + ' factors'].append( int(count_subtree[0][0][0]) / int(count_subtree[0][2][0])) else: treedic[i + ' factors'].append(int(count_subtree[0][0][0])) else: treedic[i + ' factors'].append(1.0) if phase_subtree: treedic[i + ' phases'].append(phase_subtree[0][0]) else: treedic[i + ' phases'].append(' ') treedic[i + ' cleaned_mm_list'].append( Tree('multimolecule', [Tree('molecule', group_subtree)])) # order of factors and phases must mirror the order of multimolecules, # use 'decorate, sort, undecorate' pattern treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 phases'] = zip( *sorted(zip(treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 
phases']))) treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases'] = zip( *sorted(zip(treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases']))) # check if expressions are correct without factors if not _check_equality(treedic['1 cleaned_mm_list'], treedic['2 cleaned_mm_list']): return False # phases are ruled by ingore_state flag if not ignore_state: # phases matters if treedic['1 phases'] != treedic['2 phases']: return False if any( [ x / y - treedic['1 factors'][0] / treedic['2 factors'][0] for (x, y) in zip(treedic['1 factors'], treedic['2 factors']) ] ): # factors are not proportional return False else: # return ratio return Fraction(treedic['1 factors'][0] / treedic['2 factors'][0]) def split_on_arrow(eq): """ Split a string on an arrow. Returns left, arrow, right. If there is no arrow, returns the entire eq in left, and '' in arrow and right. Return left, arrow, right. """ # order matters -- need to try <-> first for arrow in ARROWS: left, a, right = eq.partition(arrow) if a != '': return left, a, right return eq, '', '' def chemical_equations_equal(eq1, eq2, exact=False): """ Check whether two chemical equations are the same. (equations have arrows) If exact is False, then they are considered equal if they differ by a constant factor. arrows matter: -> and <-> are different. e.g. chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 -> H2O2') -> True chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + 2H2 -> H2O2') -> False chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 <-> H2O2') -> False chemical_equations_equal('H2 + O2 -> H2O2', '2 H2 + 2 O2 -> 2 H2O2') -> True chemical_equations_equal('H2 + O2 -> H2O2', '2 H2 + 2 O2 -> 2 H2O2', exact=True) -> False If there's a syntax error, we return False. """ left1, arrow1, right1 = split_on_arrow(eq1) left2, arrow2, right2 = split_on_arrow(eq2) if arrow1 == '' or arrow2 == '': return False # TODO: may want to be able to give student helpful feedback about why things didn't work. 
if arrow1 != arrow2: # arrows don't match return False try: factor_left = divide_chemical_expression(left1, left2) if not factor_left: # left sides don't match return False factor_right = divide_chemical_expression(right1, right2) if not factor_right: # right sides don't match return False if factor_left != factor_right: # factors don't match (molecule counts to add up) return False if exact and factor_left != 1: # want an exact match. return False return True except ParseException: # Don't want external users to have to deal with parsing exceptions. Just return False. return False
agpl-3.0
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/words/im/ircsupport.py
41
9227
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ IRC support for Instance Messenger. """ import string from twisted.words.protocols import irc from twisted.words.im.locals import ONLINE from twisted.internet import defer, reactor, protocol from twisted.internet.defer import succeed from twisted.words.im import basesupport, interfaces, locals from zope.interface import implements class IRCPerson(basesupport.AbstractPerson): def imperson_whois(self): if self.account.client is None: raise locals.OfflineError self.account.client.sendLine("WHOIS %s" % self.name) ### interface impl def isOnline(self): return ONLINE def getStatus(self): return ONLINE def setStatus(self,status): self.status=status self.chat.getContactsList().setContactStatus(self) def sendMessage(self, text, meta=None): if self.account.client is None: raise locals.OfflineError for line in string.split(text, '\n'): if meta and meta.get("style", None) == "emote": self.account.client.ctcpMakeQuery(self.name,[('ACTION', line)]) else: self.account.client.msg(self.name, line) return succeed(text) class IRCGroup(basesupport.AbstractGroup): implements(interfaces.IGroup) def imgroup_testAction(self): pass def imtarget_kick(self, target): if self.account.client is None: raise locals.OfflineError reason = "for great justice!" self.account.client.sendLine("KICK #%s %s :%s" % ( self.name, target.name, reason)) ### Interface Implementation def setTopic(self, topic): if self.account.client is None: raise locals.OfflineError self.account.client.topic(self.name, topic) def sendGroupMessage(self, text, meta={}): if self.account.client is None: raise locals.OfflineError if meta and meta.get("style", None) == "emote": self.account.client.me(self.name,text) return succeed(text) #standard shmandard, clients don't support plain escaped newlines! 
for line in string.split(text, '\n'): self.account.client.say(self.name, line) return succeed(text) def leave(self): if self.account.client is None: raise locals.OfflineError self.account.client.leave(self.name) self.account.client.getGroupConversation(self.name,1) class IRCProto(basesupport.AbstractClientMixin, irc.IRCClient): def __init__(self, account, chatui, logonDeferred=None): basesupport.AbstractClientMixin.__init__(self, account, chatui, logonDeferred) self._namreplies={} self._ingroups={} self._groups={} self._topics={} def getGroupConversation(self, name, hide=0): name=string.lower(name) return self.chat.getGroupConversation(self.chat.getGroup(name, self), stayHidden=hide) def getPerson(self,name): return self.chat.getPerson(name, self) def connectionMade(self): # XXX: Why do I duplicate code in IRCClient.register? try: self.performLogin = True self.nickname = self.account.username self.password = self.account.password self.realname = "Twisted-IM user" irc.IRCClient.connectionMade(self) for channel in self.account.channels: self.joinGroup(channel) self.account._isOnline=1 if self._logonDeferred is not None: self._logonDeferred.callback(self) self.chat.getContactsList() except: import traceback traceback.print_exc() def setNick(self,nick): self.name=nick self.accountName="%s (IRC)"%nick irc.IRCClient.setNick(self,nick) def kickedFrom(self, channel, kicker, message): """ Called when I am kicked from a channel. 
""" return self.chat.getGroupConversation( self.chat.getGroup(channel[1:], self), 1) def userKicked(self, kickee, channel, kicker, message): pass def noticed(self, username, channel, message): self.privmsg(username, channel, message, {"dontAutoRespond": 1}) def privmsg(self, username, channel, message, metadata=None): if metadata is None: metadata = {} username=string.split(username,'!',1)[0] if username==self.name: return if channel[0]=='#': group=channel[1:] self.getGroupConversation(group).showGroupMessage(username, message, metadata) return self.chat.getConversation(self.getPerson(username)).showMessage(message, metadata) def action(self,username,channel,emote): username=string.split(username,'!',1)[0] if username==self.name: return meta={'style':'emote'} if channel[0]=='#': group=channel[1:] self.getGroupConversation(group).showGroupMessage(username, emote, meta) return self.chat.getConversation(self.getPerson(username)).showMessage(emote,meta) def irc_RPL_NAMREPLY(self,prefix,params): """ RPL_NAMREPLY >> NAMES #bnl << :Arlington.VA.US.Undernet.Org 353 z3p = #bnl :pSwede Dan-- SkOyg AG """ group=string.lower(params[2][1:]) users=string.split(params[3]) for ui in range(len(users)): while users[ui][0] in ["@","+"]: # channel modes users[ui]=users[ui][1:] if not self._namreplies.has_key(group): self._namreplies[group]=[] self._namreplies[group].extend(users) for nickname in users: try: self._ingroups[nickname].append(group) except: self._ingroups[nickname]=[group] def irc_RPL_ENDOFNAMES(self,prefix,params): group=params[1][1:] self.getGroupConversation(group).setGroupMembers(self._namreplies[string.lower(group)]) del self._namreplies[string.lower(group)] def irc_RPL_TOPIC(self,prefix,params): self._topics[params[1][1:]]=params[2] def irc_333(self,prefix,params): group=params[1][1:] self.getGroupConversation(group).setTopic(self._topics[group],params[2]) del self._topics[group] def irc_TOPIC(self,prefix,params): nickname = string.split(prefix,"!")[0] group = 
params[0][1:] topic = params[1] self.getGroupConversation(group).setTopic(topic,nickname) def irc_JOIN(self,prefix,params): nickname=string.split(prefix,"!")[0] group=string.lower(params[0][1:]) if nickname!=self.nickname: try: self._ingroups[nickname].append(group) except: self._ingroups[nickname]=[group] self.getGroupConversation(group).memberJoined(nickname) def irc_PART(self,prefix,params): nickname=string.split(prefix,"!")[0] group=string.lower(params[0][1:]) if nickname!=self.nickname: if group in self._ingroups[nickname]: self._ingroups[nickname].remove(group) self.getGroupConversation(group).memberLeft(nickname) def irc_QUIT(self,prefix,params): nickname=string.split(prefix,"!")[0] if self._ingroups.has_key(nickname): for group in self._ingroups[nickname]: self.getGroupConversation(group).memberLeft(nickname) self._ingroups[nickname]=[] def irc_NICK(self, prefix, params): fromNick = string.split(prefix, "!")[0] toNick = params[0] if not self._ingroups.has_key(fromNick): return for group in self._ingroups[fromNick]: self.getGroupConversation(group).memberChangedNick(fromNick, toNick) self._ingroups[toNick] = self._ingroups[fromNick] del self._ingroups[fromNick] def irc_unknown(self, prefix, command, params): pass # GTKIM calls def joinGroup(self,name): self.join(name) self.getGroupConversation(name) class IRCAccount(basesupport.AbstractAccount): implements(interfaces.IAccount) gatewayType = "IRC" _groupFactory = IRCGroup _personFactory = IRCPerson def __init__(self, accountName, autoLogin, username, password, host, port, channels=''): basesupport.AbstractAccount.__init__(self, accountName, autoLogin, username, password, host, port) self.channels = map(string.strip,string.split(channels,',')) if self.channels == ['']: self.channels = [] def _startLogOn(self, chatui): logonDeferred = defer.Deferred() cc = protocol.ClientCreator(reactor, IRCProto, self, chatui, logonDeferred) d = cc.connectTCP(self.host, self.port) d.addErrback(logonDeferred.errback) return 
logonDeferred
mit
ess-dmsc/do-ess-data-simulator
DonkiDirector/metadatahandler.py
1
10785
# -*- coding: utf-8 -*- """ The main class here is MetaDataHandler that will allow collecting the metadata information for the hdf5 files from a single object. """ from xmlconfig import DaqXmlConfig ##import PyTango import sys import threading import time import traceback class InvalidEntry(Exception): """ Inform that there is an invalid entry for the metadata. It does not accept for example invalid tango attribute. """ pass class StaticMetadata(): """ Auxiliar class to allow the same fancy way to collect metadata. It exists to give to externals, the same behaviour of DynamicMetaData when you want to get the value of the MetaData object. """ def __init__(self, value): self.value = value self.is_paused = False def state(self): if self.value != None: return PyTango.DevState.ON else: return PyTango.DevState.FAULT class DynamicMetadata(): """ Allows the possibility to read the metadata in a fancy way. It is possible to read the tango attribute through :: a = dynamic_metadata.value # returns the current tango attribute. 
""" def __init__(self, entry): self.entry_name = entry self.proxyOK = False self.do_read = False self.go_on = True self.is_paused = False self.last_value = "" self.update_value() def state(self): if self.last_value != None: return PyTango.DevState.ON else: return PyTango.DevState.FAULT def _init_connection(self, entry): try: self.entry = PyTango.AttributeProxy(entry) self.entry.get_device_proxy().set_timeout_millis(1000) self.proxyOK = True except PyTango.DevFailed, ex: #sys.stderr.write("Unable to read metadata entry %s\n"% entry) #PyTango.Except.print_exception(ex) self.proxyOK = False #raise InvalidEntry(str(ex)) self.last_value = None def update_value(self): if self.is_paused: return if self.proxyOK == False: self._init_connection(self.entry_name) try: if self.proxyOK: self.last_value = self.entry.read().value except PyTango.Except, ex: #PyTango.Except.print_exception(ex) self.last_value = None except : self.last_value = None @property def value(self): """ Property for allowing reading the tango attribute using object.value. """ return self.last_value class MetaDataHandler(threading.Thread): """ Do management of all MetaData for the HDF5 files. Basically, there are two kind of metadata, those that are give some information specific for a dataset, that are called metadata, and those that give some information of the experiment, that may, some times, be collected as datasets. There are also two kind of metadata related to their nature, they may be static, in the sense that they do not change for the wholle acquisition shift, or dynamic, in the sense, that it should be read some how, periodically. This class allows to have a central object that will give back all the metadata for the HDFwriters. This class will provide two attributes to handle the metadata. ::meta, is the one that usually will produce metadata for the datasets. It is a dictionary, the dictionary entries have two fiels: the metadata name, and a Dynamic or StaticMetadata object. 
So, imagine that there is a dataset called: scalar/area:: dset_metadata = metadatahandler.meta['scalar/area'] this dset_metadata will be a list of tuple. For each entry in the list, there will be the metadata name and the object whose value is the metadata value. To be more clear, look at this code, that uses the metadatahandler to produce the attributes of the dataset scalar/area:: dset = h5file.create_dataset('scalar/area',data=area_values) for meta_entry in metadatahandler.meta['scalar/area']: dset.attr[meta_entry[0]] = meta_entry[1].value The MetaDataHandler also provide the ::meta_attributes that cope with the metadata information that will be datasets in the hdf5. They are dictionaries that has just an object of Metadata (Static or Dynamic). The following code shows how to write these datasets inside an hdf5 file:: for (meta_dset_name,meta_dset_val) in metadatahandler.meta_attribute.items(): hdf5file.create_dataset(meta_dset_name, data=meta_dset_val.value) """ def __init__(self, daq_xml_config): threading.Thread.__init__(self) assert isinstance(daq_xml_config, DaqXmlConfig) self.meta = dict() # metadata for the datasets self.meta_attribute = dict() self.do_read = False for (daq_sync_key, daq_sync_value) in daq_xml_config.sync.items(): l_meta_entry = [] # Try reading metadata in the Tango attribute conf try: tango_entry = PyTango.AttributeProxy(daq_sync_value.tango_attr) tango_entry.get_device_proxy().set_timeout_millis(1000) conf = tango_entry.get_config() if conf.unit != 'No unit': l_meta_entry += [('unit', StaticMetadata(conf.unit))] if conf.description != 'No description': l_meta_entry += [('description', StaticMetadata(conf.description))] except PyTango.DevFailed, ex: print "**** Unable to read conf from " , daq_sync_value.tango_attr #PyTango.Except.print_exception(ex) #raise InvalidEntry(str(ex)) # Try setting up additional metadata readers for (meta_key, meta_entry) in daq_sync_value.metadata.items(): if meta_entry.dynamic: l_meta_entry += [(meta_key, 
DynamicMetadata(meta_entry.tango_attr))] else: try: vval = PyTango.AttributeProxy(meta_entry.tango_attr).read() l_meta_entry += [(meta_key, StaticMetadata(vval.value))] except PyTango.DevFailed, ex: print "**** Unable to read metadata value from " , meta_entry.tango_attr #PyTango.Except.print_exception(ex) #raise InvalidEntry(str(ex)) if l_meta_entry: self.meta[daq_sync_key] = l_meta_entry for (daq_meta_key, daq_meta_value) in daq_xml_config.meta.items(): tango_attr_name = daq_meta_value.tango_attr #FIXME if daq_meta_value.dynamic or True: self.meta_attribute[daq_meta_key] = DynamicMetadata(tango_attr_name) else: try: val = PyTango.AttributeProxy(daq_meta_value.tango_attr).read() self.meta_attribute[daq_meta_key] = StaticMetadata(val.value) except PyTango.DevFailed, ex: print "**** Unable to read metadata value from " , daq_meta_value.tango_attr #PyTango.Except.print_exception(ex) #raise InvalidEntry(str(ex)) def run(self): self.last_read_time = time.time() self.go_on = True while self.go_on: try: # pollong loop poll_period_sec = 1 if self.do_read and (time.time() - self.last_read_time) > poll_period_sec: self.last_read_time = time.time() for key,meta_attr in self.meta_attribute.iteritems(): if isinstance(meta_attr, DynamicMetadata): meta_attr.update_value() #start polling on shot2shot source metadata for attr_meta_list in self.meta.iteritems(): for key,meta_attr in attr_meta_list[1]: if isinstance(meta_attr, DynamicMetadata): meta_attr.update_value() else: time.sleep(0.01) except: print traceback.format_exc() def serve_task_queue(self): try: next_task = self.task_queue.get() if next_task[0] == 'start_polling': self.start_polling() elif next_task[0] == 'stop_polling': self.stop_polling() elif next_task[0] == 'read_metadata': daq_meta_key = next_task[1] self.data_queue.put({daq_meta_key:self.meta_attribute[daq_meta_key].value}) elif next_task[0] == 'pause_daq': daq_meta_key = next_task[1] do_pause = next_task[2] self.meta_attribute[daq_meta_key].is_paused = do_pause 
elif next_task[0] == 'stop': self.go_on = False elif next_task[0] == 'daq_meta_needed': hdf_writer_key = next_task[1] daq_key = next_task[2] meta_attrs = {} if daq_key in self.meta: attrib_metadata = self.meta[daq_key] for (key,meta_entry) in attrib_metadata: meta_value = meta_entry.value if meta_value == None: print "Error: " + daq_key + " metadata readout failed, check device "+ meta_entry.entry_name + "\n" meta_value = "" meta_attrs[key] = meta_value self.data_queue.put({hdf_writer_key:[daq_key,meta_attrs]}) elif next_task[0] == 'meta_attrs_needed': hdf_writer_key = next_task[1] meta_attrs={} for key,meta_attr in self.meta_attribute.iteritems(): if not meta_attr.is_paused: meta_attrs[key] = meta_attr.value self.data_queue.put({hdf_writer_key:meta_attrs}) except: print traceback.format_exc() def start_polling(self): self.do_read = True def stop_polling(self): self.do_read = False def __str__(self): return "MetaDataHandler => meta = %s attribs = %s"% ((str(self.meta),str(self.meta_attribute)))
bsd-2-clause
sudeepdutt/mic
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
4653
3596
# EventClass.py # # This is a library defining some events types classes, which could # be used by other scripts to analyzing the perf samples. # # Currently there are just a few classes defined for examples, # PerfEvent is the base class for all perf event sample, PebsEvent # is a HW base Intel x86 PEBS event, and user could add more SW/HW # event classes based on requirements. import struct # Event types, user could add more here EVTYPE_GENERIC = 0 EVTYPE_PEBS = 1 # Basic PEBS event EVTYPE_PEBS_LL = 2 # PEBS event with load latency info EVTYPE_IBS = 3 # # Currently we don't have good way to tell the event type, but by # the size of raw buffer, raw PEBS event with load latency data's # size is 176 bytes, while the pure PEBS event's size is 144 bytes. # def create_event(name, comm, dso, symbol, raw_buf): if (len(raw_buf) == 144): event = PebsEvent(name, comm, dso, symbol, raw_buf) elif (len(raw_buf) == 176): event = PebsNHM(name, comm, dso, symbol, raw_buf) else: event = PerfEvent(name, comm, dso, symbol, raw_buf) return event class PerfEvent(object): event_num = 0 def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC): self.name = name self.comm = comm self.dso = dso self.symbol = symbol self.raw_buf = raw_buf self.ev_type = ev_type PerfEvent.event_num += 1 def show(self): print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso) # # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer # contains the context info when that event happened: the EFLAGS and # linear IP info, as well as all the registers. 
# class PebsEvent(PerfEvent): pebs_num = 0 def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS): tmp_buf=raw_buf[0:80] flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf) self.flags = flags self.ip = ip self.ax = ax self.bx = bx self.cx = cx self.dx = dx self.si = si self.di = di self.bp = bp self.sp = sp PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) PebsEvent.pebs_num += 1 del tmp_buf # # Intel Nehalem and Westmere support PEBS plus Load Latency info which lie # in the four 64 bit words write after the PEBS data: # Status: records the IA32_PERF_GLOBAL_STATUS register value # DLA: Data Linear Address (EIP) # DSE: Data Source Encoding, where the latency happens, hit or miss # in L1/L2/L3 or IO operations # LAT: the actual latency in cycles # class PebsNHM(PebsEvent): pebs_nhm_num = 0 def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL): tmp_buf=raw_buf[144:176] status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf) self.status = status self.dla = dla self.dse = dse self.lat = lat PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) PebsNHM.pebs_nhm_num += 1 del tmp_buf
gpl-2.0
h4ck3rm1k3/ansible
plugins/inventory/ec2.py
57
26612
#!/usr/bin/env python ''' EC2 external inventory script ================================= Generates inventory that Ansible can understand by making API request to AWS EC2 using the Boto library. NOTE: This script assumes Ansible is being executed where the environment variables needed for Boto have already been set: export AWS_ACCESS_KEY_ID='AK123' export AWS_SECRET_ACCESS_KEY='abc123' This script also assumes there is an ec2.ini file alongside it. To specify a different path to ec2.ini, define the EC2_INI_PATH environment variable: export EC2_INI_PATH=/path/to/my_ec2.ini If you're using eucalyptus you need to set the above variables and you need to define: export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html When run against a specific host, this script returns the following variables: - ec2_ami_launch_index - ec2_architecture - ec2_association - ec2_attachTime - ec2_attachment - ec2_attachmentId - ec2_client_token - ec2_deleteOnTermination - ec2_description - ec2_deviceIndex - ec2_dns_name - ec2_eventsSet - ec2_group_name - ec2_hypervisor - ec2_id - ec2_image_id - ec2_instanceState - ec2_instance_type - ec2_ipOwnerId - ec2_ip_address - ec2_item - ec2_kernel - ec2_key_name - ec2_launch_time - ec2_monitored - ec2_monitoring - ec2_networkInterfaceId - ec2_ownerId - ec2_persistent - ec2_placement - ec2_platform - ec2_previous_state - ec2_private_dns_name - ec2_private_ip_address - ec2_publicIp - ec2_public_dns_name - ec2_ramdisk - ec2_reason - ec2_region - ec2_requester_id - ec2_root_device_name - ec2_root_device_type - ec2_security_group_ids - ec2_security_group_names - ec2_shutdown_state - ec2_sourceDestCheck - ec2_spot_instance_request_id - ec2_state - ec2_state_code - ec2_state_reason - ec2_status - ec2_subnet_id - ec2_tenancy - ec2_virtualization_type - ec2_vpc_id These variables are pulled out of a boto.ec2.instance object. 
There is a lack of consistency with variable spellings (camelCase and underscores) since this just loops through all variables the object exposes. It is preferred to use the ones with underscores when multiple exist. In addition, if an instance has AWS Tags associated with it, each tag is a new variable named: - ec2_tag_[Key] = [Value] Security groups are comma-separated in 'ec2_security_group_ids' and 'ec2_security_group_names'. ''' # (c) 2012, Peter Sankauskas # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
###################################################################### import sys import os import argparse import re from time import time import boto from boto import ec2 from boto import rds from boto import route53 import ConfigParser from collections import defaultdict try: import json except ImportError: import simplejson as json class Ec2Inventory(object): def _empty_inventory(self): return {"_meta" : {"hostvars" : {}}} def __init__(self): ''' Main execution path ''' # Inventory grouped by instance IDs, tags, security groups, regions, # and availability zones self.inventory = self._empty_inventory() # Index of hostname (address) to instance ID self.index = {} # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of instances for inventory if self.inventory == self._empty_inventory(): data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print data_to_print def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): ''' Reads the settings from the ec2.ini file ''' config = ConfigParser.SafeConfigParser() ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) config.read(ec2_ini_path) # is eucalyptus? 
self.eucalyptus_host = None self.eucalyptus = False if config.has_option('ec2', 'eucalyptus'): self.eucalyptus = config.getboolean('ec2', 'eucalyptus') if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') # Regions self.regions = [] configRegions = config.get('ec2', 'regions') configRegions_exclude = config.get('ec2', 'regions_exclude') if (configRegions == 'all'): if self.eucalyptus_host: self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) else: for regionInfo in ec2.regions(): if regionInfo.name not in configRegions_exclude: self.regions.append(regionInfo.name) else: self.regions = configRegions.split(",") # Destination addresses self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') self.route53_excluded_zones = [] if config.has_option('ec2', 'route53_excluded_zones'): self.route53_excluded_zones.extend( config.get('ec2', 'route53_excluded_zones', '').split(',')) # Include RDS instances? 
self.rds_enabled = True if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') # Return all EC2 and RDS instances (if RDS is enabled) if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') else: self.all_instances = False if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') else: self.all_rds_instances = False # Cache related cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) if not os.path.exists(cache_dir): os.makedirs(cache_dir) self.cache_path_cache = cache_dir + "/ansible-ec2.cache" self.cache_path_index = cache_dir + "/ansible-ec2.index" self.cache_max_age = config.getint('ec2', 'cache_max_age') # Configure nested groups instead of flat namespace. if config.has_option('ec2', 'nested_groups'): self.nested_groups = config.getboolean('ec2', 'nested_groups') else: self.nested_groups = False # Do we need to just include hosts that match a pattern? try: pattern_include = config.get('ec2', 'pattern_include') if pattern_include and len(pattern_include) > 0: self.pattern_include = re.compile(pattern_include) else: self.pattern_include = None except ConfigParser.NoOptionError, e: self.pattern_include = None # Do we need to exclude hosts that match a pattern? 
try: pattern_exclude = config.get('ec2', 'pattern_exclude'); if pattern_exclude and len(pattern_exclude) > 0: self.pattern_exclude = re.compile(pattern_exclude) else: self.pattern_exclude = None except ConfigParser.NoOptionError, e: self.pattern_exclude = None # Instance filters (see boto and EC2 API docs) self.ec2_instance_filters = defaultdict(list) if config.has_option('ec2', 'instance_filters'): for x in config.get('ec2', 'instance_filters', '').split(','): filter_key, filter_value = x.split('=') self.ec2_instance_filters[filter_key].append(filter_value) def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') self.args = parser.parse_args() def do_api_calls_update_cache(self): ''' Do API calls to each region, and save data in cache files ''' if self.route53_enabled: self.get_route53_records() for region in self.regions: self.get_instances_by_region(region) if self.rds_enabled: self.get_rds_instances_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def get_instances_by_region(self, region): ''' Makes an AWS EC2 API call to the list of instances in a particular region ''' try: if self.eucalyptus: conn = boto.connect_euca(host=self.eucalyptus_host) conn.APIVersion = '2010-08-31' else: conn = ec2.connect_to_region(region) # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported if conn is None: print("region name: %s likely not supported, or AWS is down. 
connection to region failed." % region) sys.exit(1) reservations = [] if self.ec2_instance_filters: for filter_key, filter_values in self.ec2_instance_filters.iteritems(): reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) else: reservations = conn.get_all_instances() for reservation in reservations: for instance in reservation.instances: self.add_instance(instance, region) except boto.exception.BotoServerError, e: if not self.eucalyptus: print "Looks like AWS is down again:" print e sys.exit(1) def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular region ''' try: conn = rds.connect_to_region(region) if conn: instances = conn.get_all_dbinstances() for instance in instances: self.add_rds_instance(instance, region) except boto.exception.BotoServerError, e: if not e.reason == "Forbidden": print "Looks like AWS RDS is down: " print e sys.exit(1) def get_instance(self, region, instance_id): ''' Gets details about a specific instance ''' if self.eucalyptus: conn = boto.connect_euca(self.eucalyptus_host) conn.APIVersion = '2010-08-31' else: conn = ec2.connect_to_region(region) # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported if conn is None: print("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) sys.exit(1) reservations = conn.get_all_instances([instance_id]) for reservation in reservations: for instance in reservation.instances: return instance def add_instance(self, instance, region): ''' Adds an instance to the inventory and index, as long as it is addressable ''' # Only want running instances unless all_instances is True if not self.all_instances and instance.state != 'running': return # Select the best destination address if instance.subnet_id: dest = getattr(instance, self.vpc_destination_variable) else: dest = getattr(instance, self.destination_variable) if not dest: # Skip instances we cannot address (e.g. private VPC subnet) return # if we only want to include hosts that match a pattern, skip those that don't if self.pattern_include and not self.pattern_include.match(dest): return # if we need to exclude hosts that match a pattern, skip those if self.pattern_exclude and self.pattern_exclude.match(dest): return # Add to index self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) self.inventory[instance.id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.nested_groups: self.push_group(self.inventory, 'regions', region) else: self.push(self.inventory, region, dest) # Inventory: Group by availability zone self.push(self.inventory, instance.placement, dest) if self.nested_groups: self.push_group(self.inventory, region, instance.placement) # Inventory: Group by instance type type_name = self.to_safe('type_' + instance.instance_type) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by key pair if instance.key_name: key_name = self.to_safe('key_' + instance.key_name) self.push(self.inventory, key_name, dest) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) # Inventory: Group by VPC if instance.vpc_id: 
self.push(self.inventory, self.to_safe('vpc_id_' + instance.vpc_id), dest) # Inventory: Group by security group try: for group in instance.groups: key = self.to_safe("security_group_" + group.name) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: print 'Package boto seems a bit older.' print 'Please upgrade boto >= 2.3.0.' sys.exit(1) # Inventory: Group by tag keys for k, v in instance.tags.iteritems(): key = self.to_safe("tag_" + k + "=" + v) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by Route53 domain names if enabled if self.route53_enabled: route53_names = self.get_instance_route53_names(instance) for name in route53_names: self.push(self.inventory, name, dest) if self.nested_groups: self.push_group(self.inventory, 'route53', name) # Global Tag: instances without tags if len(instance.tags) == 0: self.push(self.inventory, 'tag_none', dest) # Global Tag: tag all EC2 instances self.push(self.inventory, 'ec2', dest) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) def add_rds_instance(self, instance, region): ''' Adds an RDS instance to the inventory and index, as long as it is addressable ''' # Only want available instances unless all_rds_instances is True if not self.all_rds_instances and instance.status != 'available': return # Select the best destination address #if instance.subnet_id: #dest = getattr(instance, self.vpc_destination_variable) #else: #dest = getattr(instance, self.destination_variable) dest = instance.endpoint[0] if not dest: # Skip instances we cannot address (e.g. 
private VPC subnet) return # Add to index self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) self.inventory[instance.id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.nested_groups: self.push_group(self.inventory, 'regions', region) else: self.push(self.inventory, region, dest) # Inventory: Group by availability zone self.push(self.inventory, instance.availability_zone, dest) if self.nested_groups: self.push_group(self.inventory, region, instance.availability_zone) # Inventory: Group by instance type type_name = self.to_safe('type_' + instance.instance_class) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by security group try: if instance.security_group: key = self.to_safe("security_group_" + instance.security_group.name) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: print 'Package boto seems a bit older.' print 'Please upgrade boto >= 2.3.0.' sys.exit(1) # Inventory: Group by engine self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) if self.nested_groups: self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) # Inventory: Group by parameter group self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) if self.nested_groups: self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) # Global Tag: all RDS instances self.push(self.inventory, 'rds', dest) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) def get_route53_records(self): ''' Get and store the map of resource records to domain names that point to them. 
''' r53_conn = route53.Route53Connection() all_zones = r53_conn.get_zones() route53_zones = [ zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones ] self.route53_records = {} for zone in route53_zones: rrsets = r53_conn.get_all_rrsets(zone.id) for record_set in rrsets: record_name = record_set.name if record_name.endswith('.'): record_name = record_name[:-1] for resource in record_set.resource_records: self.route53_records.setdefault(resource, set()) self.route53_records[resource].add(record_name) def get_instance_route53_names(self, instance): ''' Check if an instance is referenced in the records we have from Route53. If it is, return the list of domain names pointing to said instance. If nothing points to it, return an empty list. ''' instance_attributes = [ 'public_dns_name', 'private_dns_name', 'ip_address', 'private_ip_address' ] name_list = set() for attrib in instance_attributes: try: value = getattr(instance, attrib) except AttributeError: continue if value in self.route53_records: name_list.update(self.route53_records[value]) return list(name_list) def get_host_info_dict_from_instance(self, instance): instance_vars = {} for key in vars(instance): value = getattr(instance, key) key = self.to_safe('ec2_' + key) # Handle complex types # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 if key == 'ec2__state': instance_vars['ec2_state'] = instance.state or '' instance_vars['ec2_state_code'] = instance.state_code elif key == 'ec2__previous_state': instance_vars['ec2_previous_state'] = instance.previous_state or '' instance_vars['ec2_previous_state_code'] = instance.previous_state_code elif type(value) in [int, bool]: instance_vars[key] = value elif type(value) in [str, unicode]: instance_vars[key] = value.strip() elif type(value) == type(None): instance_vars[key] = '' elif key == 'ec2_region': instance_vars[key] = value.name elif key == 'ec2__placement': 
instance_vars['ec2_placement'] = value.zone elif key == 'ec2_tags': for k, v in value.iteritems(): key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': group_ids = [] group_names = [] for group in value: group_ids.append(group.id) group_names.append(group.name) instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) else: pass # TODO Product codes if someone finds them useful #print key #print type(value) #print value return instance_vars def get_host_info(self): ''' Get variables about a specific host ''' if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if not self.args.host in self.index: # try updating the cache self.do_api_calls_update_cache() if not self.args.host in self.index: # host might not exist anymore return self.json_format_dict({}, True) (region, instance_id) = self.index[self.args.host] instance = self.get_instance(region, instance_id) return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) def push(self, my_dict, key, element): ''' Push an element onto an array that may not have been defined in the dict ''' group_info = my_dict.setdefault(key, []) if isinstance(group_info, dict): host_list = group_info.setdefault('hosts', []) host_list.append(element) else: group_info.append(element) def push_group(self, my_dict, key, element): ''' Push a group as a child of another group. 
''' parent_group = my_dict.setdefault(key, {}) if not isinstance(parent_group, dict): parent_group = my_dict[key] = {'hosts': parent_group} child_groups = parent_group.setdefault('children', []) if element not in child_groups: child_groups.append(element) def get_inventory_from_cache(self): ''' Reads the inventory from the cache file and returns it as a JSON object ''' cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() return json_inventory def load_index_from_cache(self): ''' Reads the index from the cache file sets self.index ''' cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index) def write_to_cache(self, data, filename): ''' Writes data in JSON format to a file ''' json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' return re.sub("[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) # Run the script Ec2Inventory()
gpl-3.0
turbulent/substance
setup.py
1
1224
from setuptools import setup, find_packages import platform with open('README.rst') as f: readme = f.read() with open('substance/_version.py') as versionFile: exec(versionFile.read()) install_requires = [ 'setuptools>=1.1.3', 'PyYAML', 'tabulate', 'paramiko>=2.4.1', 'netaddr', 'requests', 'tinydb', 'python_hosts==0.3.3', 'jinja2' ] setup(name='substance', version=__version__, author='Turbulent', author_email='oss@turbulent.ca', url='https://substance.readthedocs.io/', license='Apache License 2.0', long_description=readme, description='Substance - Local dockerized development environment', install_requires=install_requires, extras_require={ ':sys.platform == "darwin"': ['macfsevents'], ':sys.platform == "linux"': ['watchdog'] }, python_requires='>=3', packages=find_packages(), package_data={'substance': ['support/*']}, test_suite='tests', zip_safe=False, include_package_data=True, entry_points={ 'console_scripts': [ 'substance = substance.cli:cli', 'subenv = substance.subenv.cli:cli' ], })
apache-2.0
itdxer/neupy
neupy/layers/convolutions.py
1
17619
from __future__ import division import math import collections import six import tensorflow as tf from neupy import init from neupy.utils import as_tuple from neupy.exceptions import LayerConnectionError from neupy.core.properties import ( TypedListProperty, Property, ParameterProperty, ) from .base import BaseLayer __all__ = ('Convolution', 'Deconvolution') class Spatial2DProperty(TypedListProperty): expected_type = (list, tuple, int) def __init__(self, *args, **kwargs): kwargs['element_type'] = int super(Spatial2DProperty, self).__init__(*args, **kwargs) def __set__(self, instance, value): if isinstance(value, collections.Iterable) and len(value) == 1: value = (value[0], 1) if isinstance(value, int): value = (value, value) super(Spatial2DProperty, self).__set__(instance, value) def validate(self, value): super(Spatial2DProperty, self).validate(value) if len(value) > 2: raise ValueError( "Stride can have only one or two elements " "in the list. Got {}".format(len(value))) if any(element <= 0 for element in value): raise ValueError( "Stride size should contain only values greater than zero") def deconv_output_shape(dimension_size, filter_size, padding, stride, dilation=1): """ Computes deconvolution's output shape for one spatial dimension. Parameters ---------- dimension_size : int or None Size of the dimension. Typically it's image's weight or height. It might be equal to ``None`` when we input might have variable dimension. filter_size : int Size of the convolution filter. padding : {``valid``, ``same``} or int Type or size of the zero-padding. stride : int Stride size. dilation : int Dilation rate. Only ``dilation=1`` is supported for the deconvolution. Returns ------- int Dimension size after applying deconvolution operation with specified configurations. 
""" if isinstance(dimension_size, tf.Dimension): dimension_size = dimension_size.value if dimension_size is None: return None if dilation != 1: raise ValueError("Deconvolution layer doesn't support dilation") if padding in ('VALID', 'valid'): return dimension_size * stride + max(filter_size - stride, 0) elif padding in ('SAME', 'same'): return dimension_size * stride elif isinstance(padding, int): return dimension_size * stride - 2 * padding + filter_size - 1 raise ValueError( "`{!r}` is unknown deconvolution's padding value".format(padding)) def conv_output_shape(dimension_size, filter_size, padding, stride, dilation=1): """ Computes convolution's output shape for one spatial dimension. Parameters ---------- dimension_size : int or None Size of the dimension. Typically it's image's weight or height. It might be equal to ``None`` when we input might have variable dimension. filter_size : int Size of the convolution filter. padding : {``valid``, ``same``} or int Type or size of the zero-padding. stride : int Stride size. dilation : int Dilation rate. Defaults to ``1``. Returns ------- int Dimension size after applying convolution operation with specified configurations. 
""" if isinstance(dimension_size, tf.Dimension): dimension_size = dimension_size.value if dimension_size is None: return None if not isinstance(stride, int): raise ValueError( "Stride needs to be an integer, got {} (value {!r})" "".format(type(stride), stride)) if not isinstance(filter_size, int): raise ValueError( "Filter size needs to be an integer, got {} " "(value {!r})".format(type(filter_size), filter_size)) # We can think of the dilation as very sparse convolutional filter # filter=3 and dilation=2 the same as filter=5 and dilation=1 filter_size = filter_size + (filter_size - 1) * (dilation - 1) if padding in ('VALID', 'valid'): return int(math.ceil((dimension_size - filter_size + 1) / stride)) elif padding in ('SAME', 'same'): return int(math.ceil(dimension_size / stride)) elif isinstance(padding, int): return int(math.ceil( (dimension_size + 2 * padding - filter_size + 1) / stride)) raise ValueError( "`{!r}` is unknown convolution's padding value".format(padding)) class PaddingProperty(Property): expected_type = (six.string_types, int, tuple) valid_string_choices = ('VALID', 'SAME', 'same', 'valid') def __set__(self, instance, value): if isinstance(value, int): if value < 0: raise ValueError( "Integer border mode value needs to be " "greater or equal to zero, got {}".format(value)) value = (value, value) if isinstance(value, six.string_types): value = value.upper() super(PaddingProperty, self).__set__(instance, value) def validate(self, value): super(PaddingProperty, self).validate(value) if isinstance(value, tuple) and len(value) != 2: raise ValueError( "Border mode property suppose to get a tuple that " "contains two elements, got {} elements" "".format(len(value))) is_invalid_string = ( isinstance(value, six.string_types) and value not in self.valid_string_choices ) if is_invalid_string: valid_choices = ', '.join(self.valid_string_choices) raise ValueError( "`{}` is invalid string value. 
Available choices: {}" "".format(value, valid_choices)) if isinstance(value, tuple) and any(element < 0 for element in value): raise ValueError( "Tuple border mode value needs to contain only elements " "that greater or equal to zero, got {}".format(value)) class Convolution(BaseLayer): """ Convolutional layer. Parameters ---------- size : tuple of int Filter shape. In should be defined as a tuple with three integers ``(filter rows, filter columns, output channels)``. padding : {{``same``, ``valid``}}, int, tuple Zero padding for the input tensor. - ``valid`` - Padding won't be added to the tensor. Result will be the same as for ``padding=0`` - ``same`` - Padding will depend on the number of rows and columns in the filter. This padding makes sure that image with the ``stride=1`` won't change its width and height. It's the same as ``padding=(filter rows // 2, filter columns // 2)``. - Custom value for the padding can be specified as an integer, like ``padding=1`` or it can be specified as a tuple when different dimensions have different padding values, for example ``padding=(2, 3)``. Defaults to ``valid``. stride : tuple with ints, int. Stride size. Defaults to ``(1, 1)`` dilation : int, tuple Rate for the filter upsampling. When ``dilation > 1`` layer will become dilated convolution (or atrous convolution). Defaults to ``1``. weight : array-like, Tensorfow variable, scalar or Initializer Defines layer's weights. Shape of the weight will be equal to ``(filter rows, filter columns, input channels, output channels)``. Default initialization methods you can find :ref:`here <init-methods>`. Defaults to :class:`HeNormal(gain=2) <neupy.init.HeNormal>`. bias : 1D array-like, Tensorfow variable, scalar, Initializer or None Defines layer's bias. Default initialization methods you can find :ref:`here <init-methods>`. Defaults to :class:`Constant(0) <neupy.init.Constant>`. The ``None`` value excludes bias from the calculations and do not add it into parameters list. 
{BaseLayer.name} Examples -------- 2D Convolution >>> from neupy import layers >>> >>> layers.join( ... layers.Input((28, 28, 3)), ... layers.Convolution((3, 3, 16)), ... ) 1D Convolution >>> from neupy.layers import * >>> network = join( ... Input((30, 10)), ... Reshape((30, 1, 10)), # convert 3D to 4D ... Convolution((3, 1, 16)), ... Reshape((-1, 16)) # convert 4D back to 3D ... ) >>> network (?, 30, 10) -> [... 4 layers ...] -> (?, 28, 16) Methods ------- {BaseLayer.Methods} Attributes ---------- {BaseLayer.Attributes} """ size = TypedListProperty(element_type=int, n_elements=3) weight = ParameterProperty() bias = ParameterProperty(allow_none=True) padding = PaddingProperty() stride = Spatial2DProperty() dilation = Spatial2DProperty() # We use gain=2 because it's suitable choice for relu non-linearity # and relu is the most common non-linearity used for CNN. def __init__(self, size, padding='valid', stride=1, dilation=1, weight=init.HeNormal(gain=2), bias=0, name=None): super(Convolution, self).__init__(name=name) self.size = size self.padding = padding self.stride = stride self.dilation = dilation self.weight = weight self.bias = bias def fail_if_shape_invalid(self, input_shape): if input_shape and input_shape.ndims != 4: raise LayerConnectionError( "Convolutional layer expects an input with 4 " "dimensions, got {} with shape {}" "".format(len(input_shape), input_shape)) def output_shape_per_dim(self, *args, **kwargs): return conv_output_shape(*args, **kwargs) def expected_output_shape(self, input_shape): n_samples = input_shape[0] row_filter_size, col_filter_size, n_kernels = self.size row_stride, col_stride = self.stride row_dilation, col_dilation = self.dilation if isinstance(self.padding, (list, tuple)): row_padding, col_padding = self.padding else: row_padding, col_padding = self.padding, self.padding return ( n_samples, self.output_shape_per_dim( input_shape[1], row_filter_size, row_padding, row_stride, row_dilation ), self.output_shape_per_dim( 
input_shape[2], col_filter_size, col_padding, col_stride, col_dilation ), n_kernels, ) def get_output_shape(self, input_shape): input_shape = tf.TensorShape(input_shape) self.fail_if_shape_invalid(input_shape) if input_shape.ndims is None: n_samples = input_shape[0] n_kernels = self.size[-1] return tf.TensorShape((n_samples, None, None, n_kernels)) return tf.TensorShape(self.expected_output_shape(input_shape)) def create_variables(self, input_shape): self.input_shape = input_shape n_channels = input_shape[-1] n_rows, n_cols, n_filters = self.size # Compare to the regular convolution weights, # transposed one has switched input and output channels. self.weight = self.variable( value=self.weight, name='weight', shape=(n_rows, n_cols, n_channels, n_filters)) if self.bias is not None: self.bias = self.variable( value=self.bias, name='bias', shape=as_tuple(n_filters)) def output(self, input, **kwargs): input = tf.convert_to_tensor(input, tf.float32) self.fail_if_shape_invalid(input.shape) padding = self.padding if not isinstance(padding, six.string_types): height_pad, width_pad = padding input = tf.pad(input, [ [0, 0], [height_pad, height_pad], [width_pad, width_pad], [0, 0], ]) # VALID option will make sure that # convolution won't use any padding. padding = 'VALID' output = tf.nn.convolution( input, self.weight, padding=padding, strides=self.stride, dilation_rate=self.dilation, data_format="NHWC", ) if self.bias is not None: bias = tf.reshape(self.bias, (1, 1, 1, -1)) output += bias return output def __repr__(self): return self._repr_arguments( self.size, padding=self.padding, stride=self.stride, dilation=self.dilation, weight=self.weight, bias=self.bias, name=self.name, ) class Deconvolution(Convolution): """ Deconvolution layer (also known as Transposed Convolution.). Parameters ---------- {Convolution.size} {Convolution.padding} {Convolution.stride} {Convolution.dilation} weight : array-like, Tensorfow variable, scalar or Initializer Defines layer's weights. 
Shape of the weight will be equal to ``(filter rows, filter columns, output channels, input channels)``. Default initialization methods you can find :ref:`here <init-methods>`. Defaults to :class:`HeNormal(gain=2) <neupy.init.HeNormal>`. {Convolution.bias} {Convolution.name} Methods ------- {Convolution.Methods} Attributes ---------- {Convolution.Attributes} Examples -------- >>> from neupy.layers import * >>> network = join( ... Input((28, 28, 3)), ... Convolution((3, 3, 16)), ... Deconvolution((3, 3, 1)), ... ) >>> network (?, 28, 28, 3) -> [... 3 layers ...] -> (?, 28, 28, 1) """ def __init__(self, size, padding='valid', stride=1, weight=init.HeNormal(gain=2), bias=0, name=None): super(Deconvolution, self).__init__( size=size, padding=padding, stride=stride, dilation=1, weight=weight, bias=bias, name=name) def output_shape_per_dim(self, *args, **kwargs): return deconv_output_shape(*args, **kwargs) def create_variables(self, input_shape): self.input_shape = input_shape n_channels = input_shape[-1] n_rows, n_cols, n_filters = self.size # Compare to the regular convolution weights, # transposed one has switched input and output channels. self.weight = self.variable( value=self.weight, name='weight', shape=(n_rows, n_cols, n_filters, n_channels)) if self.bias is not None: self.bias = self.variable( value=self.bias, name='bias', shape=as_tuple(n_filters)) def output(self, input, **kwargs): input = tf.convert_to_tensor(input, tf.float32) # We need to get information about output shape from the input # tensor's shape, because for some inputs we might have # height and width specified as None and shape value won't be # computed for these dimensions. padding = self.padding # It's important that expected output shape gets computed on then # Tensor (produced by tf.shape) rather than on TensorShape object. # Tensorflow cannot convert TensorShape object into Tensor and # it will cause an exception in the conv2d_transpose layer. 
output_shape = self.expected_output_shape(tf.shape(input)) if isinstance(self.padding, (list, tuple)): height_pad, width_pad = self.padding # VALID option will make sure that # deconvolution won't use any padding. padding = 'VALID' # conv2d_transpose doesn't know about extra paddings that we added # in the convolution. For this reason, we have to expand our # expected output shape and later we will remove these paddings # manually after transpose convolution. output_shape = ( output_shape[0], output_shape[1] + 2 * height_pad, output_shape[2] + 2 * width_pad, output_shape[3], ) output = tf.nn.conv2d_transpose( value=input, filter=self.weight, output_shape=list(output_shape), strides=as_tuple(1, self.stride, 1), padding=padding, data_format="NHWC" ) if isinstance(self.padding, (list, tuple)): h_pad, w_pad = self.padding if h_pad > 0: output = output[:, h_pad:-h_pad, :, :] if w_pad > 0: output = output[:, :, w_pad:-w_pad, :] if self.bias is not None: bias = tf.reshape(self.bias, (1, 1, 1, -1)) output += bias return output def __repr__(self): return self._repr_arguments( self.size, padding=self.padding, stride=self.stride, weight=self.weight, bias=self.bias, name=self.name, )
mit
akosyakov/intellij-community
python/lib/Lib/encodings/cp857.py
593
34164
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp857', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX 0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x0090: 0x00c9, # 
LATIN CAPITAL LETTER E WITH ACUTE 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE 0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE 0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA 0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE 0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE 0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE 0x00a8: 0x00bf, # INVERTED QUESTION MARK 0x00a9: 0x00ae, # REGISTERED SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX 0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE 0x00b8: 0x00a9, # COPYRIGHT SIGN 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 
0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x00a2, # CENT SIGN 0x00be: 0x00a5, # YEN SIGN 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE 0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x00a4, # CURRENCY SIGN 0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR 0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR 0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS 0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE 0x00d5: None, # UNDEFINED 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x00a6, # BROKEN BAR 0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x00e3: 0x00d2, # LATIN 
CAPITAL LETTER O WITH GRAVE 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: None, # UNDEFINED 0x00e8: 0x00d7, # MULTIPLICATION SIGN 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE 0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX 0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE 0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS 0x00ee: 0x00af, # MACRON 0x00ef: 0x00b4, # ACUTE ACCENT 0x00f0: 0x00ad, # SOFT HYPHEN 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: None, # UNDEFINED 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS 0x00f4: 0x00b6, # PILCROW SIGN 0x00f5: 0x00a7, # SECTION SIGN 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x00b8, # CEDILLA 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x00a8, # DIAERESIS 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x00b9, # SUPERSCRIPT ONE 0x00fc: 0x00b3, # SUPERSCRIPT THREE 0x00fd: 0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( u'\x00' # 0x0000 -> NULL u'\x01' # 0x0001 -> START OF HEADING u'\x02' # 0x0002 -> START OF TEXT u'\x03' # 0x0003 -> END OF TEXT u'\x04' # 0x0004 -> END OF TRANSMISSION u'\x05' # 0x0005 -> ENQUIRY u'\x06' # 0x0006 -> ACKNOWLEDGE u'\x07' # 0x0007 -> BELL u'\x08' # 0x0008 -> BACKSPACE u'\t' # 0x0009 -> HORIZONTAL TABULATION u'\n' # 0x000a -> LINE FEED u'\x0b' # 0x000b -> VERTICAL TABULATION u'\x0c' # 0x000c -> FORM FEED u'\r' # 0x000d -> CARRIAGE RETURN u'\x0e' # 0x000e -> SHIFT OUT u'\x0f' # 0x000f -> SHIFT IN u'\x10' # 0x0010 -> DATA LINK ESCAPE u'\x11' # 0x0011 -> DEVICE CONTROL ONE u'\x12' # 0x0012 -> DEVICE CONTROL TWO u'\x13' # 0x0013 -> DEVICE CONTROL THREE u'\x14' # 0x0014 -> DEVICE CONTROL FOUR u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x0016 -> SYNCHRONOUS IDLE u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK u'\x18' # 0x0018 -> CANCEL u'\x19' # 0x0019 -> END OF MEDIUM u'\x1a' # 0x001a -> SUBSTITUTE u'\x1b' # 0x001b -> 
ESCAPE u'\x1c' # 0x001c -> FILE SEPARATOR u'\x1d' # 0x001d -> GROUP SEPARATOR u'\x1e' # 0x001e -> RECORD SEPARATOR u'\x1f' # 0x001f -> UNIT SEPARATOR u' ' # 0x0020 -> SPACE u'!' # 0x0021 -> EXCLAMATION MARK u'"' # 0x0022 -> QUOTATION MARK u'#' # 0x0023 -> NUMBER SIGN u'$' # 0x0024 -> DOLLAR SIGN u'%' # 0x0025 -> PERCENT SIGN u'&' # 0x0026 -> AMPERSAND u"'" # 0x0027 -> APOSTROPHE u'(' # 0x0028 -> LEFT PARENTHESIS u')' # 0x0029 -> RIGHT PARENTHESIS u'*' # 0x002a -> ASTERISK u'+' # 0x002b -> PLUS SIGN u',' # 0x002c -> COMMA u'-' # 0x002d -> HYPHEN-MINUS u'.' # 0x002e -> FULL STOP u'/' # 0x002f -> SOLIDUS u'0' # 0x0030 -> DIGIT ZERO u'1' # 0x0031 -> DIGIT ONE u'2' # 0x0032 -> DIGIT TWO u'3' # 0x0033 -> DIGIT THREE u'4' # 0x0034 -> DIGIT FOUR u'5' # 0x0035 -> DIGIT FIVE u'6' # 0x0036 -> DIGIT SIX u'7' # 0x0037 -> DIGIT SEVEN u'8' # 0x0038 -> DIGIT EIGHT u'9' # 0x0039 -> DIGIT NINE u':' # 0x003a -> COLON u';' # 0x003b -> SEMICOLON u'<' # 0x003c -> LESS-THAN SIGN u'=' # 0x003d -> EQUALS SIGN u'>' # 0x003e -> GREATER-THAN SIGN u'?' 
# 0x003f -> QUESTION MARK u'@' # 0x0040 -> COMMERCIAL AT u'A' # 0x0041 -> LATIN CAPITAL LETTER A u'B' # 0x0042 -> LATIN CAPITAL LETTER B u'C' # 0x0043 -> LATIN CAPITAL LETTER C u'D' # 0x0044 -> LATIN CAPITAL LETTER D u'E' # 0x0045 -> LATIN CAPITAL LETTER E u'F' # 0x0046 -> LATIN CAPITAL LETTER F u'G' # 0x0047 -> LATIN CAPITAL LETTER G u'H' # 0x0048 -> LATIN CAPITAL LETTER H u'I' # 0x0049 -> LATIN CAPITAL LETTER I u'J' # 0x004a -> LATIN CAPITAL LETTER J u'K' # 0x004b -> LATIN CAPITAL LETTER K u'L' # 0x004c -> LATIN CAPITAL LETTER L u'M' # 0x004d -> LATIN CAPITAL LETTER M u'N' # 0x004e -> LATIN CAPITAL LETTER N u'O' # 0x004f -> LATIN CAPITAL LETTER O u'P' # 0x0050 -> LATIN CAPITAL LETTER P u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q u'R' # 0x0052 -> LATIN CAPITAL LETTER R u'S' # 0x0053 -> LATIN CAPITAL LETTER S u'T' # 0x0054 -> LATIN CAPITAL LETTER T u'U' # 0x0055 -> LATIN CAPITAL LETTER U u'V' # 0x0056 -> LATIN CAPITAL LETTER V u'W' # 0x0057 -> LATIN CAPITAL LETTER W u'X' # 0x0058 -> LATIN CAPITAL LETTER X u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y u'Z' # 0x005a -> LATIN CAPITAL LETTER Z u'[' # 0x005b -> LEFT SQUARE BRACKET u'\\' # 0x005c -> REVERSE SOLIDUS u']' # 0x005d -> RIGHT SQUARE BRACKET u'^' # 0x005e -> CIRCUMFLEX ACCENT u'_' # 0x005f -> LOW LINE u'`' # 0x0060 -> GRAVE ACCENT u'a' # 0x0061 -> LATIN SMALL LETTER A u'b' # 0x0062 -> LATIN SMALL LETTER B u'c' # 0x0063 -> LATIN SMALL LETTER C u'd' # 0x0064 -> LATIN SMALL LETTER D u'e' # 0x0065 -> LATIN SMALL LETTER E u'f' # 0x0066 -> LATIN SMALL LETTER F u'g' # 0x0067 -> LATIN SMALL LETTER G u'h' # 0x0068 -> LATIN SMALL LETTER H u'i' # 0x0069 -> LATIN SMALL LETTER I u'j' # 0x006a -> LATIN SMALL LETTER J u'k' # 0x006b -> LATIN SMALL LETTER K u'l' # 0x006c -> LATIN SMALL LETTER L u'm' # 0x006d -> LATIN SMALL LETTER M u'n' # 0x006e -> LATIN SMALL LETTER N u'o' # 0x006f -> LATIN SMALL LETTER O u'p' # 0x0070 -> LATIN SMALL LETTER P u'q' # 0x0071 -> LATIN SMALL LETTER Q u'r' # 0x0072 -> LATIN SMALL LETTER R u's' # 0x0073 -> 
LATIN SMALL LETTER S u't' # 0x0074 -> LATIN SMALL LETTER T u'u' # 0x0075 -> LATIN SMALL LETTER U u'v' # 0x0076 -> LATIN SMALL LETTER V u'w' # 0x0077 -> LATIN SMALL LETTER W u'x' # 0x0078 -> LATIN SMALL LETTER X u'y' # 0x0079 -> LATIN SMALL LETTER Y u'z' # 0x007a -> LATIN SMALL LETTER Z u'{' # 0x007b -> LEFT CURLY BRACKET u'|' # 0x007c -> VERTICAL LINE u'}' # 0x007d -> RIGHT CURLY BRACKET u'~' # 0x007e -> TILDE u'\x7f' # 0x007f -> DELETE u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\u0131' # 0x008d -> LATIN SMALL LETTER DOTLESS I u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE u'\u0130' # 0x0098 -> LATIN CAPITAL LETTER I WITH DOT ABOVE u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE u'\xa3' # 0x009c -> POUND 
SIGN u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE u'\u015e' # 0x009e -> LATIN CAPITAL LETTER S WITH CEDILLA u'\u015f' # 0x009f -> LATIN SMALL LETTER S WITH CEDILLA u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE u'\u011e' # 0x00a6 -> LATIN CAPITAL LETTER G WITH BREVE u'\u011f' # 0x00a7 -> LATIN SMALL LETTER G WITH BREVE u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK u'\xae' # 0x00a9 -> REGISTERED SIGN u'\xac' # 0x00aa -> NOT SIGN u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u2591' # 0x00b0 -> LIGHT SHADE u'\u2592' # 0x00b1 -> MEDIUM SHADE u'\u2593' # 0x00b2 -> DARK SHADE u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE u'\xa9' # 0x00b8 -> COPYRIGHT SIGN u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT u'\xa2' # 0x00bd -> CENT SIGN u'\xa5' # 0x00be -> YEN SIGN u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT 
HORIZONTAL u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL u'\xa4' # 0x00cf -> CURRENCY SIGN u'\xba' # 0x00d0 -> MASCULINE ORDINAL INDICATOR u'\xaa' # 0x00d1 -> FEMININE ORDINAL INDICATOR u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE u'\ufffe' # 0x00d5 -> UNDEFINED u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT u'\u2588' # 0x00db -> FULL BLOCK u'\u2584' # 0x00dc -> LOWER HALF BLOCK u'\xa6' # 0x00dd -> BROKEN BAR u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE u'\u2580' # 0x00df -> UPPER HALF BLOCK u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE u'\xb5' # 0x00e6 -> MICRO SIGN u'\ufffe' # 0x00e7 -> UNDEFINED u'\xd7' # 0x00e8 -> MULTIPLICATION SIGN u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE 
u'\xec' # 0x00ec -> LATIN SMALL LETTER I WITH GRAVE u'\xff' # 0x00ed -> LATIN SMALL LETTER Y WITH DIAERESIS u'\xaf' # 0x00ee -> MACRON u'\xb4' # 0x00ef -> ACUTE ACCENT u'\xad' # 0x00f0 -> SOFT HYPHEN u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN u'\ufffe' # 0x00f2 -> UNDEFINED u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS u'\xb6' # 0x00f4 -> PILCROW SIGN u'\xa7' # 0x00f5 -> SECTION SIGN u'\xf7' # 0x00f6 -> DIVISION SIGN u'\xb8' # 0x00f7 -> CEDILLA u'\xb0' # 0x00f8 -> DEGREE SIGN u'\xa8' # 0x00f9 -> DIAERESIS u'\xb7' # 0x00fa -> MIDDLE DOT u'\xb9' # 0x00fb -> SUPERSCRIPT ONE u'\xb3' # 0x00fc -> SUPERSCRIPT THREE u'\xb2' # 0x00fd -> SUPERSCRIPT TWO u'\u25a0' # 0x00fe -> BLACK SQUARE u'\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # 
AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 
0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK 0x00a2: 0x00bd, # CENT SIGN 0x00a3: 0x009c, # POUND SIGN 0x00a4: 0x00cf, # CURRENCY SIGN 0x00a5: 0x00be, # YEN SIGN 0x00a6: 0x00dd, # BROKEN BAR 0x00a7: 0x00f5, # SECTION SIGN 0x00a8: 0x00f9, # DIAERESIS 0x00a9: 0x00b8, # COPYRIGHT SIGN 0x00aa: 0x00d1, # FEMININE ORDINAL INDICATOR 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00ad: 0x00f0, # SOFT HYPHEN 0x00ae: 0x00a9, # REGISTERED SIGN 0x00af: 0x00ee, # MACRON 0x00b0: 0x00f8, # DEGREE SIGN 0x00b1: 0x00f1, # PLUS-MINUS SIGN 0x00b2: 0x00fd, # SUPERSCRIPT TWO 0x00b3: 0x00fc, # SUPERSCRIPT THREE 0x00b4: 0x00ef, # ACUTE ACCENT 0x00b5: 0x00e6, # MICRO SIGN 0x00b6: 0x00f4, # PILCROW SIGN 0x00b7: 0x00fa, # 
MIDDLE DOT 0x00b8: 0x00f7, # CEDILLA 0x00b9: 0x00fb, # SUPERSCRIPT ONE 0x00ba: 0x00d0, # MASCULINE ORDINAL INDICATOR 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS 0x00bf: 0x00a8, # INVERTED QUESTION MARK 0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX 0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA 0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE 0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS 0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE 0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x00d7: 0x00e8, # MULTIPLICATION SIGN 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE 0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE 0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE 0x00e2: 
0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS 0x00ec: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS 0x00f7: 0x00f6, # DIVISION SIGN 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS 0x00ff: 0x00ed, # LATIN SMALL LETTER Y WITH DIAERESIS 0x011e: 0x00a6, # LATIN CAPITAL LETTER G WITH BREVE 0x011f: 0x00a7, # LATIN SMALL LETTER G WITH BREVE 0x0130: 0x0098, # LATIN CAPITAL LETTER I WITH DOT ABOVE 0x0131: 0x008d, # LATIN SMALL LETTER DOTLESS I 0x015e: 0x009e, # LATIN CAPITAL LETTER S WITH CEDILLA 0x015f: 0x009f, # LATIN SMALL LETTER S WITH CEDILLA 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # 
BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
apache-2.0
phikal/brainduck
python/brainfuck-simple.py
2
1609
import sys
import optparse


def precompute_jumps(program):
    """Map each '[' to its matching ']' and vice versa.

    Args:
        program: string (or other indexable sequence) of Brainfuck opcodes.

    Returns:
        Dict mapping the index of every '[' to the index of its matching ']'
        and the reverse, so the interpreter can jump in O(1).

    Raises:
        ValueError: if the brackets in ``program`` are unbalanced.  (The
            original crashed later with IndexError/KeyError instead.)
    """
    open_brackets = []
    jumps = {}
    for pc, opcode in enumerate(program):
        if opcode == "[":
            open_brackets.append(pc)
        elif opcode == "]":
            if not open_brackets:
                raise ValueError("unmatched ']' at position %d" % pc)
            target = open_brackets.pop()
            jumps[target] = pc
            jumps[pc] = target
    if open_brackets:
        raise ValueError("unmatched '[' at position %d" % open_brackets[0])
    return jumps


def run(program):
    """Interpret ``program``, reading from stdin and writing to stdout.

    The tape grows on demand to the right; cells are unbounded Python ints
    (no 8-bit wrap-around), matching the original implementation.
    """
    tape = [0]  # renamed from `buffer` to avoid shadowing the builtin
    jump_map = precompute_jumps(program)
    ptr = 0
    pc = 0
    while pc < len(program):
        opcode = program[pc]
        if opcode == ">":
            ptr += 1
            if ptr == len(tape):
                tape.append(0)
        elif opcode == "<":
            # NOTE(review): no underflow check, as in the original; ptr == -1
            # silently aliases the rightmost cell.
            ptr -= 1
        elif opcode == "+":
            tape[ptr] += 1
        elif opcode == "-":
            tape[ptr] -= 1
        elif opcode == ".":
            sys.stdout.write(chr(tape[ptr]))
            sys.stdout.flush()
        elif opcode == ",":
            data = sys.stdin.read(1)
            # On EOF read(1) returns ''; leave the cell unchanged instead of
            # crashing on ord('') as the original did.
            if data:
                tape[ptr] = ord(data)
        elif opcode == "[":
            if tape[ptr] == 0:
                pc = jump_map[pc]
        elif opcode == "]":
            if tape[ptr] != 0:
                pc = jump_map[pc]
        pc += 1


if __name__ == "__main__":
    parser = optparse.OptionParser()
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      default=False, help="Verbosity ON")
    options, args = parser.parse_args()
    if args:
        with open(args[0], "r") as input_file:
            contents = input_file.read()
    else:
        contents = sys.stdin.read()
    # Join into a string: under Python 3 a bare filter() object has no len()
    # and cannot be indexed, both of which run() relies on.
    program = "".join(c for c in contents if c in "<>-+[],.")
    run(program)
mit
jameshuang/mapnoise
setup.py
1
1529
# py2exe build script for the mapnoise application.
from distutils.core import setup
import py2exe
import FileDialog
from distutils.filelist import findall
import os
import glob
import matplotlib


def find_data_files(source, target, patterns):
    """Locate data files matching *patterns* under *source*.

    Returns a sorted list of ``(directory, [filenames])`` pairs in the
    format expected by distutils' ``data_files`` option: each matched file
    is grouped under the directory it should occupy below *target*.

    *source* is the root of the source data tree ('' or '.' for the current
    directory); *target* is the root of the distribution data tree.  Neither
    may contain glob magic — only the *patterns* may.
    """
    if glob.has_magic(source) or glob.has_magic(target):
        raise ValueError("Magic not allowed in src, target")
    grouped = {}
    for raw_pattern in patterns:
        for match in glob.glob(os.path.join(source, raw_pattern)):
            if not os.path.isfile(match):
                continue
            destination = os.path.join(
                target, os.path.relpath(match, source))
            grouped.setdefault(os.path.dirname(destination), []).append(match)
    return sorted(grouped.items())


my_data_files = find_data_files('', '', [
    'images/*',
    'sample-data/*/*',
])
# matplotlib ships its own data files (fonts, mpl-data) for frozen builds.
my_data_files += matplotlib.get_py2exe_datafiles()

setup(
    console=['mapnoise.py'],
    options={
        'py2exe': {
            'packages': ['matplotlib', 'pytz'],
        }
    },
    data_files=my_data_files,
)
mit
SarahBA/b2share
b2share/modules/deposit/minters.py
1
1603
# -*- coding: utf-8 -*- # # This file is part of EUDAT B2Share. # Copyright (C) 2016 CERN. # # B2Share is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # B2Share is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with B2Share; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """PID minters.""" from __future__ import absolute_import, print_function import uuid from .providers import DepositUUIDProvider def b2share_deposit_uuid_minter(record_uuid, data): """Mint deposit's PID.""" provider = DepositUUIDProvider.create( object_type='rec', object_uuid=record_uuid, # we reuse the deposit UUID as PID value. This makes the demo easier. pid_value=record_uuid.hex ) data['_deposit'] = { 'id': provider.pid.pid_value, # FIXME: do not set the status once it is done by invenio-deposit API 'status': 'draft', } return provider.pid
gpl-2.0
sergshabal/p2pool
p2pool/test/bitcoin/test_sha256.py
275
1213
from __future__ import division

import unittest
import hashlib
import random

from p2pool.bitcoin import sha256


class Test(unittest.TestCase):
    # Cross-checks the pure-Python sha256 implementation against the
    # C-backed hashlib.sha256 reference.  Python 2 only (uses xrange and
    # byte-oriented str).

    def test_all(self):
        """Compare digests on fixed inputs, all lengths 0..149, and copied state."""
        # Fixed, known inputs first (empty string, short strings, a long one).
        for test in ['', 'a', 'b', 'abc', 'abc'*50, 'hello world']:
            #print test
            #print sha256.sha256(test).hexdigest()
            #print hashlib.sha256(test).hexdigest()
            #print
            assert sha256.sha256(test).hexdigest() == hashlib.sha256(test).hexdigest()

        def random_str(l):
            # Random byte string of length l (Python 2 str == bytes).
            return ''.join(chr(random.randrange(256)) for i in xrange(l))

        # Every length from 0 through 149 — exercises each message-padding
        # boundary around the 64-byte SHA-256 block size.
        for length in xrange(150):
            test = random_str(length)
            a = sha256.sha256(test).hexdigest()
            b = hashlib.sha256(test).hexdigest()
            assert a == b

        # Incremental hashing: hash a prefix, copy() the hasher state, then
        # update() with more data — the copy path must match hashlib too.
        for i in xrange(100):
            test = random_str(int(random.expovariate(1/100)))
            test2 = random_str(int(random.expovariate(1/100)))

            a = sha256.sha256(test)
            a = a.copy()
            a.update(test2)
            a = a.hexdigest()

            b = hashlib.sha256(test)
            b = b.copy()
            b.update(test2)
            b = b.hexdigest()

            assert a == b
gpl-3.0
freedomtan/tensorflow
tensorflow/python/tools/module_util.py
15
2015
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper functions for modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import six if six.PY2: import imp # pylint: disable=g-import-not-at-top else: import importlib # pylint: disable=g-import-not-at-top def get_parent_dir(module): return os.path.abspath(os.path.join(os.path.dirname(module.__file__), "..")) def get_parent_dir_for_name(module_name): """Get parent directory for module with the given name. Args: module_name: Module name for e.g. tensorflow_estimator.python.estimator.api._v1.estimator. Returns: Path to the parent directory if module is found and None otherwise. Given example above, it should return: /pathtoestimator/tensorflow_estimator/python/estimator/api/_v1. """ name_split = module_name.split(".") if not name_split: return None if six.PY2: try: spec = imp.find_module(name_split[0]) except ImportError: return None if not spec: return None base_path = spec[1] else: try: spec = importlib.util.find_spec(name_split[0]) except ValueError: return None if not spec or not spec.origin: return None base_path = os.path.dirname(spec.origin) return os.path.join(base_path, *name_split[1:-1])
apache-2.0
R1chChapp3rs/lightblue-0.4
src/mac/obex.py
179
3421
# Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.

"""
Provides an OBEX client class and convenience functions for sending and
receiving files over OBEX.

This module also defines constants for response code values (without the
final bit set). For example:

    >>> import lightblue
    >>> lightblue.obex.OK
    32      # the OK/Success response 0x20 (i.e. 0xA0 without the final bit)
    >>> lightblue.obex.FORBIDDEN
    67      # the Forbidden response 0x43 (i.e. 0xC3 without the final bit)
"""

# Docstrings for attributes in this module.  They are attached to the
# platform implementation's public callables at the bottom of this file,
# so the user-facing docs live in one place regardless of backend.
# NOTE(review): the collapsed source does not preserve the original line
# breaks inside these string literals; the wrapping below is a best-effort
# reconstruction — confirm against upstream lightblue before relying on
# exact rendering.
_docstrings = {
    "sendfile": """
    Sends a file to a remote device.

    Raises lightblue.obex.OBEXError if an error occurred during the request,
    or if the request was refused by the remote device.

    Arguments:
        - address: the address of the remote device
        - channel: the RFCOMM channel of the remote OBEX service
        - source: a filename or file-like object, containing the data to be
          sent. If a file object is given, it must be opened for reading.

    Note you can achieve the same thing using OBEXClient with something like
    this:

    >>> import lightblue
    >>> client = lightblue.obex.OBEXClient(address, channel)
    >>> client.connect()
    <OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
    >>> putresponse = client.put({"name": "MyFile.txt"}, file("MyFile.txt", 'rb'))
    >>> client.disconnect()
    <OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
    >>> if putresponse.code != lightblue.obex.OK:
    ...     raise lightblue.obex.OBEXError("server denied the Put request")
    >>>
    """,
    "recvfile": """
    Receives a file through an OBEX service.

    Arguments:
        - sock: the server socket on which the file is to be received. Note
          this socket must *not* be listening. Also, an OBEX service should
          have been advertised on this socket.
        - dest: a filename or file-like object, to which the received data
          will be written. If a filename is given, any existing file will be
          overwritten. If a file object is given, it must be opened for
          writing.

    For example, to receive a file and save it as "MyFile.txt":

    >>> from lightblue import *
    >>> s = socket()
    >>> s.bind(("", 0))
    >>> advertise("My OBEX Service", s, OBEX)
    >>> obex.recvfile(s, "MyFile.txt")
    """
}

# import implementation modules
# Star-imports pull in the platform backend's public API (Mac backend here);
# the explicit imports below are needed to read each module's __all__.
from _obex import *
from _obexcommon import *

import _obex
import _obexcommon
__all__ = _obex.__all__ + _obexcommon.__all__

# set docstrings
# Attach the canonical docstrings defined above to the backend's exported
# callables; names without an entry in _docstrings are simply skipped.
localattrs = locals()
for attr in _obex.__all__:
    try:
        localattrs[attr].__doc__ = _docstrings[attr]
    except KeyError:
        pass
# Clean the loop variables out of the module namespace so they are not
# exported alongside the public API.
del attr, localattrs
gpl-3.0
0k/odoo
addons/l10n_tr/__openerp__.py
170
2160
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Turkey - Accounting', 'version': '1.beta', 'category': 'Localization/Account Charts', 'description': """ Türkiye için Tek düzen hesap planı şablonu OpenERP Modülü. ========================================================== Bu modül kurulduktan sonra, Muhasebe yapılandırma sihirbazı çalışır * Sihirbaz sizden hesap planı şablonu, planın kurulacağı şirket, banka hesap bilgileriniz, ilgili para birimi gibi bilgiler isteyecek. """, 'author': 'Ahmet Altınışık', 'maintainer':'https://launchpad.net/~openerp-turkey', 'website':'https://launchpad.net/openerp-turkey', 'depends': [ 'account', 'base_vat', 'account_chart', ], 'data': [ 'account_code_template.xml', 'account_tdhp_turkey.xml', 'account_tax_code_template.xml', 'account_chart_template.xml', 'account_tax_template.xml', 'l10n_tr_wizard.xml', ], 'demo': [], 'installable': True, 'images': ['images/chart_l10n_tr_1.jpg','images/chart_l10n_tr_2.jpg','images/chart_l10n_tr_3.jpg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
jolevq/odoopub
addons/mrp/wizard/mrp_workcenter_load.py
381
2222
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class mrp_workcenter_load(osv.osv_memory): _name = 'mrp.workcenter.load' _description = 'Work Center Load' _columns = { 'time_unit': fields.selection([('day', 'Day by day'),('week', 'Per week'),('month', 'Per month')],'Type of period', required=True), 'measure_unit': fields.selection([('hours', 'Amount in hours'),('cycles', 'Amount in cycles')],'Amount measuring unit', required=True), } def print_report(self, cr, uid, ids, context=None): """ To print the report of Work Center Load @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param context: A standard dictionary @return : Report """ if context is None: context = {} datas = {'ids' : context.get('active_ids',[])} res = self.read(cr, uid, ids, ['time_unit','measure_unit']) res = res and res[0] or {} datas['form'] = res return { 'type' : 'ir.actions.report.xml', 'report_name':'mrp.workcenter.load', 'datas' : datas, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
bgris/ODL_bgris
lib/python3.5/site-packages/qtpy/__init__.py
5
3670
# -*- coding: utf-8 -*- # # Copyright © 2009- The Spyder Development Team # Copyright © 2014-2015 Colin Duquesnoy # # Licensed under the terms of the MIT License # (see LICENSE.txt for details) """ **QtPy** is a shim over the various Python Qt bindings. It is used to write Qt binding indenpendent libraries or applications. The shim will automatically select the first available API (PyQt5, PyQt4 and finally PySide). You can force the use of one specific bindings (e.g. if your application is using one specific bindings and you need to use library that use QtPy) by setting up the ``QT_API`` environment variable. PyQt5 ===== For PyQt5, you don't have to set anything as it will be used automatically:: >>> from qtpy import QtGui, QtWidgets, QtCore >>> print(QtWidgets.QWidget) PyQt4 ===== Set the ``QT_API`` environment variable to 'pyqt' before importing any python package:: >>> import os >>> os.environ['QT_API'] = 'pyqt' >>> from qtpy import QtGui, QtWidgets, QtCore >>> print(QtWidgets.QWidget) PySide ====== Set the QT_API environment variable to 'pyside' before importing other packages:: >>> import os >>> os.environ['QT_API'] = 'pyside' >>> from qtpy import QtGui, QtWidgets, QtCore >>> print(QtWidgets.QWidget) """ import os # Version of QtPy from ._version import __version__ #: Qt API environment variable name QT_API = 'QT_API' #: names of the expected PyQt5 api PYQT5_API = ['pyqt5'] #: names of the expected PyQt4 api PYQT4_API = [ 'pyqt', # name used in IPython.qt 'pyqt4' # pyqode.qt original name ] #: names of the expected PySide api PYSIDE_API = ['pyside'] os.environ.setdefault(QT_API, 'pyqt5') API = os.environ[QT_API].lower() assert API in (PYQT5_API + PYQT4_API + PYSIDE_API) is_old_pyqt = is_pyqt46 = False PYQT5 = True PYQT4 = PYSIDE = False class PythonQtError(Exception): """Error raise if no bindings could be selected""" pass if API in PYQT5_API: try: from PyQt5.Qt import PYQT_VERSION_STR as PYQT_VERSION # analysis:ignore from PyQt5.Qt import QT_VERSION_STR as 
QT_VERSION # analysis:ignore PYSIDE_VERSION = None except ImportError: API = os.environ['QT_API'] = 'pyqt' if API in PYQT4_API: try: import sip try: sip.setapi('QString', 2) sip.setapi('QVariant', 2) sip.setapi('QDate', 2) sip.setapi('QDateTime', 2) sip.setapi('QTextStream', 2) sip.setapi('QTime', 2) sip.setapi('QUrl', 2) except AttributeError: # PyQt < v4.6 pass from PyQt4.Qt import PYQT_VERSION_STR as PYQT_VERSION # analysis:ignore from PyQt4.Qt import QT_VERSION_STR as QT_VERSION # analysis:ignore PYSIDE_VERSION = None PYQT5 = False PYQT4 = True except ImportError: API = os.environ['QT_API'] = 'pyside' else: is_old_pyqt = PYQT_VERSION.startswith(('4.4', '4.5', '4.6', '4.7')) is_pyqt46 = PYQT_VERSION.startswith('4.6') if API in PYSIDE_API: try: from PySide import __version__ as PYSIDE_VERSION # analysis:ignore from PySide.QtCore import __version__ as QT_VERSION # analysis:ignore PYQT_VERSION = None PYQT5 = False PYSIDE = True except ImportError: raise PythonQtError('No Qt bindings could be found') API_NAME = {'pyqt5': 'PyQt5', 'pyqt': 'PyQt4', 'pyqt4': 'PyQt4', 'pyside': 'PySide'}[API] if PYQT4: import sip try: API_NAME += (" (API v{0})".format(sip.getapi('QString'))) except AttributeError: pass
gpl-3.0
brendon-boldt/lstm-language-model
reader_test.py
23
2252
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.models.ptb_lstm.ptb_reader."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os.path

import tensorflow as tf

from tensorflow.models.rnn.ptb import reader


class PtbReaderTest(tf.test.TestCase):

  def setUp(self):
    # Tiny three-line corpus used to fake the PTB train/valid/test files.
    self._string_data = "\n".join(
        [" hello there i am",
         " rain as day",
         " want some cheesy puffs ?"])

  def testPtbRawData(self):
    """Smoke test: ptb_raw_data parses the three expected files."""
    tmpdir = tf.test.get_temp_dir()
    # ptb_raw_data looks for ptb.train.txt, ptb.valid.txt and ptb.test.txt.
    for suffix in "train", "valid", "test":
      filename = os.path.join(tmpdir, "ptb.%s.txt" % suffix)
      with tf.gfile.GFile(filename, "w") as fh:
        fh.write(self._string_data)
    # Smoke test
    output = reader.ptb_raw_data(tmpdir)
    # Expected: (train_data, valid_data, test_data, vocabulary).
    self.assertEqual(len(output), 4)

  def testPtbProducer(self):
    """Checks two consecutive (input, target) mini-batches from the queue."""
    raw_data = [4, 3, 2, 1, 0, 5, 6, 1, 1, 1, 1, 0, 3, 4, 1]
    batch_size = 3
    num_steps = 2
    x, y = reader.ptb_producer(raw_data, batch_size, num_steps)
    with self.test_session() as session:
      # ptb_producer feeds data through an input queue, so queue runners
      # must be started before session.run can yield batches.
      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(session, coord=coord)
      try:
        # Targets y are the inputs x shifted left by one token.
        xval, yval = session.run([x, y])
        self.assertAllEqual(xval, [[4, 3], [5, 6], [1, 0]])
        self.assertAllEqual(yval, [[3, 2], [6, 1], [0, 3]])
        xval, yval = session.run([x, y])
        self.assertAllEqual(xval, [[2, 1], [1, 1], [3, 4]])
        self.assertAllEqual(yval, [[1, 0], [1, 1], [4, 1]])
      finally:
        # Always shut the queue runners down, even on assertion failure,
        # so the test process does not hang on background threads.
        coord.request_stop()
        coord.join()


if __name__ == "__main__":
  tf.test.main()
apache-2.0
Khan/git-bigfile
vendor/boto/ec2/spotpricehistory.py
152
2093
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

"""
Represents an EC2 Spot Instance Request
"""

from boto.ec2.ec2object import EC2Object


class SpotPriceHistory(EC2Object):
    """A single data point from an EC2 spot price history response."""

    # Maps XML element names to the attribute they populate when the
    # Python-side name differs from the wire name; 'spotPrice' is handled
    # separately because its value is converted to float.
    _TAG_TO_ATTR = {
        'instanceType': 'instance_type',
        'productDescription': 'product_description',
        'timestamp': 'timestamp',
        'availabilityZone': 'availability_zone',
    }

    def __init__(self, connection=None):
        super(SpotPriceHistory, self).__init__(connection)
        self.price = 0.0
        self.instance_type = None
        self.product_description = None
        self.timestamp = None
        self.availability_zone = None

    def __repr__(self):
        return 'SpotPriceHistory(%s):%2f' % (self.instance_type, self.price)

    def endElement(self, name, value, connection):
        """SAX parsing hook: store the text of a closed XML element."""
        if name == 'spotPrice':
            self.price = float(value)
        else:
            # Known tags map to their snake_case attribute; anything else is
            # attached verbatim, mirroring the original catch-all behavior.
            setattr(self, self._TAG_TO_ATTR.get(name, name), value)
mit
deepmind/dm_env_rpc
dm_env_rpc/v1/extensions/properties_test.py
1
6758
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests Properties extension."""

import contextlib
from absl.testing import absltest
from dm_env import specs
import mock
import numpy as np

from google.protobuf import any_pb2
from google.rpc import status_pb2
from google.protobuf import text_format
from dm_env_rpc.v1 import connection as dm_env_rpc_connection
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import error
from dm_env_rpc.v1.extensions import properties
from dm_env_rpc.v1.extensions import properties_pb2


def _create_property_request_key(text_proto):
  """Serializes an EnvironmentRequest wrapping the given PropertyRequest.

  The returned bytes are used as lookup keys in
  _EXPECTED_REQUEST_RESPONSE_PAIRS below; text_format parsing makes the key
  independent of whitespace in `text_proto`.
  """
  extension_message = any_pb2.Any()
  extension_message.Pack(
      text_format.Parse(text_proto, properties_pb2.PropertyRequest()))
  return dm_env_rpc_pb2.EnvironmentRequest(
      extension=extension_message).SerializeToString()


def _pack_property_response(text_proto):
  """Wraps a PropertyResponse text proto in an EnvironmentResponse."""
  extension_message = any_pb2.Any()
  extension_message.Pack(
      text_format.Parse(text_proto, properties_pb2.PropertyResponse()))
  return dm_env_rpc_pb2.EnvironmentResponse(extension=extension_message)


# Set of expected requests and associated responses for mock connection.
# Keys are serialized EnvironmentRequests; values are the canned responses
# the fake Process() stub below will yield for them.
_EXPECTED_REQUEST_RESPONSE_PAIRS = {
    _create_property_request_key('read_property { key: "foo" }'):
        _pack_property_response(
            'read_property { value: { int32s: { array: 1 } } }'),
    _create_property_request_key(
        """write_property { key: "bar" value: { strings { array: "some_value" } } }"""
    ):
        _pack_property_response('write_property {}'),
    _create_property_request_key('read_property { key: "bar" }'):
        _pack_property_response(
            'read_property { value: { strings: { array: "some_value" } } }'),
    _create_property_request_key('list_property { key: "baz" }'):
        _pack_property_response(
            """list_property { values: { is_readable:true spec { name: "baz.fiz" dtype:UINT32 shape: 2 shape: 2 } }}"""
        ),
    _create_property_request_key('list_property {}'):
        _pack_property_response(
            """list_property { values: { is_readable:true spec { name: "foo" dtype:INT32 } description: "This is a documented integer" } values: { is_readable:true is_writable:true spec { name: "bar" dtype:STRING } } values: { is_listable:true spec { name: "baz" } } }"""
        ),
    # An error response: the extension is expected to surface this as a
    # DmEnvRpcError (see test_invalid_request).
    _create_property_request_key('read_property { key: "bad_property" }'):
        dm_env_rpc_pb2.EnvironmentResponse(
            error=status_pb2.Status(message='invalid property request.'))
}


@contextlib.contextmanager
def _create_mock_connection():
  """Helper to create mock dm_env_rpc connection.

  Patches the gRPC stub so each incoming request is answered from
  _EXPECTED_REQUEST_RESPONSE_PAIRS; an unexpected request raises KeyError.
  """
  with mock.patch.object(dm_env_rpc_connection,
                         'dm_env_rpc_pb2_grpc') as mock_grpc:

    def _process(request_iterator):
      # Look each request up by its serialized bytes.
      for request in request_iterator:
        yield _EXPECTED_REQUEST_RESPONSE_PAIRS[request.SerializeToString()]

    mock_stub_class = mock.MagicMock()
    mock_stub_class.Process = _process
    mock_grpc.EnvironmentStub.return_value = mock_stub_class
    yield dm_env_rpc_connection.Connection(mock.MagicMock())


class PropertiesTest(absltest.TestCase):
  """End-to-end tests of PropertiesExtension against the mocked connection."""

  def test_read_property(self):
    # Reading 'foo' unpacks the int32s tensor down to a scalar.
    with _create_mock_connection() as connection:
      extension = properties.PropertiesExtension(connection)
      self.assertEqual(1, extension['foo'])

  def test_write_property(self):
    # Write then read back through the canned request/response pairs.
    with _create_mock_connection() as connection:
      extension = properties.PropertiesExtension(connection)
      extension['bar'] = 'some_value'
      self.assertEqual('some_value', extension['bar'])

  def test_list_property(self):
    # Listing under key 'baz' exposes one readable spec with a (2, 2) shape.
    with _create_mock_connection() as connection:
      extension = properties.PropertiesExtension(connection)
      property_specs = extension.specs('baz')
      self.assertLen(property_specs, 1)
      property_spec = property_specs['baz.fiz']
      self.assertTrue(property_spec.readable)
      self.assertFalse(property_spec.writable)
      self.assertFalse(property_spec.listable)
      self.assertEqual(
          specs.Array(shape=(2, 2), dtype=np.uint32), property_spec.spec)

  def test_root_list_property(self):
    # Listing with no key returns the root-level properties.
    with _create_mock_connection() as connection:
      extension = properties.PropertiesExtension(connection)
      property_specs = extension.specs()
      self.assertLen(property_specs, 3)
      self.assertTrue(property_specs['foo'].readable)
      self.assertTrue(property_specs['bar'].readable)
      self.assertTrue(property_specs['bar'].writable)
      self.assertTrue(property_specs['baz'].listable)

  def test_invalid_spec_request_on_listable_property(self):
    # A listable-only property has no tensor spec of its own.
    with _create_mock_connection() as connection:
      extension = properties.PropertiesExtension(connection)
      property_specs = extension.specs()
      self.assertTrue(property_specs['baz'].listable)
      self.assertIsNone(property_specs['baz'].spec)

  def test_invalid_request(self):
    # The server-side error status must surface as DmEnvRpcError.
    with _create_mock_connection() as connection:
      extension = properties.PropertiesExtension(connection)
      with self.assertRaisesRegex(error.DmEnvRpcError,
                                  'invalid property request.'):
        _ = extension['bad_property']

  def test_property_description(self):
    with _create_mock_connection() as connection:
      extension = properties.PropertiesExtension(connection)
      property_specs = extension.specs()
      self.assertEqual('This is a documented integer',
                       property_specs['foo'].description)

  def test_property_print(self):
    # str() of a PropertySpec is expected to follow this exact layout.
    with _create_mock_connection() as connection:
      extension = properties.PropertiesExtension(connection)
      property_specs = extension.specs()
      self.assertRegex(
          str(property_specs['foo']),
          (r'PropertySpec\(key=foo, readable=True, writable=False, '
           r'listable=False, spec=.*, '
           r'description=This is a documented integer\)'))


if __name__ == '__main__':
  absltest.main()
apache-2.0
mglukhikh/intellij-community
python/helpers/pydev/_pydev_runfiles/pydev_runfiles_parallel.py
32
10122
import unittest
from _pydev_imps._pydev_saved_modules import thread
try:
    import Queue
except:
    import queue as Queue #@UnresolvedImport
# NOTE: the wildcard import below is what brings `sys` and `threading`
# into this module's namespace (used throughout this file).
from _pydevd_bundle.pydevd_constants import * #@UnusedWildImport
from _pydev_runfiles import pydev_runfiles_xml_rpc
import time
import os


#=======================================================================================================================
# flatten_test_suite
#=======================================================================================================================
def flatten_test_suite(test_suite, ret):
    # Recursively expands nested TestSuites, appending every leaf TestCase
    # to `ret` (mutated in place).
    if isinstance(test_suite, unittest.TestSuite):
        for t in test_suite._tests:
            flatten_test_suite(t, ret)

    elif isinstance(test_suite, unittest.TestCase):
        ret.append(test_suite)


#=======================================================================================================================
# execute_tests_in_parallel
#=======================================================================================================================
def execute_tests_in_parallel(tests, jobs, split, verbosity, coverage_files, coverage_include):
    '''
    @param tests: list(PydevTestSuite)
        A list with the suites to be run

    @param split: str
        Either 'module' or the number of tests that should be run in each batch

    @param coverage_files: list(file)
        A list with the files that should be used for giving coverage information (if empty, coverage information
        should not be gathered).

    @param coverage_include: str
        The pattern that should be included in the coverage.

    @return: bool
        Returns True if the tests were actually executed in parallel. If the tests were not executed because only 1
        should be used (e.g.: 2 jobs were requested for running 1 test), False will be returned and no tests will be
        run.

        It may also return False if in debug mode (in which case, multi-processes are not accepted)
    '''
    try:
        from _pydevd_bundle.pydevd_comm import get_global_debugger
        if get_global_debugger() is not None:
            # Parallel (multi-process) runs are not supported while debugging.
            return False
    except:
        pass #Ignore any error here.

    #This queue will receive the tests to be run. Each entry in a queue is a list with the tests to be run together When
    #split == 'tests', each list will have a single element, when split == 'module', each list will have all the tests
    #from a given module.
    tests_queue = []

    queue_elements = []
    if split == 'module':
        # Group tests by (file, module) so a whole module runs in one job.
        module_to_tests = {}
        for test in tests:
            lst = []
            flatten_test_suite(test, lst)
            for test in lst:
                key = (test.__pydev_pyfile__, test.__pydev_module_name__)
                module_to_tests.setdefault(key, []).append(test)

        for key, tests in module_to_tests.items():
            queue_elements.append(tests)

        if len(queue_elements) < jobs:
            #Don't create jobs we will never use.
            jobs = len(queue_elements)

    elif split == 'tests':
        # One queue entry per individual test case.
        for test in tests:
            lst = []
            flatten_test_suite(test, lst)
            for test in lst:
                queue_elements.append([test])

        if len(queue_elements) < jobs:
            #Don't create jobs we will never use.
            jobs = len(queue_elements)

    else:
        raise AssertionError('Do not know how to handle: %s' % (split,))

    # Convert each batch of test cases into 'filename|Class.testName' strings,
    # which is the wire format the client processes consume.
    for test_cases in queue_elements:
        test_queue_elements = []
        for test_case in test_cases:
            try:
                test_name = test_case.__class__.__name__+"."+test_case._testMethodName
            except AttributeError:
                #Support for jython 2.1 (__testMethodName is pseudo-private in the test case)
                test_name = test_case.__class__.__name__+"."+test_case._TestCase__testMethodName

            test_queue_elements.append(test_case.__pydev_pyfile__+'|'+test_name)

        tests_queue.append(test_queue_elements)

    if jobs < 2:
        # Nothing to parallelize; caller falls back to a serial run.
        return False

    sys.stdout.write('Running tests in parallel with: %s jobs.\n' %(jobs,))

    queue = Queue.Queue()
    for item in tests_queue:
        queue.put(item, block=False)

    # One provider (XML-RPC server handing out tests) plus one client
    # (subprocess running them) per job.
    providers = []
    clients = []
    for i in range(jobs):
        test_cases_provider = CommunicationThread(queue)
        providers.append(test_cases_provider)

        test_cases_provider.start()
        port = test_cases_provider.port

        if coverage_files:
            clients.append(ClientThread(i, port, verbosity, coverage_files.pop(0), coverage_include))
        else:
            clients.append(ClientThread(i, port, verbosity))

    for client in clients:
        client.start()

    # Poll until every client thread has marked itself finished.
    client_alive = True
    while client_alive:
        client_alive = False
        for client in clients:
            #Wait for all the clients to exit.
            if not client.finished:
                client_alive = True
                time.sleep(.2)
                break

    for provider in providers:
        provider.shutdown()

    return True


#=======================================================================================================================
# CommunicationThread
#=======================================================================================================================
class CommunicationThread(threading.Thread):
    """XML-RPC server thread: hands out batches of tests to one client
    process and relays its start/result notifications back to the IDE."""

    def __init__(self, tests_queue):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.queue = tests_queue
        self.finished = False
        from _pydev_bundle.pydev_imports import SimpleXMLRPCServer

        # This is a hack to patch slow socket.getfqdn calls that
        # BaseHTTPServer (and its subclasses) make.
        # See: http://bugs.python.org/issue6085
        # See: http://www.answermysearches.com/xmlrpc-server-slow-in-python-how-to-fix/2140/
        try:
            import BaseHTTPServer
            def _bare_address_string(self):
                host, port = self.client_address[:2]
                return '%s' % host
            BaseHTTPServer.BaseHTTPRequestHandler.address_string = _bare_address_string
        except:
            pass
        # End hack.

        # Create server
        from _pydev_bundle import pydev_localhost
        # Port 0 lets the OS pick a free port; the real port is read back
        # from the socket below and published via self.port.
        server = SimpleXMLRPCServer((pydev_localhost.get_localhost(), 0), logRequests=False)
        server.register_function(self.GetTestsToRun)
        server.register_function(self.notifyStartTest)
        server.register_function(self.notifyTest)
        server.register_function(self.notifyCommands)
        self.port = server.socket.getsockname()[1]
        self.server = server

    def GetTestsToRun(self, job_id):
        '''
        @param job_id:

        @return: list(str)
            Each entry is a string in the format: filename|Test.testName
        '''
        try:
            ret = self.queue.get(block=False)
            return ret
        except: #Any exception getting from the queue (empty or not) means we finished our work on providing the tests.
            self.finished = True
            return []

    def notifyCommands(self, job_id, commands):
        #Batch notification.
        # Each command is (method_name, args, kwargs); dispatch to the
        # matching notify* method on self.
        for command in commands:
            getattr(self, command[0])(job_id, *command[1], **command[2])
        return True

    def notifyStartTest(self, job_id, *args, **kwargs):
        # Forward to the IDE-side XML-RPC notifier.
        pydev_runfiles_xml_rpc.notifyStartTest(*args, **kwargs)
        return True

    def notifyTest(self, job_id, *args, **kwargs):
        # Forward a finished-test notification to the IDE-side notifier.
        pydev_runfiles_xml_rpc.notifyTest(*args, **kwargs)
        return True

    def shutdown(self):
        # Newer servers expose shutdown(); older ones are stopped by
        # flipping the flag checked in run().
        if hasattr(self.server, 'shutdown'):
            self.server.shutdown()
        else:
            self._shutdown = True

    def run(self):
        if hasattr(self.server, 'shutdown'):
            self.server.serve_forever()
        else:
            self._shutdown = False
            while not self._shutdown:
                self.server.handle_request()


#=======================================================================================================================
# Client
#=======================================================================================================================
class ClientThread(threading.Thread):
    """Launches one worker subprocess (pydev_runfiles_parallel_client) and
    waits for it; sets self.finished when the subprocess exits."""

    def __init__(self, job_id, port, verbosity, coverage_output_file=None, coverage_include=None):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.port = port
        self.job_id = job_id
        self.verbosity = verbosity
        self.finished = False
        self.coverage_output_file = coverage_output_file
        self.coverage_include = coverage_include

    def _reader_thread(self, pipe, target):
        # Pumps a subprocess pipe to the given stream, one byte at a time.
        while True:
            target.write(pipe.read(1))

    def run(self):
        try:
            from _pydev_runfiles import pydev_runfiles_parallel_client
            #TODO: Support Jython:
            #
            #For jython, instead of using sys.executable, we should use:
            #r'D:\bin\jdk_1_5_09\bin\java.exe',
            #'-classpath',
            #'D:/bin/jython-2.2.1/jython.jar',
            #'org.python.util.jython',

            args = [
                sys.executable,
                pydev_runfiles_parallel_client.__file__,
                str(self.job_id),
                str(self.port),
                str(self.verbosity),
            ]

            if self.coverage_output_file and self.coverage_include:
                args.append(self.coverage_output_file)
                args.append(self.coverage_include)

            import subprocess
            if False:
                proc = subprocess.Popen(args, env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

                thread.start_new_thread(self._reader_thread,(proc.stdout, sys.stdout))

                # NOTE(review): thread.start_new_thread takes positional
                # (function, args); this keyword form would raise TypeError
                # if ever executed. Latent only, since this branch is dead
                # (guarded by `if False:` above) — confirm before enabling.
                thread.start_new_thread(target=self._reader_thread,args=(proc.stderr, sys.stderr))
            else:
                proc = subprocess.Popen(args, env=os.environ, shell=False)
                proc.wait()
        finally:
            # Always signal completion so execute_tests_in_parallel's
            # polling loop can terminate, even if Popen failed.
            self.finished = True
apache-2.0
VenturaDelMonte/staticwebanalyzer
SDK/mechanize-0.2.5/mechanize/_html.py
132
20888
"""HTML handling. Copyright 2003-2006 John J. Lee <jjl@pobox.com> This code is free software; you can redistribute it and/or modify it under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt included with the distribution). """ import codecs import copy import htmlentitydefs import re import _sgmllib_copy as sgmllib import _beautifulsoup import _form from _headersutil import split_header_words, is_html as _is_html import _request import _rfc3986 DEFAULT_ENCODING = "latin-1" COMPRESS_RE = re.compile(r"\s+") class CachingGeneratorFunction(object): """Caching wrapper around a no-arguments iterable.""" def __init__(self, iterable): self._cache = [] # wrap iterable to make it non-restartable (otherwise, repeated # __call__ would give incorrect results) self._iterator = iter(iterable) def __call__(self): cache = self._cache for item in cache: yield item for item in self._iterator: cache.append(item) yield item class EncodingFinder: def __init__(self, default_encoding): self._default_encoding = default_encoding def encoding(self, response): # HTTPEquivProcessor may be in use, so both HTTP and HTTP-EQUIV # headers may be in the response. HTTP-EQUIV headers come last, # so try in order from first to last. 
for ct in response.info().getheaders("content-type"): for k, v in split_header_words([ct])[0]: if k == "charset": encoding = v try: codecs.lookup(v) except LookupError: continue else: return encoding return self._default_encoding class ResponseTypeFinder: def __init__(self, allow_xhtml): self._allow_xhtml = allow_xhtml def is_html(self, response, encoding): ct_hdrs = response.info().getheaders("content-type") url = response.geturl() # XXX encoding return _is_html(ct_hdrs, url, self._allow_xhtml) class Args(object): # idea for this argument-processing trick is from Peter Otten def __init__(self, args_map): self.__dict__["dictionary"] = dict(args_map) def __getattr__(self, key): try: return self.dictionary[key] except KeyError: return getattr(self.__class__, key) def __setattr__(self, key, value): if key == "dictionary": raise AttributeError() self.dictionary[key] = value def form_parser_args( select_default=False, form_parser_class=None, request_class=None, backwards_compat=False, ): return Args(locals()) class Link: def __init__(self, base_url, url, text, tag, attrs): assert None not in [url, tag, attrs] self.base_url = base_url self.absolute_url = _rfc3986.urljoin(base_url, url) self.url, self.text, self.tag, self.attrs = url, text, tag, attrs def __cmp__(self, other): try: for name in "url", "text", "tag", "attrs": if getattr(self, name) != getattr(other, name): return -1 except AttributeError: return -1 return 0 def __repr__(self): return "Link(base_url=%r, url=%r, text=%r, tag=%r, attrs=%r)" % ( self.base_url, self.url, self.text, self.tag, self.attrs) class LinksFactory: def __init__(self, link_parser_class=None, link_class=Link, urltags=None, ): import _pullparser if link_parser_class is None: link_parser_class = _pullparser.TolerantPullParser self.link_parser_class = link_parser_class self.link_class = link_class if urltags is None: urltags = { "a": "href", "area": "href", "frame": "src", "iframe": "src", } self.urltags = urltags self._response = None 
self._encoding = None def set_response(self, response, base_url, encoding): self._response = response self._encoding = encoding self._base_url = base_url def links(self): """Return an iterator that provides links of the document.""" response = self._response encoding = self._encoding base_url = self._base_url p = self.link_parser_class(response, encoding=encoding) try: for token in p.tags(*(self.urltags.keys()+["base"])): if token.type == "endtag": continue if token.data == "base": base_href = dict(token.attrs).get("href") if base_href is not None: base_url = base_href continue attrs = dict(token.attrs) tag = token.data text = None # XXX use attr_encoding for ref'd doc if that doc does not # provide one by other means #attr_encoding = attrs.get("charset") url = attrs.get(self.urltags[tag]) # XXX is "" a valid URL? if not url: # Probably an <A NAME="blah"> link or <AREA NOHREF...>. # For our purposes a link is something with a URL, so # ignore this. continue url = _rfc3986.clean_url(url, encoding) if tag == "a": if token.type != "startendtag": # hmm, this'd break if end tag is missing text = p.get_compressed_text(("endtag", tag)) # but this doesn't work for e.g. # <a href="blah"><b>Andy</b></a> #text = p.get_compressed_text() yield Link(base_url, url, text, tag, token.attrs) except sgmllib.SGMLParseError, exc: raise _form.ParseError(exc) class FormsFactory: """Makes a sequence of objects satisfying HTMLForm interface. After calling .forms(), the .global_form attribute is a form object containing all controls not a descendant of any FORM element. For constructor argument docs, see ParseResponse argument docs. 
""" def __init__(self, select_default=False, form_parser_class=None, request_class=None, backwards_compat=False, ): self.select_default = select_default if form_parser_class is None: form_parser_class = _form.FormParser self.form_parser_class = form_parser_class if request_class is None: request_class = _request.Request self.request_class = request_class self.backwards_compat = backwards_compat self._response = None self.encoding = None self.global_form = None def set_response(self, response, encoding): self._response = response self.encoding = encoding self.global_form = None def forms(self): encoding = self.encoding forms = _form.ParseResponseEx( self._response, select_default=self.select_default, form_parser_class=self.form_parser_class, request_class=self.request_class, encoding=encoding, _urljoin=_rfc3986.urljoin, _urlparse=_rfc3986.urlsplit, _urlunparse=_rfc3986.urlunsplit, ) self.global_form = forms[0] return forms[1:] class TitleFactory: def __init__(self): self._response = self._encoding = None def set_response(self, response, encoding): self._response = response self._encoding = encoding def _get_title_text(self, parser): import _pullparser text = [] tok = None while 1: try: tok = parser.get_token() except _pullparser.NoMoreTokensError: break if tok.type == "data": text.append(str(tok)) elif tok.type == "entityref": t = unescape("&%s;" % tok.data, parser._entitydefs, parser.encoding) text.append(t) elif tok.type == "charref": t = unescape_charref(tok.data, parser.encoding) text.append(t) elif tok.type in ["starttag", "endtag", "startendtag"]: tag_name = tok.data if tok.type == "endtag" and tag_name == "title": break text.append(str(tok)) return COMPRESS_RE.sub(" ", "".join(text).strip()) def title(self): import _pullparser p = _pullparser.TolerantPullParser( self._response, encoding=self._encoding) try: try: p.get_tag("title") except _pullparser.NoMoreTokensError: return None else: return self._get_title_text(p) except sgmllib.SGMLParseError, exc: raise 
_form.ParseError(exc) def unescape(data, entities, encoding): if data is None or "&" not in data: return data def replace_entities(match): ent = match.group() if ent[1] == "#": return unescape_charref(ent[2:-1], encoding) repl = entities.get(ent[1:-1]) if repl is not None: repl = unichr(repl) if type(repl) != type(""): try: repl = repl.encode(encoding) except UnicodeError: repl = ent else: repl = ent return repl return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data) def unescape_charref(data, encoding): name, base = data, 10 if name.startswith("x"): name, base= name[1:], 16 uc = unichr(int(name, base)) if encoding is None: return uc else: try: repl = uc.encode(encoding) except UnicodeError: repl = "&#%s;" % data return repl class MechanizeBs(_beautifulsoup.BeautifulSoup): _entitydefs = htmlentitydefs.name2codepoint # don't want the magic Microsoft-char workaround PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda(x):x.group(1) + ' />'), (re.compile('<!\s+([^<>]*)>'), lambda(x):'<!' 
+ x.group(1) + '>') ] def __init__(self, encoding, text=None, avoidParserProblems=True, initialTextIsEverything=True): self._encoding = encoding _beautifulsoup.BeautifulSoup.__init__( self, text, avoidParserProblems, initialTextIsEverything) def handle_charref(self, ref): t = unescape("&#%s;"%ref, self._entitydefs, self._encoding) self.handle_data(t) def handle_entityref(self, ref): t = unescape("&%s;"%ref, self._entitydefs, self._encoding) self.handle_data(t) def unescape_attrs(self, attrs): escaped_attrs = [] for key, val in attrs: val = unescape(val, self._entitydefs, self._encoding) escaped_attrs.append((key, val)) return escaped_attrs class RobustLinksFactory: compress_re = COMPRESS_RE def __init__(self, link_parser_class=None, link_class=Link, urltags=None, ): if link_parser_class is None: link_parser_class = MechanizeBs self.link_parser_class = link_parser_class self.link_class = link_class if urltags is None: urltags = { "a": "href", "area": "href", "frame": "src", "iframe": "src", } self.urltags = urltags self._bs = None self._encoding = None self._base_url = None def set_soup(self, soup, base_url, encoding): self._bs = soup self._base_url = base_url self._encoding = encoding def links(self): bs = self._bs base_url = self._base_url encoding = self._encoding for ch in bs.recursiveChildGenerator(): if (isinstance(ch, _beautifulsoup.Tag) and ch.name in self.urltags.keys()+["base"]): link = ch attrs = bs.unescape_attrs(link.attrs) attrs_dict = dict(attrs) if link.name == "base": base_href = attrs_dict.get("href") if base_href is not None: base_url = base_href continue url_attr = self.urltags[link.name] url = attrs_dict.get(url_attr) if not url: continue url = _rfc3986.clean_url(url, encoding) text = link.fetchText(lambda t: True) if not text: # follow _pullparser's weird behaviour rigidly if link.name == "a": text = "" else: text = None else: text = self.compress_re.sub(" ", " ".join(text).strip()) yield Link(base_url, url, text, link.name, attrs) class 
RobustFormsFactory(FormsFactory): def __init__(self, *args, **kwds): args = form_parser_args(*args, **kwds) if args.form_parser_class is None: args.form_parser_class = _form.RobustFormParser FormsFactory.__init__(self, **args.dictionary) def set_response(self, response, encoding): self._response = response self.encoding = encoding class RobustTitleFactory: def __init__(self): self._bs = self._encoding = None def set_soup(self, soup, encoding): self._bs = soup self._encoding = encoding def title(self): title = self._bs.first("title") if title == _beautifulsoup.Null: return None else: inner_html = "".join([str(node) for node in title.contents]) return COMPRESS_RE.sub(" ", inner_html.strip()) class Factory: """Factory for forms, links, etc. This interface may expand in future. Public methods: set_request_class(request_class) set_response(response) forms() links() Public attributes: Note that accessing these attributes may raise ParseError. encoding: string specifying the encoding of response if it contains a text document (this value is left unspecified for documents that do not have an encoding, e.g. an image file) is_html: true if response contains an HTML document (XHTML may be regarded as HTML too) title: page title, or None if no title or not HTML global_form: form object containing all controls that are not descendants of any FORM element, or None if the forms_factory does not support supplying a global form """ LAZY_ATTRS = ["encoding", "is_html", "title", "global_form"] def __init__(self, forms_factory, links_factory, title_factory, encoding_finder=EncodingFinder(DEFAULT_ENCODING), response_type_finder=ResponseTypeFinder(allow_xhtml=False), ): """ Pass keyword arguments only. default_encoding: character encoding to use if encoding cannot be determined (or guessed) from the response. You should turn on HTTP-EQUIV handling if you want the best chance of getting this right without resorting to this default. 
The default value of this parameter (currently latin-1) may change in future. """ self._forms_factory = forms_factory self._links_factory = links_factory self._title_factory = title_factory self._encoding_finder = encoding_finder self._response_type_finder = response_type_finder self.set_response(None) def set_request_class(self, request_class): """Set request class (mechanize.Request by default). HTMLForm instances returned by .forms() will return instances of this class when .click()ed. """ self._forms_factory.request_class = request_class def set_response(self, response): """Set response. The response must either be None or implement the same interface as objects returned by mechanize.urlopen(). """ self._response = response self._forms_genf = self._links_genf = None self._get_title = None for name in self.LAZY_ATTRS: try: delattr(self, name) except AttributeError: pass def __getattr__(self, name): if name not in self.LAZY_ATTRS: return getattr(self.__class__, name) if name == "encoding": self.encoding = self._encoding_finder.encoding( copy.copy(self._response)) return self.encoding elif name == "is_html": self.is_html = self._response_type_finder.is_html( copy.copy(self._response), self.encoding) return self.is_html elif name == "title": if self.is_html: self.title = self._title_factory.title() else: self.title = None return self.title elif name == "global_form": self.forms() return self.global_form def forms(self): """Return iterable over HTMLForm-like objects. Raises mechanize.ParseError on failure. """ # this implementation sets .global_form as a side-effect, for benefit # of __getattr__ impl if self._forms_genf is None: try: self._forms_genf = CachingGeneratorFunction( self._forms_factory.forms()) except: # XXXX define exception! self.set_response(self._response) raise self.global_form = getattr( self._forms_factory, "global_form", None) return self._forms_genf() def links(self): """Return iterable over mechanize.Link-like objects. 
Raises mechanize.ParseError on failure. """ if self._links_genf is None: try: self._links_genf = CachingGeneratorFunction( self._links_factory.links()) except: # XXXX define exception! self.set_response(self._response) raise return self._links_genf() class DefaultFactory(Factory): """Based on sgmllib.""" def __init__(self, i_want_broken_xhtml_support=False): Factory.__init__( self, forms_factory=FormsFactory(), links_factory=LinksFactory(), title_factory=TitleFactory(), response_type_finder=ResponseTypeFinder( allow_xhtml=i_want_broken_xhtml_support), ) def set_response(self, response): Factory.set_response(self, response) if response is not None: self._forms_factory.set_response( copy.copy(response), self.encoding) self._links_factory.set_response( copy.copy(response), response.geturl(), self.encoding) self._title_factory.set_response( copy.copy(response), self.encoding) class RobustFactory(Factory): """Based on BeautifulSoup, hopefully a bit more robust to bad HTML than is DefaultFactory. """ def __init__(self, i_want_broken_xhtml_support=False, soup_class=None): Factory.__init__( self, forms_factory=RobustFormsFactory(), links_factory=RobustLinksFactory(), title_factory=RobustTitleFactory(), response_type_finder=ResponseTypeFinder( allow_xhtml=i_want_broken_xhtml_support), ) if soup_class is None: soup_class = MechanizeBs self._soup_class = soup_class def set_response(self, response): Factory.set_response(self, response) if response is not None: data = response.read() soup = self._soup_class(self.encoding, data) self._forms_factory.set_response( copy.copy(response), self.encoding) self._links_factory.set_soup( soup, response.geturl(), self.encoding) self._title_factory.set_soup(soup, self.encoding)
mit
tornadozou/tensorflow
tensorflow/contrib/learn/python/learn/estimators/rnn_common_test.py
111
4836
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.rnn_common."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test


class RnnCommonTest(test.TestCase):
  """Unit tests for the sequence-masking helpers in rnn_common."""

  def testMaskActivationsAndLabels(self):
    """Test `mask_activations_and_labels`."""
    batch_size = 4
    padded_length = 6
    num_classes = 4
    np.random.seed(1234)  # fixed seed keeps the random lengths deterministic
    sequence_length = np.random.randint(0, padded_length + 1, batch_size)
    activations = np.random.rand(batch_size, padded_length, num_classes)
    labels = np.random.randint(0, num_classes, [batch_size, padded_length])
    (activations_masked_t,
     labels_masked_t) = rnn_common.mask_activations_and_labels(
         constant_op.constant(activations, dtype=dtypes.float32),
         constant_op.constant(labels, dtype=dtypes.int32),
         constant_op.constant(sequence_length, dtype=dtypes.int32))

    with self.test_session() as sess:
      activations_masked, labels_masked = sess.run(
          [activations_masked_t, labels_masked_t])

    # Masking flattens the batch: one row per valid (non-padding) step.
    expected_activations_shape = [sum(sequence_length), num_classes]
    np.testing.assert_equal(
        expected_activations_shape, activations_masked.shape,
        'Wrong activations shape. Expected {}; got {}.'.format(
            expected_activations_shape, activations_masked.shape))

    expected_labels_shape = [sum(sequence_length)]
    np.testing.assert_equal(expected_labels_shape, labels_masked.shape,
                            'Wrong labels shape. Expected {}; got {}.'.format(
                                expected_labels_shape, labels_masked.shape))

    # Walk the flattened output in order and check every valid step against
    # the corresponding (batch, time) entry of the padded inputs.
    masked_index = 0
    for i in range(batch_size):
      for j in range(sequence_length[i]):
        actual_activations = activations_masked[masked_index]
        expected_activations = activations[i, j, :]
        np.testing.assert_almost_equal(
            expected_activations,
            actual_activations,
            err_msg='Unexpected logit value at index [{}, {}, :].'
            ' Expected {}; got {}.'.format(i, j, expected_activations,
                                           actual_activations))

        actual_labels = labels_masked[masked_index]
        expected_labels = labels[i, j]
        np.testing.assert_almost_equal(
            expected_labels,
            actual_labels,
            err_msg='Unexpected logit value at index [{}, {}].'
            ' Expected {}; got {}.'.format(i, j, expected_labels,
                                           actual_labels))
        masked_index += 1

  def testSelectLastActivations(self):
    """Test `select_last_activations`."""
    batch_size = 4
    padded_length = 6
    num_classes = 4
    np.random.seed(4444)
    # NOTE(review): lengths are sampled from [0, padded_length]; for a
    # sampled length of 0 the reference below indexes [i, -1, :] (the last
    # padded step). Presumably this seed avoids zero lengths — confirm.
    sequence_length = np.random.randint(0, padded_length + 1, batch_size)
    activations = np.random.rand(batch_size, padded_length, num_classes)
    last_activations_t = rnn_common.select_last_activations(
        constant_op.constant(activations, dtype=dtypes.float32),
        constant_op.constant(sequence_length, dtype=dtypes.int32))

    # NOTE(review): uses a raw session.Session() where the other test uses
    # self.test_session() — inconsistent but behaviorally equivalent here.
    with session.Session() as sess:
      last_activations = sess.run(last_activations_t)

    expected_activations_shape = [batch_size, num_classes]
    np.testing.assert_equal(
        expected_activations_shape, last_activations.shape,
        'Wrong activations shape. Expected {}; got {}.'.format(
            expected_activations_shape, last_activations.shape))

    # Each output row must equal the input row at the final valid timestep.
    for i in range(batch_size):
      actual_activations = last_activations[i, :]
      expected_activations = activations[i, sequence_length[i] - 1, :]
      np.testing.assert_almost_equal(
          expected_activations,
          actual_activations,
          err_msg='Unexpected logit value at index [{}, :].'
          ' Expected {}; got {}.'.format(i, expected_activations,
                                         actual_activations))


if __name__ == '__main__':
  test.main()
apache-2.0
RomanZavodskikh/mipt-mips-2015
libs/gtest-1.6.0/test/gtest_catch_exceptions_test.py
414
9312
#!/usr/bin/env python # # Copyright 2010 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests Google Test's exception catching behavior. This script invokes gtest_catch_exceptions_test_ and gtest_catch_exceptions_ex_test_ (programs written with Google Test) and verifies their output. """ __author__ = 'vladl@google.com (Vlad Losev)' import os import gtest_test_utils # Constants. 
FLAG_PREFIX = '--gtest_' LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests' NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0' FILTER_FLAG = FLAG_PREFIX + 'filter' # Path to the gtest_catch_exceptions_ex_test_ binary, compiled with # exceptions enabled. EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath( 'gtest_catch_exceptions_ex_test_') # Path to the gtest_catch_exceptions_test_ binary, compiled with # exceptions disabled. EXE_PATH = gtest_test_utils.GetTestExecutablePath( 'gtest_catch_exceptions_no_ex_test_') TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST if SUPPORTS_SEH_EXCEPTIONS: BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output # The tests. if SUPPORTS_SEH_EXCEPTIONS: # pylint:disable-msg=C6302 class CatchSehExceptionsTest(gtest_test_utils.TestCase): """Tests exception-catching behavior.""" def TestSehExceptions(self, test_output): self.assert_('SEH exception with code 0x2a thrown ' 'in the test fixture\'s constructor' in test_output) self.assert_('SEH exception with code 0x2a thrown ' 'in the test fixture\'s destructor' in test_output) self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()' in test_output) self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()' in test_output) self.assert_('SEH exception with code 0x2a thrown in SetUp()' in test_output) self.assert_('SEH exception with code 0x2a thrown in TearDown()' in test_output) self.assert_('SEH exception with code 0x2a thrown in the test body' in test_output) def testCatchesSehExceptionsWithCxxExceptionsEnabled(self): self.TestSehExceptions(EX_BINARY_OUTPUT) def testCatchesSehExceptionsWithCxxExceptionsDisabled(self): self.TestSehExceptions(BINARY_OUTPUT) class CatchCxxExceptionsTest(gtest_test_utils.TestCase): """Tests C++ exception-catching behavior. 
Tests in this test case verify that: * C++ exceptions are caught and logged as C++ (not SEH) exceptions * Exception thrown affect the remainder of the test work flow in the expected manner. """ def testCatchesCxxExceptionsInFixtureConstructor(self): self.assert_('C++ exception with description ' '"Standard C++ exception" thrown ' 'in the test fixture\'s constructor' in EX_BINARY_OUTPUT) self.assert_('unexpected' not in EX_BINARY_OUTPUT, 'This failure belongs in this test only if ' '"CxxExceptionInConstructorTest" (no quotes) ' 'appears on the same line as words "called unexpectedly"') def testCatchesCxxExceptionsInFixtureDestructor(self): self.assert_('C++ exception with description ' '"Standard C++ exception" thrown ' 'in the test fixture\'s destructor' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) def testCatchesCxxExceptionsInSetUpTestCase(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in SetUpTestCase()' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest constructor ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTestCaseTest test body ' 'called as expected.' 
in EX_BINARY_OUTPUT) def testCatchesCxxExceptionsInTearDownTestCase(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in TearDownTestCase()' in EX_BINARY_OUTPUT) def testCatchesCxxExceptionsInSetUp(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in SetUp()' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInSetUpTest::TearDown() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('unexpected' not in EX_BINARY_OUTPUT, 'This failure belongs in this test only if ' '"CxxExceptionInSetUpTest" (no quotes) ' 'appears on the same line as words "called unexpectedly"') def testCatchesCxxExceptionsInTearDown(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in TearDown()' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTearDownTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT) def testCatchesCxxExceptionsInTestBody(self): self.assert_('C++ exception with description "Standard C++ exception"' ' thrown in the test body' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTestBodyTest destructor ' 'called as expected.' in EX_BINARY_OUTPUT) self.assert_('CxxExceptionInTestBodyTest::TearDown() ' 'called as expected.' in EX_BINARY_OUTPUT) def testCatchesNonStdCxxExceptions(self): self.assert_('Unknown C++ exception thrown in the test body' in EX_BINARY_OUTPUT) def testUnhandledCxxExceptionsAbortTheProgram(self): # Filters out SEH exception tests on Windows. Unhandled SEH exceptions # cause tests to show pop-up windows there. 
FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*' # By default, Google Test doesn't catch the exceptions. uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess( [EX_EXE_PATH, NO_CATCH_EXCEPTIONS_FLAG, FITLER_OUT_SEH_TESTS_FLAG]).output self.assert_('Unhandled C++ exception terminating the program' in uncaught_exceptions_ex_binary_output) self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output) if __name__ == '__main__': gtest_test_utils.Main()
mit
chouseknecht/ansible
lib/ansible/modules/cloud/packet/packet_device.py
18
21614
#!/usr/bin/python # (c) 2016, Tomas Karasek <tom.to.the.k@gmail.com> # (c) 2016, Matt Baldwin <baldwin@stackpointcloud.com> # (c) 2016, Thibaud Morel l'Horset <teebes@gmail.com> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: packet_device short_description: Manage a bare metal server in the Packet Host. description: - Manage a bare metal server in the Packet Host (a "device" in the API terms). - When the machine is created it can optionally wait for public IP address, or for active state. - This module has a dependency on packet >= 1.0. - API is documented at U(https://www.packet.net/developers/api/devices). version_added: "2.3" author: - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com> - Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com> - Thibaud Morel l'Horset (@teebes) <teebes@gmail.com> options: auth_token: description: - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN). count: description: - The number of devices to create. Count number can be included in hostname via the %d string formatter. default: 1 count_offset: description: - From which number to start the count. default: 1 device_ids: description: - List of device IDs on which to operate. facility: description: - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/). features: description: - Dict with "features" for device creation. See Packet API docs for details. hostnames: description: - A hostname of a device, or a list of hostnames. - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count). - If only one hostname, it might be expanded to list if I(count)>1. 
aliases: [name] locked: description: - Whether to lock a created device. default: false version_added: "2.4" aliases: [lock] type: bool operating_system: description: - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/). plan: description: - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/). project_id: description: - ID of project of the device. required: true state: description: - Desired state of the device. - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns. - If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout). choices: [present, absent, active, inactive, rebooted] default: present user_data: description: - Userdata blob made available to the machine wait_for_public_IPv: description: - Whether to wait for the instance to be assigned a public IPv4/IPv6 address. - If set to 4, it will wait until IPv4 is assigned to the instance. - If set to 6, wait until public IPv6 is assigned to the instance. choices: [4,6] version_added: "2.4" wait_timeout: description: - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state). - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice. default: 900 ipxe_script_url: description: - URL of custom iPXE script for provisioning. - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe). version_added: "2.4" always_pxe: description: - Persist PXE as the first boot option. - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE. 
default: false version_added: "2.4" type: bool requirements: - "packet-python >= 1.35" notes: - Doesn't support check mode. ''' EXAMPLES = ''' # All the examples assume that you have your Packet api token in env var PACKET_API_TOKEN. # You can also pass it to the auth_token parameter of the module instead. # Creating devices - name: create 1 device hosts: localhost tasks: - packet_device: project_id: 89b497ee-5afc-420a-8fb5-56984898f4df hostnames: myserver operating_system: ubuntu_16_04 plan: baremetal_0 facility: sjc1 # Create the same device and wait until it is in state "active", (when it's # ready for other API operations). Fail if the devices in not "active" in # 10 minutes. - name: create device and wait up to 10 minutes for active state hosts: localhost tasks: - packet_device: project_id: 89b497ee-5afc-420a-8fb5-56984898f4df hostnames: myserver operating_system: ubuntu_16_04 plan: baremetal_0 facility: sjc1 state: active wait_timeout: 600 - name: create 3 ubuntu devices called server-01, server-02 and server-03 hosts: localhost tasks: - packet_device: project_id: 89b497ee-5afc-420a-8fb5-56984898f4df hostnames: server-%02d count: 3 operating_system: ubuntu_16_04 plan: baremetal_0 facility: sjc1 - name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH hosts: localhost tasks: - name: create 3 devices and register their facts packet_device: hostnames: [coreos-one, coreos-two, coreos-three] operating_system: coreos_stable plan: baremetal_0 facility: ewr1 locked: true project_id: 89b497ee-5afc-420a-8fb5-56984898f4df wait_for_public_IPv: 4 user_data: | #cloud-config ssh_authorized_keys: - {{ lookup('file', 'my_packet_sshkey') }} coreos: etcd: discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 addr: $private_ipv4:4001 peer-addr: $private_ipv4:7001 fleet: public-ip: $private_ipv4 units: - name: etcd.service command: start - name: fleet.service command: start register: newhosts - name: wait for ssh wait_for: 
delay: 1 host: "{{ item.public_ipv4 }}" port: 22 state: started timeout: 500 with_items: "{{ newhosts.devices }}" # Other states of devices - name: remove 3 devices by uuid hosts: localhost tasks: - packet_device: project_id: 89b497ee-5afc-420a-8fb5-56984898f4df state: absent device_ids: - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8 - 2eb4faf8-a638-4ac7-8f47-86fe514c3043 - 6bb4faf8-a638-4ac7-8f47-86fe514c301f ''' RETURN = ''' changed: description: True if a device was altered in any way (created, modified or removed) type: bool sample: True returned: success devices: description: Information about each device that was processed type: list sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7", "public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12", "tags": [], "locked": false, "state": "provisioning", "public_ipv6": ""2604:1380:2:5200::3"}]' returned: success ''' # NOQA import os import re import time import uuid import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native HAS_PACKET_SDK = True try: import packet except ImportError: HAS_PACKET_SDK = False from ansible.module_utils.basic import AnsibleModule NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]') HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE) MAX_DEVICES = 100 PACKET_DEVICE_STATES = ( 'queued', 'provisioning', 'failed', 'powering_on', 'active', 'powering_off', 'inactive', 'rebooting', ) PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN" ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present'] def serialize_device(device): """ Standard representation for a device as returned by various tasks:: { 'id': 'device_id' 'hostname': 'device_hostname', 'tags': [], 'locked': false, 'state': 'provisioning', 'ip_addresses': [ { "address": "147.75.194.227", "address_family": 4, "public": true }, { "address": "2604:1380:2:5200::3", "address_family": 6, "public": true }, { "address": "10.100.11.129", 
"address_family": 4, "public": false } ], "private_ipv4": "10.100.11.129", "public_ipv4": "147.75.194.227", "public_ipv6": "2604:1380:2:5200::3", } """ device_data = {} device_data['id'] = device.id device_data['hostname'] = device.hostname device_data['tags'] = device.tags device_data['locked'] = device.locked device_data['state'] = device.state device_data['ip_addresses'] = [ { 'address': addr_data['address'], 'address_family': addr_data['address_family'], 'public': addr_data['public'], } for addr_data in device.ip_addresses ] # Also include each IPs as a key for easier lookup in roles. # Key names: # - public_ipv4 # - public_ipv6 # - private_ipv4 # - private_ipv6 (if there is one) for ipdata in device_data['ip_addresses']: if ipdata['public']: if ipdata['address_family'] == 6: device_data['public_ipv6'] = ipdata['address'] elif ipdata['address_family'] == 4: device_data['public_ipv4'] = ipdata['address'] elif not ipdata['public']: if ipdata['address_family'] == 6: # Packet doesn't give public ipv6 yet, but maybe one # day they will device_data['private_ipv6'] = ipdata['address'] elif ipdata['address_family'] == 4: device_data['private_ipv4'] = ipdata['address'] return device_data def is_valid_hostname(hostname): return re.match(HOSTNAME_RE, hostname) is not None def is_valid_uuid(myuuid): try: val = uuid.UUID(myuuid, version=4) except ValueError: return False return str(val) == myuuid def listify_string_name_or_id(s): if ',' in s: return s.split(',') else: return [s] def get_hostname_list(module): # hostname is a list-typed param, so I guess it should return list # (and it does, in Ansible 2.2.1) but in order to be defensive, # I keep here the code to convert an eventual string to list hostnames = module.params.get('hostnames') count = module.params.get('count') count_offset = module.params.get('count_offset') if isinstance(hostnames, str): hostnames = listify_string_name_or_id(hostnames) if not isinstance(hostnames, list): raise Exception("name %s is not 
convertible to list" % hostnames) # at this point, hostnames is a list hostnames = [h.strip() for h in hostnames] if (len(hostnames) > 1) and (count > 1): _msg = ("If you set count>1, you should only specify one hostname " "with the %d formatter, not a list of hostnames.") raise Exception(_msg) if (len(hostnames) == 1) and (count > 0): hostname_spec = hostnames[0] count_range = range(count_offset, count_offset + count) if re.search(r"%\d{0,2}d", hostname_spec): hostnames = [hostname_spec % i for i in count_range] elif count > 1: hostname_spec = '%s%%02d' % hostname_spec hostnames = [hostname_spec % i for i in count_range] for hn in hostnames: if not is_valid_hostname(hn): raise Exception("Hostname '%s' does not seem to be valid" % hn) if len(hostnames) > MAX_DEVICES: raise Exception("You specified too many hostnames, max is %d" % MAX_DEVICES) return hostnames def get_device_id_list(module): device_ids = module.params.get('device_ids') if isinstance(device_ids, str): device_ids = listify_string_name_or_id(device_ids) device_ids = [di.strip() for di in device_ids] for di in device_ids: if not is_valid_uuid(di): raise Exception("Device ID '%s' does not seem to be valid" % di) if len(device_ids) > MAX_DEVICES: raise Exception("You specified too many devices, max is %d" % MAX_DEVICES) return device_ids def create_single_device(module, packet_conn, hostname): for param in ('hostnames', 'operating_system', 'plan'): if not module.params.get(param): raise Exception("%s parameter is required for new device." 
% param) project_id = module.params.get('project_id') plan = module.params.get('plan') user_data = module.params.get('user_data') facility = module.params.get('facility') operating_system = module.params.get('operating_system') locked = module.params.get('locked') ipxe_script_url = module.params.get('ipxe_script_url') always_pxe = module.params.get('always_pxe') if operating_system != 'custom_ipxe': for param in ('ipxe_script_url', 'always_pxe'): if module.params.get(param): raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param) device = packet_conn.create_device( project_id=project_id, hostname=hostname, plan=plan, facility=facility, operating_system=operating_system, userdata=user_data, locked=locked, ipxe_script_url=ipxe_script_url, always_pxe=always_pxe) return device def refresh_device_list(module, packet_conn, devices): device_ids = [d.id for d in devices] new_device_list = get_existing_devices(module, packet_conn) return [d for d in new_device_list if d.id in device_ids] def wait_for_devices_active(module, packet_conn, watched_devices): wait_timeout = module.params.get('wait_timeout') wait_timeout = time.time() + wait_timeout refreshed = watched_devices while wait_timeout > time.time(): refreshed = refresh_device_list(module, packet_conn, watched_devices) if all(d.state == 'active' for d in refreshed): return refreshed time.sleep(5) raise Exception("Waiting for state \"active\" timed out for devices: %s" % [d.hostname for d in refreshed if d.state != "active"]) def wait_for_public_IPv(module, packet_conn, created_devices): def has_public_ip(addr_list, ip_v): return any([a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list]) def all_have_public_ip(ds, ip_v): return all([has_public_ip(d.ip_addresses, ip_v) for d in ds]) address_family = module.params.get('wait_for_public_IPv') wait_timeout = module.params.get('wait_timeout') wait_timeout = time.time() + wait_timeout while wait_timeout > 
time.time(): refreshed = refresh_device_list(module, packet_conn, created_devices) if all_have_public_ip(refreshed, address_family): return refreshed time.sleep(5) raise Exception("Waiting for IPv%d address timed out. Hostnames: %s" % (address_family, [d.hostname for d in created_devices])) def get_existing_devices(module, packet_conn): project_id = module.params.get('project_id') return packet_conn.list_devices( project_id, params={ 'per_page': MAX_DEVICES}) def get_specified_device_identifiers(module): if module.params.get('device_ids'): device_id_list = get_device_id_list(module) return {'ids': device_id_list, 'hostnames': []} elif module.params.get('hostnames'): hostname_list = get_hostname_list(module) return {'hostnames': hostname_list, 'ids': []} def act_on_devices(module, packet_conn, target_state): specified_identifiers = get_specified_device_identifiers(module) existing_devices = get_existing_devices(module, packet_conn) changed = False create_hostnames = [] if target_state in ['present', 'active', 'rebooted']: # states where we might create non-existing specified devices existing_devices_names = [ed.hostname for ed in existing_devices] create_hostnames = [hn for hn in specified_identifiers['hostnames'] if hn not in existing_devices_names] process_devices = [d for d in existing_devices if (d.id in specified_identifiers['ids']) or (d.hostname in specified_identifiers['hostnames'])] if target_state != 'present': _absent_state_map = {} for s in PACKET_DEVICE_STATES: _absent_state_map[s] = packet.Device.delete state_map = { 'absent': _absent_state_map, 'active': {'inactive': packet.Device.power_on, 'provisioning': None, 'rebooting': None }, 'inactive': {'active': packet.Device.power_off}, 'rebooted': {'active': packet.Device.reboot, 'inactive': packet.Device.power_on, 'provisioning': None, 'rebooting': None }, } # First do non-creation actions, it might be faster for d in process_devices: if d.state == target_state: continue if d.state in 
state_map[target_state]: api_operation = state_map[target_state].get(d.state) if api_operation is not None: api_operation(d) changed = True else: _msg = ( "I don't know how to process existing device %s from state %s " "to state %s" % (d.hostname, d.state, target_state)) raise Exception(_msg) # At last create missing devices created_devices = [] if create_hostnames: created_devices = [create_single_device(module, packet_conn, n) for n in create_hostnames] if module.params.get('wait_for_public_IPv'): created_devices = wait_for_public_IPv( module, packet_conn, created_devices) changed = True processed_devices = created_devices + process_devices if target_state == 'active': processed_devices = wait_for_devices_active( module, packet_conn, processed_devices) return { 'changed': changed, 'devices': [serialize_device(d) for d in processed_devices] } def main(): module = AnsibleModule( argument_spec=dict( auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR), no_log=True), count=dict(type='int', default=1), count_offset=dict(type='int', default=1), device_ids=dict(type='list'), facility=dict(), features=dict(type='dict'), hostnames=dict(type='list', aliases=['name']), locked=dict(type='bool', default=False, aliases=['lock']), operating_system=dict(), plan=dict(), project_id=dict(required=True), state=dict(choices=ALLOWED_STATES, default='present'), user_data=dict(default=None), wait_for_public_IPv=dict(type='int', choices=[4, 6]), wait_timeout=dict(type='int', default=900), ipxe_script_url=dict(default=''), always_pxe=dict(type='bool', default=False), ), required_one_of=[('device_ids', 'hostnames',)], mutually_exclusive=[ ('hostnames', 'device_ids'), ('count', 'device_ids'), ('count_offset', 'device_ids'), ] ) if not HAS_PACKET_SDK: module.fail_json(msg='packet required for this module') if not module.params.get('auth_token'): _fail_msg = ("if Packet API token is not in environment variable %s, " "the auth_token parameter is required" % 
PACKET_API_TOKEN_ENV_VAR) module.fail_json(msg=_fail_msg) auth_token = module.params.get('auth_token') packet_conn = packet.Manager(auth_token=auth_token) state = module.params.get('state') try: module.exit_json(**act_on_devices(module, packet_conn, state)) except Exception as e: module.fail_json(msg='failed to set device state %s, error: %s' % (state, to_native(e)), exception=traceback.format_exc()) if __name__ == '__main__': main()
gpl-3.0
shifter/grr
checks/stat_test.py
6
7329
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for stat checks.""" from grr.lib import flags from grr.lib import test_lib from grr.lib.checks import checks_test_lib class StatOnlyTests(checks_test_lib.HostCheckTest): @classmethod def setUpClass(cls): cls.LoadCheck("stat.yaml") def testRootPATHCheck(self): """Ensure root $PATH check detects files that non-root users can edit.""" data = [self.CreateStat("/usr/local/bin/hit-123", 50, 0, 0o0100640), self.CreateStat("/usr/local/bin/no-hit-123", 0, 6000, 0o0100440), self.CreateStat("/usr/local/bin/no-hit-234", 0, 0, 0o0100640), self.CreateStat("/usr/local/bin/hit-345", 70, 0, 0o0100660), self.CreateStat("/bin/hit-symlink-567", 70, 0, 0o0120777), self.CreateStat("/bin/no-hit-symlink-456", 0, 0, 0o0120777)] results = self.GenResults(["RootEnvPath"], [data]) check_id = "CIS-ROOT-PATH-HAS-FILES-WRITABLE-BY-NON-ROOT" sym = ("Found: Files in default $PATH of root can be modified " "by non-privileged users.") found = ["/usr/local/bin/hit-123 user: 50, group: 0, mode: -rw-r-----", "/usr/local/bin/hit-345 user: 70, group: 0, mode: -rw-rw----", "/bin/hit-symlink-567 user: 70, group: 0, mode: lrwxrwxrwx"] self.assertCheckDetectedAnom(check_id, results, sym, found) def testRootPATHDirCheck(self): """Ensure root $PATH directory entries are editable only by root.""" data = [ # Bad cases: # Non-root group ownership & permissions. self.CreateStat("/usr/local/bin", 0, 60, 0o0040775), # File & Non-root owner. self.CreateStat("/bin", 70, 0, 0o0100660), # A non-root symlink. self.CreateStat("/usr/local/sbin", 1, 0, 0o0120777), # File not owned by root but has no write permissions. self.CreateStat("/sbin", 1, 0, 0o0100400), # Fully root owned dir, but world writable. self.CreateStat("/usr", 0, 0, 0o0040666), # Safe cases: self.CreateStat("/usr/local", 0, 0, 0o0040755), # Root owned directory. self.CreateStat("/usr/bin", 0, 0, 0o0120777), # Root owned symlink. self.CreateStat("/usr/sbin", 0, 0, 0o0100775)] # Root owned file. 
results = self.GenResults(["RootEnvPathDirs"], [data]) check_id = "CIS-ROOT-PATH-HAS-FOLDERS-WRITABLE-BY-NON-ROOT" sym = ("Found: Folders that comprise the default $PATH of root can be " "modified by non-privileged users.") found = ["/usr/local/bin user: 0, group: 60, mode: drwxrwxr-x", "/bin user: 70, group: 0, mode: -rw-rw----", "/usr/local/sbin user: 1, group: 0, mode: lrwxrwxrwx", "/sbin user: 1, group: 0, mode: -r--------", "/usr user: 0, group: 0, mode: drw-rw-rw-"] self.assertCheckDetectedAnom(check_id, results, sym, found) def testUserHomeDirCheck(self): """Ensure user home dir check detect folders modifiable by non-owners.""" data = [self.CreateStat("/root", 0, 0, 0o0040750), self.CreateStat("/home/non-matching-user1", 1000, 600, 0o0040700), self.CreateStat("/home/user2", 200, 60, 0o0040770), self.CreateStat("/home/user3", 300, 70, 0o0040777), self.CreateStat("/home/user4", 400, 70, 0o0040760), self.CreateStat("/home/non-matching-user2", 500, 80, 0o0040755), self.CreateStat("/home/non-matching-user3", 2000, 800, 0o0040750), self.CreateStat("/home/non-matching-user4", 6000, 600, 0o0040751), self.CreateStat("/home/user8", 700, 70, 0o0040752)] results = self.GenResults(["UserHomeDirs"], [data]) check_id = "CIS-HOME-DIRS-WRITABLE-BY-NON-OWNERS" sym = ("Found: User home directory can be written to by " "group or others.") found = ["/home/user2 user: 200, group: 60, mode: drwxrwx---", "/home/user3 user: 300, group: 70, mode: drwxrwxrwx", "/home/user4 user: 400, group: 70, mode: drwxrw----", "/home/user8 user: 700, group: 70, mode: drwxr-x-w-",] self.assertCheckDetectedAnom(check_id, results, sym, found) def testUserDotFilesCheck(self): """Ensure user dot files check detects files that are world writable.""" data = [self.CreateStat("/root/.bash_history", 0, 0, 0o0100755), self.CreateStat("/root/.bash_logout", 0, 0, 0o0100775), self.CreateStat("/root/.bashrc", 0, 0, 0o0100772), # match self.CreateStat("/root/.gitconfig", 0, 0, 0o0100773), # match 
self.CreateStat("/home/user/.mozilla", 100, 70, 0o0100755), self.CreateStat("/home/user/.vim", 100, 70, 0o0040777), # match self.CreateStat("/home/user/.netrc", 100, 70, 0o0100664)] results = self.GenResults(["UserDotFiles"], [data]) check_id = "CIS-USER-DOT-FILES-DIRS-WORLD-WRITABLE" sym = ("Found: Dot files or folders in user home directory are world " "writable.") found = ["/root/.bashrc user: 0, group: 0, mode: -rwxrwx-w-", "/root/.gitconfig user: 0, group: 0, mode: -rwxrwx-wx", "/home/user/.vim user: 100, group: 70, mode: drwxrwxrwx",] self.assertCheckDetectedAnom(check_id, results, sym, found) check_id = "CIS-DOT-NETRC-FILE-EXISTS" sym = "Found: The .netrc file exists in a user's home directory." found = ["/home/user/.netrc user: 100, group: 70, mode: -rw-rw-r--"] self.assertCheckDetectedAnom(check_id, results, sym, found) def testLogFilesCheck(self): """Ensure log files check detects files modifiable by non-root.""" data = [self.CreateStat("/var/log/syslog", 0, 0, 0o0100666), self.CreateStat("/var/log/auth.log.1", 0, 4, 0o0100774), self.CreateStat("/var/log/debug.1.gz", 10, 0, 0o0100774), self.CreateStat("/var/log/mail.log", 0, 2, 0o0100770), self.CreateStat("/var/log/user.log.1.gz", 0, 4, 0o0100642), self.CreateStat("/var/log/puppet/mail.log", 30, 70, 0o0100777), self.CreateStat("/var/log/dpkg.log", 200, 70, 0o0100664)] results = self.GenResults(["LinuxLogFiles"], [data]) check_id = "CIS-LOG-FILES-PERMISSIONS-WRONG-OWNER" sym = "Found: Vital system log files have wrong owner." found = ["/var/log/debug.1.gz user: 10, group: 0, mode: -rwxrwxr--", "/var/log/dpkg.log user: 200, group: 70, mode: -rw-rw-r--"] self.assertCheckDetectedAnom(check_id, results, sym, found) check_id = "CIS-LOG-FILES-PERMISSIONS-WRONG-GROUP" sym = "Found: Vital system log files have wrong group." 
found = ["/var/log/mail.log user: 0, group: 2, mode: -rwxrwx---", "/var/log/dpkg.log user: 200, group: 70, mode: -rw-rw-r--"] self.assertCheckDetectedAnom(check_id, results, sym, found) check_id = "CIS-LOG-FILES-PERMISSIONS-WORLD-WRITABLE" sym = "Found: Log files are world writable." found = ["/var/log/syslog user: 0, group: 0, mode: -rw-rw-rw-", "/var/log/puppet/mail.log user: 30, group: 70, mode: -rwxrwxrwx", "/var/log/user.log.1.gz user: 0, group: 4, mode: -rw-r---w-"] self.assertCheckDetectedAnom(check_id, results, sym, found) def main(argv): test_lib.GrrTestProgram(argv=argv) if __name__ == "__main__": flags.StartMain(main)
apache-2.0
vinegret/youtube-dl
youtube_dl/extractor/lrt.py
67
3234
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, parse_duration, remove_end, ) class LRTIE(InfoExtractor): IE_NAME = 'lrt.lt' _VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)' _TESTS = [{ # m3u8 download 'url': 'http://www.lrt.lt/mediateka/irasas/54391/', 'md5': 'fe44cf7e4ab3198055f2c598fc175cb0', 'info_dict': { 'id': '54391', 'ext': 'mp4', 'title': 'Septynios Kauno dienos', 'description': 'md5:24d84534c7dc76581e59f5689462411a', 'duration': 1783, 'view_count': int, 'like_count': int, }, }, { # direct mp3 download 'url': 'http://www.lrt.lt/mediateka/irasas/1013074524/', 'md5': '389da8ca3cad0f51d12bed0c844f6a0a', 'info_dict': { 'id': '1013074524', 'ext': 'mp3', 'title': 'Kita tema 2016-09-05 15:05', 'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5', 'duration': 3008, 'view_count': int, 'like_count': int, }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = remove_end(self._og_search_title(webpage), ' - LRT') formats = [] for _, file_url in re.findall( r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage): ext = determine_ext(file_url) if ext not in ('m3u8', 'mp3'): continue # mp3 served as m3u8 produces stuttered media file if ext == 'm3u8' and '.mp3' in file_url: continue if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( file_url, video_id, 'mp4', entry_protocol='m3u8_native', fatal=False)) elif ext == 'mp3': formats.append({ 'url': file_url, 'vcodec': 'none', }) self._sort_formats(formats) thumbnail = self._og_search_thumbnail(webpage) description = self._og_search_description(webpage) duration = parse_duration(self._search_regex( r'var\s+record_len\s*=\s*(["\'])(?P<duration>[0-9]+:[0-9]+:[0-9]+)\1', webpage, 'duration', default=None, group='duration')) view_count = int_or_none(self._html_search_regex( 
r'<div[^>]+class=(["\']).*?record-desc-seen.*?\1[^>]*>(?P<count>.+?)</div>', webpage, 'view count', fatal=False, group='count')) like_count = int_or_none(self._search_regex( r'<span[^>]+id=(["\'])flikesCount.*?\1>(?P<count>\d+)<', webpage, 'like count', fatal=False, group='count')) return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'description': description, 'duration': duration, 'view_count': view_count, 'like_count': like_count, }
unlicense
Pavaka/Pygorithms
tests/KP_input_chekcer_test.py
1
3090
"""Unit tests for the knapsack-problem (KP) input checker.

The checker module lives in ``../input_checkers`` and is made importable
via a ``sys.path`` shim; the wildcard import pulls in ``check_input_data``
and the validation exception types asserted below.  The tests show the
item tuple convention is ``(value, weight)``: a negative second element
triggers ``ItemWithNegativeWeightError`` and a negative first element
triggers ``ItemWithNegativeValueError``.
"""
import unittest
import sys
import os

path = os.path.abspath("../input_checkers")
sys.path.append(path)

from KP_input_chekcer import *


class TestKPInputChecker(unittest.TestCase):
    """Each test feeds invalid (items, capacity) data to check_input_data
    and asserts the matching validation error is raised."""

    def test_negative_capacity(self):
        items = [(3, 4), (2, 3), (4, 2), (4, 3)]
        capacity = -6
        with self.assertRaises(NegativeCapacityError):
            check_input_data(items, capacity)

    # Renamed from test_item_with_non_pisitive_weight (typo).
    def test_item_with_non_positive_weight(self):
        items = [(3, 4), (2, 3), (4, -2), (4, -3)]
        capacity = 6
        with self.assertRaises(ItemWithNegativeWeightError):
            check_input_data(items, capacity)

    def test_capacity_NaN(self):
        items = [(3, 4), (2, 3), (4, 2), (4, 3)]
        capacity = "Error"
        with self.assertRaises(CapacityNotAnIntegerError):
            check_input_data(items, capacity)

    def test_weight_NaN(self):
        items = [(3, "four"), (2, 3), (4, 2), (4, 3)]
        capacity = 6
        with self.assertRaises(InvalidItemError):
            check_input_data(items, capacity)

    def test_value_NaN(self):
        items = [(3, 4), ("two", 3), (4, 2), (4, 3)]
        capacity = 6
        with self.assertRaises(InvalidItemError):
            check_input_data(items, capacity)

    def test_items_not_a_list(self):
        items = 3
        capacity = 6
        with self.assertRaises(ItemsNotAListOrTupleError):
            check_input_data(items, capacity)

    def test_item_with_non_positive_value(self):
        items = [(3, 4), (-2, 3), (4, 2), (4, 3)]
        capacity = 5
        with self.assertRaises(ItemWithNegativeValueError):
            check_input_data(items, capacity)

    def test_item_three_tuple(self):
        # Items must be exactly 2-tuples; a 3-tuple is invalid.
        items = [(3, 4), (2, 3), (4, 2), (4, 3, 7)]
        capacity = 6
        with self.assertRaises(InvalidItemError):
            check_input_data(items, capacity)

    def test_negative_value_item_error(self):
        items = [(45, 3), (30, 5), (-45, 9), (10, 5)]
        capacity = 1
        with self.assertRaises(ItemWithNegativeValueError):
            check_input_data(items, capacity)

    def test_negative_weight_item_error(self):
        items = [(45, 3), (30, 5), (45, 9), (10, -5)]
        capacity = 1
        with self.assertRaises(ItemWithNegativeWeightError):
            check_input_data(items, capacity)

    def test_invalid_items_exmp_1(self):
        items = "Items"
        capacity = 1
        with self.assertRaises(ItemsNotAListOrTupleError):
            check_input_data(items, capacity)

    def test_invalid_items_exmp_2(self):
        items = [(45, 3), 3, (45, 9), (10, 5)]
        capacity = 1
        with self.assertRaises(InvalidItemError):
            check_input_data(items, capacity)

    def test_invalid_capacity(self):
        items = [(45, 3), 3, (45, 9), (10, 5)]
        capacity = "This is not capacity"
        with self.assertRaises(CapacityNotAnIntegerError):
            check_input_data(items, capacity)


if __name__ == '__main__':
    unittest.main()
gpl-2.0
googleapis/python-aiplatform
schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py
3
3481
# -*- coding: utf-8 -*-

# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore


# NOTE(review): this module looks generated from the AI Platform schema
# protos; prefer regenerating over hand-editing the field definitions.
__protobuf__ = proto.module(
    package='google.cloud.aiplatform.v1beta1.schema.predict.params',
    manifest={
        'VideoClassificationPredictionParams',
    },
)


class VideoClassificationPredictionParams(proto.Message):
    r"""Prediction model parameters for Video Classification.
    Attributes:
        confidence_threshold (float):
            The Model only returns predictions with at
            least this confidence score. Default value is
            0.0
        max_predictions (int):
            The Model only returns up to that many top,
            by confidence score, predictions per instance.
            If this number is very high, the Model may
            return fewer predictions. Default value is
            10,000.
        segment_classification (bool):
            Set to true to request segment-level
            classification. AI Platform returns labels and
            their confidence scores for the entire time
            segment of the video that user specified in the
            input instance. Default value is true
        shot_classification (bool):
            Set to true to request shot-level
            classification. AI Platform determines the
            boundaries for each camera shot in the entire
            time segment of the video that user specified
            in the input instance. AI Platform then returns
            labels and their confidence scores for each
            detected shot, along with the start and end
            time of the shot.
            WARNING: Model evaluation is not done for this
            classification type, the quality of it depends
            on the training data, but there are no metrics
            provided to describe that quality. Default
            value is false
        one_sec_interval_classification (bool):
            Set to true to request classification for a
            video at one-second intervals. AI Platform
            returns labels and their confidence scores for
            each second of the entire time segment of the
            video that user specified in the input
            WARNING: Model evaluation is not done for this
            classification type, the quality of it depends
            on the training data, but there are no metrics
            provided to describe that quality. Default
            value is false
    """

    # Field numbers (number=...) are part of the protobuf wire format;
    # never renumber or reuse them.
    confidence_threshold = proto.Field(
        proto.FLOAT,
        number=1,
    )
    max_predictions = proto.Field(
        proto.INT32,
        number=2,
    )
    segment_classification = proto.Field(
        proto.BOOL,
        number=3,
    )
    shot_classification = proto.Field(
        proto.BOOL,
        number=4,
    )
    one_sec_interval_classification = proto.Field(
        proto.BOOL,
        number=5,
    )


__all__ = tuple(sorted(__protobuf__.manifest))
apache-2.0
kustodian/ansible
lib/ansible/modules/cloud/google/gce_snapshot.py
29
6899
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: gce_snapshot
version_added: "2.3"
short_description: Create or destroy snapshots for GCE storage volumes
description:
    - Manages snapshots for GCE instances. This module manages snapshots for
      the storage volumes of a GCE compute instance. If there are multiple
      volumes, each snapshot will be prepended with the disk name
options:
  instance_name:
    description:
      - The GCE instance to snapshot
    required: True
  snapshot_name:
    description:
      - The name of the snapshot to manage
  disks:
    description:
      - A list of disks to create snapshots for. If none is provided,
        all of the volumes will be snapshotted
    default: all
    required: False
  state:
    description:
      - Whether a snapshot should be C(present) or C(absent)
    required: false
    default: present
    choices: [present, absent]
  service_account_email:
    description:
      - GCP service account email for the project where the instance resides
    required: true
  credentials_file:
    description:
      - The path to the credentials file associated with the service account
    required: true
  project_id:
    description:
      - The GCP project ID to use
    required: true
requirements:
    - "python >= 2.6"
    - "apache-libcloud >= 0.19.0"
author: Rob Wagner (@robwagner33)
'''

EXAMPLES = '''
- name: Create gce snapshot
  gce_snapshot:
    instance_name: example-instance
    snapshot_name: example-snapshot
    state: present
    service_account_email: project_name@appspot.gserviceaccount.com
    credentials_file: /path/to/credentials
    project_id: project_name
  delegate_to: localhost

- name: Delete gce snapshot
  gce_snapshot:
    instance_name: example-instance
    snapshot_name: example-snapshot
    state: absent
    service_account_email: project_name@appspot.gserviceaccount.com
    credentials_file: /path/to/credentials
    project_id: project_name
  delegate_to: localhost

# This example creates snapshots for only two of the available disks as
# disk0-example-snapshot and disk1-example-snapshot
- name: Create snapshots of specific disks
  gce_snapshot:
    instance_name: example-instance
    snapshot_name: example-snapshot
    state: present
    disks:
      - disk0
      - disk1
    service_account_email: project_name@appspot.gserviceaccount.com
    credentials_file: /path/to/credentials
    project_id: project_name
  delegate_to: localhost
'''

RETURN = '''
snapshots_created:
    description: List of newly created snapshots
    returned: When snapshots are created
    type: list
    sample: "[disk0-example-snapshot, disk1-example-snapshot]"

snapshots_deleted:
    description: List of destroyed snapshots
    returned: When snapshots are deleted
    type: list
    sample: "[disk0-example-snapshot, disk1-example-snapshot]"

snapshots_existing:
    description: List of snapshots that already existed (no-op)
    returned: When snapshots were already present
    type: list
    sample: "[disk0-example-snapshot, disk1-example-snapshot]"

snapshots_absent:
    description: List of snapshots that were already absent (no-op)
    returned: When snapshots were already absent
    type: list
    sample: "[disk0-example-snapshot, disk1-example-snapshot]"
'''

try:
    from libcloud.compute.types import Provider
    _ = Provider.GCE
    HAS_LIBCLOUD = True
except ImportError:
    HAS_LIBCLOUD = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect


def find_snapshot(volume, name):
    '''
    Check if there is a snapshot already created with the given name for
    the passed in volume.

    Args:
        volume: A gce StorageVolume object to manage
        name: The name of the snapshot to look for

    Returns:
        The VolumeSnapshot object if one is found, None otherwise
    '''
    for snapshot in volume.list_snapshots():
        if name == snapshot.name:
            # Snapshot names are unique per volume, so the first match is
            # the only match; no need to scan the rest of the list.
            return snapshot
    return None


def main():
    """Entry point: create or delete snapshots for an instance's volumes."""
    module = AnsibleModule(
        argument_spec=dict(
            instance_name=dict(required=True),
            snapshot_name=dict(required=True),
            state=dict(choices=['present', 'absent'], default='present'),
            disks=dict(default=None, type='list'),
            service_account_email=dict(type='str'),
            credentials_file=dict(type='path'),
            project_id=dict(type='str')
        )
    )

    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module')

    gce = gce_connect(module)

    instance_name = module.params.get('instance_name')
    snapshot_name = module.params.get('snapshot_name')
    disks = module.params.get('disks')
    state = module.params.get('state')

    json_output = dict(
        changed=False,
        snapshots_created=[],
        snapshots_deleted=[],
        snapshots_existing=[],
        snapshots_absent=[]
    )

    snapshot = None

    instance = gce.ex_get_node(instance_name, 'all')
    instance_disks = instance.extra['disks']

    for instance_disk in instance_disks:
        disk_snapshot_name = snapshot_name
        disk_info = gce._get_components_from_path(instance_disk['source'])
        device_name = disk_info['name']
        device_zone = disk_info['zone']
        if disks is None or device_name in disks:
            volume_obj = gce.ex_get_volume(device_name, device_zone)

            # If we have more than one disk to snapshot, prepend the disk name
            if len(instance_disks) > 1:
                disk_snapshot_name = device_name + "-" + disk_snapshot_name

            snapshot = find_snapshot(volume_obj, disk_snapshot_name)

            if snapshot and state == 'present':
                json_output['snapshots_existing'].append(disk_snapshot_name)

            elif snapshot and state == 'absent':
                snapshot.destroy()
                json_output['changed'] = True
                json_output['snapshots_deleted'].append(disk_snapshot_name)

            elif not snapshot and state == 'present':
                volume_obj.snapshot(disk_snapshot_name)
                json_output['changed'] = True
                json_output['snapshots_created'].append(disk_snapshot_name)

            elif not snapshot and state == 'absent':
                json_output['snapshots_absent'].append(disk_snapshot_name)

    module.exit_json(**json_output)


if __name__ == '__main__':
    main()
gpl-3.0
kagel/foobnix
foobnix/playlists/m3u_reader.py
2
3221
'''
Created on Apr 26, 2013

@author: dimitry
'''
# NOTE(review): this module is Python 2 only — it uses the builtin
# ``unicode`` and the iterator ``.next()`` method, neither of which
# exists in Python 3.

import logging
import os.path
from foobnix.gui.model import FModel
from foobnix.util.file_utils import get_file_extension


class M3UReader:
    # Parses an .m3u/.m3u8 playlist file into (path, title) pairs and
    # wraps them in FModel beans for the player.

    def __init__(self, path):
        # path: filesystem path of the playlist file.
        # self.m3u is the opened file handle, or None if opening failed.
        self.path = path
        try:
            self.m3u = open(unicode(path))
        except Exception as e:
            logging.error(str(e))
            self.m3u = None

    def get_common_beans(self):
        """Return a list of FModel beans, one per playlist entry (may be [])."""
        paths_and_texts = self.parse()
        if not paths_and_texts:
            return []
        beans = [FModel(path=path_and_text[0], text=path_and_text[1]).add_is_file(True) for path_and_text in paths_and_texts]
        return beans

    def parse(self):
        """Return a list of [path, display_text] pairs, or None on failure.

        Lines starting with a single '#' are standard m3u comments and are
        skipped; lines starting with '##' carry a display title for the
        entry on the following line.
        """
        try:
            if not self.m3u:
                return
            lines = self.m3u.readlines()
            # Keep real entries and '##' title lines; drop ordinary '#' comments.
            paths = [os.path.normpath(line).strip('\r\n') for line in lines if line.startswith("##") or not line.startswith("#")]
            dirname = os.path.dirname(self.path)
            full_paths = []
            paths = iter(paths)
            for path in paths:
                text = None
                if path.startswith("##"):
                    # task() pairs a '##title' line with the entry line that
                    # follows it, recursing over runs of consecutive '##'
                    # lines.  NOTE(review): it appends [None, text] entries
                    # for unpaired titles — presumably intentional (section
                    # separators?); verify against callers.
                    def task(path):
                        text = path[2 : ]
                        try:
                            next_path = paths.next()  # py2-only iterator API
                            path = next_path if not next_path.startswith("##") else None
                        except StopIteration:
                            path = None
                            next_path = None
                        if not path:
                            full_paths.append( [path, text.strip('\r\n')] )
                            if next_path:
                                path, text = task(next_path)
                        return path, text
                    path, text = task(path)
                    if not path:
                        break
                if text:
                    text = text.strip('\r\n')
                else:
                    # Derive a title from the file name (last '/' or '\\' part).
                    new_text = path.rsplit('/', 1)[-1]
                    if path == new_text:
                        text = path.rsplit('\\', 1)[-1]
                    else:
                        text = new_text
                # NOTE(review): `path in "\\/"` tests whether path is a
                # substring of the two-char string "\/" (i.e. path is "\\",
                # "/" or "\\/") — almost certainly inverted; the intent was
                # probably "path contains a separator" / is absolute.
                # Verify against real playlists before changing.
                if (path in "\\/"):
                    full_paths.append( [path.replace("\\", "/"), text] )
                elif path.startswith('http'):
                    if not text:
                        text = path.rsplit('/', 1)[-1]
                    full_paths.append( [path.replace('/', '//', 1), text] )
                else:
                    # Relative entry: resolve against the playlist's directory.
                    full_paths.append([os.path.join(dirname, path).replace("\\", "/"), text] )
            return full_paths
        except IndexError:
            logging.warn("You try to load empty playlist")


def update_id3_for_m3u(beans):
    """Expand any .m3u/.m3u8 beans in the list into their entries; pass
    other beans through unchanged."""
    result = []
    for bean in beans:
        if bean.path and get_file_extension(bean.path) in [".m3u", ".m3u8"]:
            reader = M3UReader(bean.path)
            m3u_beans = reader.get_common_beans()
            for bean in m3u_beans:
                result.append(bean)
        else:
            result.append(bean)
    return result
gpl-3.0
eddiemonroe/opencog
examples/python/untested/viz_graph.py
47
12766
##
# @file viz_graph.py
# @brief graph visualisation helpers (networkx wrapper + dot/gephi writers)
# @author Dingjie.Wang
# @version 1.0
# @date 2012-07-31
import networkx as nx
from collections import defaultdict
from m_util import log


class Tree(object):
    """A simple n-ary tree node: an operator/value plus child Trees."""

    def __init__(self, op, children=None):
        # FIX: the default used to be a shared mutable list (children=[]),
        # so all default-constructed Trees aliased the same child list.
        assert type(op) != type(None)
        self._op = op
        self._children = children if children is not None else []

    def is_leaf(self):
        """Return True when this node has no children.

        FIX: the original returned ``True if self._children else False``,
        i.e. the exact inverse of "is a leaf", which also made __str__
        print internal nodes without their children.
        """
        return not self._children

    def get_op(self):
        return self._op

    def set_op(self, value):
        self._op = value
    op = property(get_op, set_op)

    def get_children(self):
        return self._children

    def set_children(self, value):
        self._children = value
    children = property(get_children, set_children)

    def __str__(self):
        # Leaves print as their op; internal nodes as an s-expression.
        if self.is_leaf():
            return str(self.op)
        else:
            return '(' + str(self.op) + ' ' + ' '.join(map(str, self.children)) + ')'

    def __repr__(self):
        return str(self)


#def trees_to_forest( trees ):
    #'''docstring for trees_to_forest'''
    #assert type(trees) == list
    #return Tree("forest", trees)


def tree_to_viz_graphic(tree, graph):
    ''' transfer a simpler and more efficient tree StructureNode to
        Viz_Graph for visualisation purpose '''
    if tree.children:
        # inner node: add one ordered edge per child, renaming each child
        # via graph.unique_id so duplicate labels stay distinct
        assert isinstance(tree.op, str)
        for i, child in enumerate(tree.children):
            # make name of tree node unique
            child_name = graph.unique_id(child.op)
            child.op = child_name
            child_name = tree_to_viz_graphic(child, graph)
            graph.add_edge(tree.op, child_name, order=i)
        return tree.op
    else:
        # leaf node
        return tree.op


# Kept at its original (mid-module) position so import-failure behaviour
# for environments without pygephi is unchanged.
import pygephi


class Gephi_Output:
    """Streams nodes/edges to a running Gephi instance over its HTTP API."""

    def __init__(self):
        self.gephi = pygephi.JSONClient('http://localhost:8080/workspace0', autoflush=True)
        self.gephi.clean()
        self.default_node_attr = {'size': 10, 'r': 0.0, 'g': 0.0, 'b': 1.0, 'x': 1}
        self.default_edge_attr = {}

    def write(self, filename=None):
        # Nothing to flush: the JSON client streams as it goes.
        pass

    def output_node(self, node_id, **attr):
        # NOTE(review): when attr contains a 'label' key it is passed both
        # explicitly and via **attr, which raises TypeError — confirm
        # whether callers ever set 'label' here.
        if attr:
            self.gephi.add_node(str(node_id), label=attr.get('label', None), **attr)
        else:
            self.gephi.add_node(str(node_id), label=attr.get('label', None), **self.default_node_attr)

    def output_edge(self, source, target, **attr):
        edge_id = attr.get('edge_id', None)
        assert edge_id
        self.gephi.add_edge(str(edge_id), source, target, attr.get("directed", None), label=attr.get("label", None))


class Dotty_Output(object):
    """Accumulates Graphviz dot statements and writes them as a digraph."""

    def __init__(self):
        self.body = ""

    def output_node(self, node_id, **attr):
        '''Append a dot node statement, with any color/shape/style attrs.'''
        line = '"%s" ' % str(node_id)
        if attr:
            line += "[%s]"
            str_attr = ""
            # Each attribute is optional; missing keys are simply skipped.
            try:
                str_attr += "color=%s," % attr['color']
            except Exception:
                pass
            try:
                str_attr += "shape=%s," % attr['shape']
            except Exception:
                pass
            try:
                str_attr += "style=%s," % attr['style']
            except Exception:
                pass
            str_attr = str_attr.strip(',')
            line = line % str_attr
        self.body += line + ";\n"

    def output_edge(self, source, target, **attr):
        '''Append a dot edge statement; attrs may be flat or nested under
        attr['attr'] (as produced by Viz_Graph.write).'''
        line = '"%s" -> "%s" ' % (str(source), str(target))
        if attr:
            line += "[%s]"
            str_attr = ""
            try:
                str_attr += "color=%s," % attr['color']
            except Exception:
                pass
            try:
                str_attr += "color=%s," % attr['attr']['color']
            except Exception:
                pass
            try:
                str_attr += "shape=%s," % attr['shape']
            except Exception:
                pass
            try:
                str_attr += "shape=%s," % attr['attr']['shape']
            except Exception:
                pass
            try:
                str_attr += "style=%s," % attr['style']
            except Exception:
                pass
            # NOTE(review): reading attr['attr']['style'] into label= looks
            # like a copy/paste slip (label from 'style'?) — kept as-is;
            # verify intended key before changing output.
            try:
                str_attr += 'label="%s",' % attr['attr']['style']
            except Exception:
                pass
            try:
                str_attr += 'label="%s",' % attr['order']
            except Exception:
                pass
            try:
                str_attr += 'label="%s",' % attr['attr']['order']
            except Exception:
                pass
            str_attr = str_attr.strip(',')
            line = line % str_attr
        self.body += line + ";\n"

    def write(self, filename):
        '''Write the accumulated statements as a dot digraph file.

        FIX: the original opened the file outside any guard and then
        called f.close() in a finally block — if open() itself raised
        IOError, ``f`` was unbound and the finally raised NameError.
        A with-statement closes the file and keeps the IOError logging.
        '''
        try:
            with open(filename, 'w') as f:
                content = '''
                    digraph visualisation{
                    node[style = filled]
                    %s
                    }
                 '''
                content = content % self.body
                f.write(content)
        except IOError:
            log.error("can't write dot file: %s" % filename)


class Viz_Graph(object):
    """ a wrapper to networkx, which work as a graph drawer"""

    def __init__(self, viz=None):
        # FIX: the default used to be ``viz=Dotty_Output()``, evaluated once
        # at def-time, so every default-constructed Viz_Graph shared ONE
        # Dotty_Output and their bodies accumulated into each other.
        # NOTE(review): the .node/.edge attribute access below is the
        # networkx 1.x API — confirm pinned networkx version.
        self._nx_graph = nx.DiGraph()
        self.viz = viz if viz is not None else Dotty_Output()
        self.no_nodes = defaultdict(int)

    def add_edge(self, source, target, **attr):
        self._nx_graph.add_edge(str(source), str(target))
        if attr:
            self.set_edge_attr(str(source), str(target), **attr)

    # should use carefully
    def add_edge_unique(self, source, target, **attr):
        '''Add an edge after uniquifying BOTH endpoint names.'''
        source = self.unique_id(source)
        target = self.unique_id(target)
        self.add_edge(source, target, **attr)

    # should use carefully, help to get a unique node id
    # when display link.type_name, it will make add an number behind to make it unique
    def unique_id(self, node):
        ''' supposed to added this node later'''
        node = str(node)
        self.no_nodes[node] += 1
        no_node = self.no_nodes[node]
        if no_node > 1:
            # have node with id source already, make it unique
            node = node + "[%s]" % str(no_node)
        return node

    def reset_unique(self):
        '''Forget all uniquification counters.'''
        self.no_nodes.clear()

    def add_node(self, node_id, **attr):
        self._nx_graph.add_node(str(node_id))
        for key, value in attr.items():
            self._nx_graph.node[str(node_id)][key] = value

    def output_node(self, node_id):
        '''Emit a single node through the configured writer.'''
        assert self.viz and callable(getattr(self.viz, "output_node"))
        self.viz.output_node(str(node_id))

    def output_edge(self, edge_id, source, target):
        '''Emit a single edge through the configured writer.

        NOTE(review): the writers' output_edge signature is
        (source, target, **attr); passing (edge_id, source, target)
        positionally would raise TypeError — looks unused/dead; verify.
        '''
        assert self.viz and callable(getattr(self.viz, "output_edge"))
        self.viz.output_edge(str(edge_id), str(source), str(target))

    def neighbors(self, node_id):
        '''return a list of node's neighbors'''
        return self._nx_graph.neighbors(str(node_id))

    def set_node_attr(self, node_id, **attr):
        for key, value in attr.items():
            self._nx_graph.node[str(node_id)][key] = value

    def get_node_attr(self, node_id):
        return self._nx_graph.node[str(node_id)]

    def set_edge_attr(self, source, target, **attr):
        for key, value in attr.items():
            self._nx_graph[str(source)][str(target)][key] = value

    def get_edge_attr(self, source, target):
        #return self._nx_graph.node[id]
        return self._nx_graph[str(source)][str(target)]

    def number_of_nodes(self):
        '''Number of nodes currently in the underlying graph.'''
        return self._nx_graph.number_of_nodes()

    def write(self, filename):
        """ draw the graph"""
        assert self.viz
        # output nodes
        for node in self._nx_graph.nodes():
            attr_dict = self._nx_graph.node[str(node)]
            self.viz.output_node(node, attr=attr_dict)
        # output edges
        for edge in self._nx_graph.edges():
            attr_dict = self._nx_graph.edge[edge[0]][edge[1]]
            self.viz.output_edge(edge[0], edge[1], attr=attr_dict)
        self.viz.write(filename)

    #def write_json(self, root,parent = None):
        #""" write to a javascript library"""
        #data = '''{
                    #"id": "%s",
                    #"name" : "%s",
                    #"data": {
                        #"band": "%s",
                        #"relation": "member of band"
                    #},
                    #"children":[%s] } '''
        #children = ""
        #for child in self._nx_graph.neighbors(root):
            #str_child = self.write_json(child,root)
            #if str_child:
                #temp = "%s," %(str_child)
                #children += temp
        #if children:
            #children = children[0:len(children) - 1 ]
        #return data %(root,root,parent,children)

    def clear(self):
        """Drop all nodes/edges and reset uniquification state."""
        self._nx_graph.clear()
        self.reset_unique()


class Graph_Abserver(object):
    """ abstract class that help to abserve the graph according to the given filter imfo"""

    def __init__(self, source, e_types, n_types, inheritance=True):
        self.source = source
        self.valid_edge_types = e_types
        self.valid_node_types = n_types
        self.graph = Viz_Graph()
        self.inheritance = inheritance

    def write(self, filename):
        '''Write the filtered graph through the underlying Viz_Graph.'''
        self.graph.write(filename)

    def filter_graph(self):
        '''Populate self.graph with edges whose type AND endpoint node
        types pass the configured filters.'''
        # add edges of valid type
        # iterate over valid edges
        for e_type in self.valid_edge_types:
            edges = self._get_edges(e_type)
            for edge in edges:
                nodes = self._nodes_from_edge(edge)
                # none empty edges!
                if len(nodes) > 0:
                    if self.valid_edge(edge, nodes):
                        self.graph.add_edge(str(nodes[0]), str(nodes[1]))
                        # add edge attribute

    # abstract fuctions must be institated
    def graph_info(self):
        '''Hook for subclasses to report statistics about the graph.'''
        pass

    def _nodes_from_edge(self, edge):
        # abstract: return the endpoint nodes of an edge
        pass

    def _get_edges(self, e_type):
        '''type of e_type should be consistency with valid_edge_types '''
        pass

    def _edge_type(self, edge):
        '''type of edge should be consistency with valid_edge_types '''
        return edge.type

    def _node_type(self, node):
        '''type of node should be consistency with valid_node_types '''
        return node.type
    # end

    def _edge_is_a(self, source, target):
        '''type of source and target is consistency with valid_edge_types '''
        return source == target

    def _node_is_a(self, source, target):
        '''type of source and target is consistency with valid_node_types '''
        return source == target

    def valid_edge(self, edge, nodes):
        """make sure the type edge and it targets are required type,
           if one of the target is invalid, then the edge is invalid """
        assert len(self.valid_edge_types) and len(self.valid_node_types) > 0
        # for...else: the else branch runs only when no break occurred,
        # i.e. no valid type matched.
        for arg in self.valid_edge_types:
            if self._edge_is_a(self._edge_type(edge), arg):
                break
        else:
            # invalid edge
            return False
        # determine if the outs of link is required node type
        for node in nodes:
            for arg in self.valid_node_types:
                if self._node_is_a(self._node_type(node), arg):
                    break
            else:
                # invalid node
                return False
        return True


__all__ = ["Dotty_Output", "Gephi_Output", "Tree", "tree_to_viz_graphic", "Graph_Abserver"]
agpl-3.0
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/distutils/fcompiler/ibm.py
184
3408
from __future__ import division, absolute_import, print_function

import os
import re
import sys

from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command, find_executable
from numpy.distutils.misc_util import make_temp_file
from distutils import log

# Names exported to numpy.distutils' compiler registry.
compilers = ['IBMFCompiler']

class IBMFCompiler(FCompiler):
    # numpy.distutils FCompiler subclass for the IBM XL Fortran compiler
    # (xlf / xlf90 / xlf95) on AIX, Linux and Darwin.
    compiler_type = 'ibm'
    description = 'IBM XL Fortran Compiler'
    version_pattern =  r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
    #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004

    executables = {
        'version_cmd'  : ["<F77>", "-qversion"],
        'compiler_f77' : ["xlf"],
        'compiler_fix' : ["xlf90", "-qfixed"],
        'compiler_f90' : ["xlf90"],
        'linker_so'    : ["xlf95"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }

    def get_version(self,*args,**kwds):
        # Try the generic "-qversion" probe first, then two platform-specific
        # fallbacks (AIX lslpp, Linux install-directory layout).
        version = FCompiler.get_version(self,*args,**kwds)

        if version is None and sys.platform.startswith('aix'):
            # use lslpp to find out xlf version
            lslpp = find_executable('lslpp')
            xlf = find_executable('xlf')
            # NOTE(review): if find_executable returns None here,
            # os.path.exists(None) raises TypeError — confirm whether
            # find_executable can return None on AIX.
            if os.path.exists(xlf) and os.path.exists(lslpp):
                s, o = exec_command(lslpp + ' -Lc xlfcmp')
                m = re.search('xlfcmp:(?P<version>\d+([.]\d+)+)', o)
                if m: version = m.group('version')

        xlf_dir = '/etc/opt/ibmcmp/xlf'
        if version is None and os.path.isdir(xlf_dir):
            # linux:
            # If the output of xlf does not contain version info
            # (that's the case with xlf 8.1, for instance) then
            # let's try another method:
            # Pick the highest-numbered versioned subdirectory that
            # actually contains an xlf.cfg.
            l = sorted(os.listdir(xlf_dir))
            l.reverse()
            l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
            if l:
                from distutils.version import LooseVersion
                self.version = version = LooseVersion(l[0])
        return version

    def get_flags(self):
        # Append an underscore to external symbol names (g77-style linkage).
        return ['-qextname']

    def get_flags_debug(self):
        return ['-g']

    def get_flags_linker_so(self):
        opt = []
        if sys.platform=='darwin':
            opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
        else:
            opt.append('-bshared')
        # Status 40 is tolerated: xlf exits non-zero for the bare version query.
        version = self.get_version(ok_status=[0, 40])
        if version is not None:
            if sys.platform.startswith('aix'):
                xlf_cfg = '/etc/xlf.cfg'
            else:
                xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
            # Rewrite the compiler config so shared objects link against
            # bundle1.o instead of crt1.o, and point xlf at the patched copy.
            fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
            log.info('Creating '+new_cfg)
            fi = open(xlf_cfg, 'r')
            crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
            for line in fi:
                m = crt1_match(line)
                if m:
                    fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
                else:
                    fo.write(line)
            fi.close()
            fo.close()
            opt.append('-F'+new_cfg)
        return opt

    def get_flags_opt(self):
        return ['-O3']

if __name__ == '__main__':
    log.set_verbosity(2)
    compiler = IBMFCompiler()
    compiler.customize()
    print(compiler.get_version())
mit
JCBarahona/edX
openedx/core/lib/block_cache/tests/test_block_structure.py
33
8848
""" Tests for block_structure.py """ # pylint: disable=protected-access from collections import namedtuple from copy import deepcopy import ddt import itertools from unittest import TestCase from openedx.core.lib.graph_traversals import traverse_post_order from ..block_structure import BlockStructure, BlockStructureModulestoreData, BlockStructureBlockData from ..exceptions import TransformerException from .test_utils import MockXBlock, MockTransformer, ChildrenMapTestMixin @ddt.ddt class TestBlockStructure(TestCase, ChildrenMapTestMixin): """ Tests for BlockStructure """ @ddt.data( [], ChildrenMapTestMixin.SIMPLE_CHILDREN_MAP, ChildrenMapTestMixin.LINEAR_CHILDREN_MAP, ChildrenMapTestMixin.DAG_CHILDREN_MAP, ) def test_relations(self, children_map): block_structure = self.create_block_structure(BlockStructure, children_map) # get_children for parent, children in enumerate(children_map): self.assertSetEqual(set(block_structure.get_children(parent)), set(children)) # get_parents for child, parents in enumerate(self.get_parents_map(children_map)): self.assertSetEqual(set(block_structure.get_parents(child)), set(parents)) # has_block for node in range(len(children_map)): self.assertTrue(block_structure.has_block(node)) self.assertFalse(block_structure.has_block(len(children_map) + 1)) @ddt.ddt class TestBlockStructureData(TestCase, ChildrenMapTestMixin): """ Tests for BlockStructureBlockData and BlockStructureModulestoreData """ def test_non_versioned_transformer(self): class TestNonVersionedTransformer(MockTransformer): """ Test transformer with default version number (0). 
""" VERSION = 0 block_structure = BlockStructureModulestoreData(root_block_usage_key=0) with self.assertRaisesRegexp(TransformerException, "VERSION attribute is not set"): block_structure._add_transformer(TestNonVersionedTransformer()) def test_transformer_data(self): # transformer test cases TransformerInfo = namedtuple("TransformerInfo", "transformer structure_wide_data block_specific_data") # pylint: disable=invalid-name transformers_info = [ TransformerInfo( transformer=MockTransformer(), structure_wide_data=[("t1.global1", "t1.g.val1"), ("t1.global2", "t1.g.val2")], block_specific_data={ "B1": [("t1.key1", "t1.b1.val1"), ("t1.key2", "t1.b1.val2")], "B2": [("t1.key1", "t1.b2.val1"), ("t1.key2", "t1.b2.val2")], "B3": [("t1.key1", True), ("t1.key2", False)], "B4": [("t1.key1", None), ("t1.key2", False)], }, ), TransformerInfo( transformer=MockTransformer(), structure_wide_data=[("t2.global1", "t2.g.val1"), ("t2.global2", "t2.g.val2")], block_specific_data={ "B1": [("t2.key1", "t2.b1.val1"), ("t2.key2", "t2.b1.val2")], "B2": [("t2.key1", "t2.b2.val1"), ("t2.key2", "t2.b2.val2")], }, ), ] # create block structure block_structure = BlockStructureModulestoreData(root_block_usage_key=0) # set transformer data for t_info in transformers_info: block_structure._add_transformer(t_info.transformer) for key, val in t_info.structure_wide_data: block_structure.set_transformer_data(t_info.transformer, key, val) for block, block_data in t_info.block_specific_data.iteritems(): for key, val in block_data: block_structure.set_transformer_block_field(block, t_info.transformer, key, val) # verify transformer data for t_info in transformers_info: self.assertEquals( block_structure._get_transformer_data_version(t_info.transformer), MockTransformer.VERSION ) for key, val in t_info.structure_wide_data: self.assertEquals( block_structure.get_transformer_data(t_info.transformer, key), val, ) for block, block_data in t_info.block_specific_data.iteritems(): for key, val in block_data: 
self.assertEquals( block_structure.get_transformer_block_field(block, t_info.transformer, key), val, ) def test_xblock_data(self): # block test cases blocks = [ MockXBlock("A", {}), MockXBlock("B", {"field1": "B.val1"}), MockXBlock("C", {"field1": "C.val1", "field2": "C.val2"}), MockXBlock("D", {"field1": True, "field2": False}), MockXBlock("E", {"field1": None, "field2": False}), ] # add each block block_structure = BlockStructureModulestoreData(root_block_usage_key=0) for block in blocks: block_structure._add_xblock(block.location, block) # request fields fields = ["field1", "field2", "field3"] block_structure.request_xblock_fields(*fields) # verify fields have not been collected yet for block in blocks: for field in fields: self.assertIsNone(block_structure.get_xblock_field(block.location, field)) # collect fields block_structure._collect_requested_xblock_fields() # verify values of collected fields for block in blocks: for field in fields: self.assertEquals( block_structure.get_xblock_field(block.location, field), block.field_map.get(field), ) @ddt.data( *itertools.product( [True, False], range(7), [ ChildrenMapTestMixin.SIMPLE_CHILDREN_MAP, ChildrenMapTestMixin.LINEAR_CHILDREN_MAP, ChildrenMapTestMixin.DAG_CHILDREN_MAP, ], ) ) @ddt.unpack def test_remove_block(self, keep_descendants, block_to_remove, children_map): ### skip test if invalid if (block_to_remove >= len(children_map)) or (keep_descendants and block_to_remove == 0): return ### create structure block_structure = self.create_block_structure(BlockStructureBlockData, children_map) parents_map = self.get_parents_map(children_map) ### verify blocks pre-exist self.assert_block_structure(block_structure, children_map) ### remove block block_structure.remove_block(block_to_remove, keep_descendants) missing_blocks = [block_to_remove] ### compute and verify updated children_map removed_children_map = deepcopy(children_map) removed_children_map[block_to_remove] = [] for parent in parents_map[block_to_remove]: 
removed_children_map[parent].remove(block_to_remove) if keep_descendants: # update the graph connecting the old parents to the old children for child in children_map[block_to_remove]: for parent in parents_map[block_to_remove]: removed_children_map[parent].append(child) self.assert_block_structure(block_structure, removed_children_map, missing_blocks) ### prune the structure block_structure._prune_unreachable() ### compute and verify updated children_map pruned_children_map = deepcopy(removed_children_map) if not keep_descendants: pruned_parents_map = self.get_parents_map(pruned_children_map) # update all descendants for child in children_map[block_to_remove]: # if the child has another parent, continue if pruned_parents_map[child]: continue for block in traverse_post_order(child, get_children=lambda block: pruned_children_map[block]): # add descendant to missing blocks and empty its # children missing_blocks.append(block) pruned_children_map[block] = [] self.assert_block_structure(block_structure, pruned_children_map, missing_blocks) def test_remove_block_if(self): block_structure = self.create_block_structure(BlockStructureBlockData, ChildrenMapTestMixin.LINEAR_CHILDREN_MAP) block_structure.remove_block_if(lambda block: block == 2) self.assert_block_structure(block_structure, [[1], [], [], []], missing_blocks=[2])
agpl-3.0