repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
xkcd1253/Mimi
flask/lib/python2.7/site-packages/pip-1.4.1-py2.7.egg/pip/vendor/distlib/locators.py
79
43778
# -*- coding: utf-8 -*- # # Copyright (C) 2012-2013 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # import gzip from io import BytesIO import json import logging import os import posixpath import re import threading import zlib from . import DistlibException from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, queue, quote, unescape, string_types, build_opener, HTTPRedirectHandler as BaseRedirectHandler, Request, HTTPError, URLError) from .database import Distribution, DistributionPath, make_dist from .metadata import Metadata from .util import (cached_property, parse_credentials, ensure_slash, split_filename, get_project_data, parse_requirement, ServerProxy) from .version import get_scheme, UnsupportedVersionError from .wheel import Wheel, is_compatible logger = logging.getLogger(__name__) MD5_HASH = re.compile('^md5=([a-f0-9]+)$') CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') DEFAULT_INDEX = 'http://python.org/pypi' def get_all_distribution_names(url=None): """ Return all distribution names known by an index. :param url: The URL of the index. :return: A list of all known distribution names. """ if url is None: url = DEFAULT_INDEX client = ServerProxy(url, timeout=3.0) return client.list_packages() class RedirectHandler(BaseRedirectHandler): """ A class to work around a bug in some Python 3.2.x releases. """ # There's a bug in the base version for some 3.2.x # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header # returns e.g. /abc, it bails because it says the scheme '' # is bogus, when actually it should use the request's # URL for the scheme. See Python issue #13696. def http_error_302(self, req, fp, code, msg, headers): # Some servers (incorrectly) return multiple Location headers # (so probably same goes for URI). Use first header. 
newurl = None for key in ('location', 'uri'): if key in headers: newurl = headers[key] break if newurl is None: return urlparts = urlparse(newurl) if urlparts.scheme == '': newurl = urljoin(req.get_full_url(), newurl) if hasattr(headers, 'replace_header'): headers.replace_header(key, newurl) else: headers[key] = newurl return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, headers) http_error_301 = http_error_303 = http_error_307 = http_error_302 class Locator(object): """ A base class for locators - things that locate distributions. """ source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') binary_extensions = ('.egg', '.exe', '.whl') excluded_extensions = ('.pdf',) # A list of tags indicating which wheels you want to match. The default # value of None matches against the tags compatible with the running # Python. If you want to match other values, set wheel_tags on a locator # instance to a list of tuples (pyver, abi, arch) which you want to match. wheel_tags = None downloadable_extensions = source_extensions + ('.whl',) def __init__(self, scheme='default'): """ Initialise an instance. :param scheme: Because locators look for most recent versions, they need to know the version scheme to use. This specifies the current PEP-recommended scheme - use ``'legacy'`` if you need to support existing distributions on PyPI. """ self._cache = {} self.scheme = scheme # Because of bugs in some of the handlers on some of the platforms, # we use our own opener rather than just using urlopen. self.opener = build_opener(RedirectHandler()) def clear_cache(self): self._cache.clear() def _get_scheme(self): return self._scheme def _set_scheme(self, value): self._scheme = value scheme = property(_get_scheme, _set_scheme) def _get_project(self, name): """ For a given project, get a dictionary mapping available versions to Distribution instances. This should be implemented in subclasses. 
""" raise NotImplementedError('Please implement in the subclass') def get_distribution_names(self): """ Return all the distribution names known to this locator. """ raise NotImplementedError('Please implement in the subclass') def get_project(self, name): """ For a given project, get a dictionary mapping available versions to Distribution instances. This calls _get_project to do all the work, and just implements a caching layer on top. """ if self._cache is None: result = self._get_project(name) elif name in self._cache: result = self._cache[name] else: result = self._get_project(name) self._cache[name] = result return result def score_url(self, url): """ Give an url a score which can be used to choose preferred URLs for a given project release. """ t = urlparse(url) return (t.scheme != 'https', 'pypi.python.org' in t.netloc, posixpath.basename(t.path)) def prefer_url(self, url1, url2): """ Choose one of two URLs where both are candidates for distribution archives for the same version of a distribution (for example, .tar.gz vs. zip). The current implement favours http:// URLs over https://, archives from PyPI over those from other locations and then the archive name. """ if url1 == 'UNKNOWN': result = url2 else: result = url2 s1 = self.score_url(url1) s2 = self.score_url(url2) if s1 > s2: result = url1 if result != url2: logger.debug('Not replacing %r with %r', url1, url2) else: logger.debug('Replacing %r with %r', url1, url2) return result def split_filename(self, filename, project_name): """ Attempt to split a filename in project name, version and Python version. """ return split_filename(filename, project_name) def convert_url_to_download_info(self, url, project_name): """ See if a URL is a candidate for a download URL for a project (the URL has typically been scraped from an HTML page). If it is, a dictionary is returned with keys "name", "version", "filename" and "url"; otherwise, None is returned. 
""" def same_project(name1, name2): name1, name2 = name1.lower(), name2.lower() if name1 == name2: result = True else: # distribute replaces '-' by '_' in project names, so it # can tell where the version starts in a filename. result = name1.replace('_', '-') == name2.replace('_', '-') return result result = None scheme, netloc, path, params, query, frag = urlparse(url) if frag.lower().startswith('egg='): logger.debug('%s: version hint in fragment: %r', project_name, frag) origpath = path if path and path[-1] == '/': path = path[:-1] if path.endswith('.whl'): try: wheel = Wheel(path) if is_compatible(wheel, self.wheel_tags): if project_name is None: include = True else: include = same_project(wheel.name, project_name) if include: result = { 'name': wheel.name, 'version': wheel.version, 'filename': wheel.filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), 'python-version': ', '.join( ['.'.join(list(v[2:])) for v in wheel.pyver]), } m = MD5_HASH.match(frag) if m: result['md5_digest'] = m.group(1) except Exception as e: logger.warning('invalid path for wheel: %s', path) elif path.endswith(self.downloadable_extensions): path = filename = posixpath.basename(path) for ext in self.downloadable_extensions: if path.endswith(ext): path = path[:-len(ext)] t = self.split_filename(path, project_name) if not t: logger.debug('No match for project/version: %s', path) else: name, version, pyver = t if not project_name or same_project(project_name, name): result = { 'name': name, 'version': version, 'filename': filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), #'packagetype': 'sdist', } if pyver: result['python-version'] = pyver m = MD5_HASH.match(frag) if m: result['md5_digest'] = m.group(1) break return result def _update_version_data(self, result, info): """ Update a result dictionary (the final result from _get_project) with a dictionary for a specific version, whih typically holds information gleaned from a filename or URL for 
an archive for the distribution. """ name = info.pop('name') version = info.pop('version') if version in result: dist = result[version] md = dist.metadata else: dist = make_dist(name, version, scheme=self.scheme) md = dist.metadata dist.md5_digest = info.get('md5_digest') if 'python-version' in info: md['Requires-Python'] = info['python-version'] if md['Download-URL'] != info['url']: md['Download-URL'] = self.prefer_url(md['Download-URL'], info['url']) dist.locator = self result[version] = dist def locate(self, requirement, prereleases=False): """ Find the most recent distribution which matches the given requirement. :param requirement: A requirement of the form 'foo (1.0)' or perhaps 'foo (>= 1.0, < 2.0, != 1.3)' :param prereleases: If ``True``, allow pre-release versions to be located. Otherwise, pre-release versions are not returned. :return: A :class:`Distribution` instance, or ``None`` if no such distribution could be located. """ result = None scheme = get_scheme(self.scheme) r = parse_requirement(requirement) if r is None: raise DistlibException('Not a valid requirement: %r' % requirement) if r.extras: # lose the extras part of the requirement requirement = r.requirement matcher = scheme.matcher(requirement) vcls = matcher.version_class logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) versions = self.get_project(matcher.name) if versions: # sometimes, versions are invalid slist = [] for k in versions: try: if not matcher.match(k): logger.debug('%s did not match %r', matcher, k) else: if prereleases or not vcls(k).is_prerelease: slist.append(k) else: logger.debug('skipping pre-release version %s', k) except Exception: logger.warning('error matching %s with %r', matcher, k) pass # slist.append(k) if len(slist) > 1: slist = sorted(slist, key=scheme.key) if slist: logger.debug('sorted list: %s', slist) result = versions[slist[-1]] if result and r.extras: result.extras = r.extras return result class PyPIRPCLocator(Locator): """ This locator uses 
XML-RPC to locate distributions. It therefore cannot be used with simple mirrors (that only mirror file content). """ def __init__(self, url, **kwargs): """ Initialise an instance. :param url: The URL to use for XML-RPC. :param kwargs: Passed to the superclass constructor. """ super(PyPIRPCLocator, self).__init__(**kwargs) self.base_url = url self.client = ServerProxy(url, timeout=3.0) def get_distribution_names(self): """ Return all the distribution names known to this locator. """ return set(self.client.list_packages()) def _get_project(self, name): result = {} versions = self.client.package_releases(name, True) for v in versions: urls = self.client.release_urls(name, v) data = self.client.release_data(name, v) metadata = Metadata(scheme=self.scheme) metadata.update(data) dist = Distribution(metadata) if urls: info = urls[0] metadata['Download-URL'] = info['url'] dist.md5_digest = info.get('md5_digest') dist.locator = self result[v] = dist return result class PyPIJSONLocator(Locator): """ This locator uses PyPI's JSON interface. It's very limited in functionality nad probably not worth using. """ def __init__(self, url, **kwargs): super(PyPIJSONLocator, self).__init__(**kwargs) self.base_url = ensure_slash(url) def get_distribution_names(self): """ Return all the distribution names known to this locator. """ raise NotImplementedError('Not available from this locator') def _get_project(self, name): result = {} url = urljoin(self.base_url, '%s/json' % quote(name)) try: resp = self.opener.open(url) data = resp.read().decode() # for now d = json.loads(data) md = Metadata(scheme=self.scheme) md.update(d['info']) dist = Distribution(md) urls = d['urls'] if urls: info = urls[0] md['Download-URL'] = info['url'] dist.md5_digest = info.get('md5_digest') dist.locator = self result[md.version] = dist except Exception as e: logger.exception('JSON fetch failed: %s', e) return result class Page(object): """ This class represents a scraped HTML page. 
""" # The following slightly hairy-looking regex just looks for the contents of # an anchor link, which has an attribute "href" either immediately preceded # or immediately followed by a "rel" attribute. The attribute values can be # declared with double quotes, single quotes or no quotes - which leads to # the length of the expression. _href = re.compile(""" (rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)? href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*)) (\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))? """, re.I | re.S | re.X) _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S) def __init__(self, data, url): """ Initialise an instance with the Unicode page contents and the URL they came from. """ self.data = data self.base_url = self.url = url m = self._base.search(self.data) if m: self.base_url = m.group(1) _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) @cached_property def links(self): """ Return the URLs of all the links on a page together with information about their "rel" attribute, for determining which ones to treat as downloads and which ones to queue for further scraping. """ def clean(url): "Tidy up an URL." scheme, netloc, path, params, query, frag = urlparse(url) return urlunparse((scheme, netloc, quote(path), params, query, frag)) result = set() for match in self._href.finditer(self.data): d = match.groupdict('') rel = (d['rel1'] or d['rel2'] or d['rel3'] or d['rel4'] or d['rel5'] or d['rel6']) url = d['url1'] or d['url2'] or d['url3'] url = urljoin(self.base_url, url) url = unescape(url) url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) result.add((url, rel)) # We sort the result, hoping to bring the most recent versions # to the front result = sorted(result, key=lambda t: t[0], reverse=True) return result class SimpleScrapingLocator(Locator): """ A locator which scrapes HTML pages to locate downloads for a distribution. 
This runs multiple threads to do the I/O; performance is at least as good as pip's PackageFinder, which works in an analogous fashion. """ # These are used to deal with various Content-Encoding schemes. decoders = { 'deflate': zlib.decompress, 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(), 'none': lambda b: b, } def __init__(self, url, timeout=None, num_workers=10, **kwargs): """ Initialise an instance. :param url: The root URL to use for scraping. :param timeout: The timeout, in seconds, to be applied to requests. This defaults to ``None`` (no timeout specified). :param num_workers: The number of worker threads you want to do I/O, This defaults to 10. :param kwargs: Passed to the superclass. """ super(SimpleScrapingLocator, self).__init__(**kwargs) self.base_url = ensure_slash(url) self.timeout = timeout self._page_cache = {} self._seen = set() self._to_fetch = queue.Queue() self._bad_hosts = set() self.skip_externals = False self.num_workers = num_workers self._lock = threading.RLock() def _prepare_threads(self): """ Threads are created only when get_project is called, and terminate before it returns. They are there primarily to parallelise I/O (i.e. fetching web pages). """ self._threads = [] for i in range(self.num_workers): t = threading.Thread(target=self._fetch) t.setDaemon(True) t.start() self._threads.append(t) def _wait_threads(self): """ Tell all the threads to terminate (by sending a sentinel value) and wait for them to do so. 
""" # Note that you need two loops, since you can't say which # thread will get each sentinel for t in self._threads: self._to_fetch.put(None) # sentinel for t in self._threads: t.join() self._threads = [] def _get_project(self, name): self.result = result = {} self.project_name = name url = urljoin(self.base_url, '%s/' % quote(name)) self._seen.clear() self._page_cache.clear() self._prepare_threads() try: logger.debug('Queueing %s', url) self._to_fetch.put(url) self._to_fetch.join() finally: self._wait_threads() del self.result return result platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|' r'win(32|-amd64)|macosx-?\d+)\b', re.I) def _is_platform_dependent(self, url): """ Does an URL refer to a platform-specific download? """ return self.platform_dependent.search(url) def _process_download(self, url): """ See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value. """ if self._is_platform_dependent(url): info = None else: info = self.convert_url_to_download_info(url, self.project_name) logger.debug('process_download: %s -> %s', url, info) if info: with self._lock: # needed because self.result is shared self._update_version_data(self.result, info) return info def _should_queue(self, link, referrer, rel): """ Determine whether a link URL from a referring page and with a particular "rel" attribute should be queued for scraping. 
""" scheme, netloc, path, _, _, _ = urlparse(link) if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions): result = False elif self.skip_externals and not link.startswith(self.base_url): result = False elif not referrer.startswith(self.base_url): result = False elif rel not in ('homepage', 'download'): result = False elif scheme not in ('http', 'https', 'ftp'): result = False elif self._is_platform_dependent(link): result = False else: host = netloc.split(':', 1)[0] if host.lower() == 'localhost': result = False else: result = True logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result) return result def _fetch(self): """ Get a URL to fetch from the work queue, get the HTML page, examine its links for download candidates and candidates for further scraping. This is a handy method to run in a thread. """ while True: url = self._to_fetch.get() try: if url: page = self.get_page(url) if page is None: # e.g. after an error continue for link, rel in page.links: if link not in self._seen: self._seen.add(link) if (not self._process_download(link) and self._should_queue(link, url, rel)): logger.debug('Queueing %s from %s', link, url) self._to_fetch.put(link) finally: # always do this, to avoid hangs :-) self._to_fetch.task_done() if not url: #logger.debug('Sentinel seen, quitting.') break def get_page(self, url): """ Get the HTML for an URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator). 
""" # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result _distname_re = re.compile('<a href=[^>]*>([^<]+)<') def get_distribution_names(self): """ Return all the distribution names known to this locator. 
""" result = set() page = self.get_page(self.base_url) if not page: raise DistlibException('Unable to get %s' % self.base_url) for match in self._distname_re.finditer(page.data): result.add(match.group(1)) return result class DirectoryLocator(Locator): """ This class locates distributions in a directory tree. """ def __init__(self, path, **kwargs): """ Initialise an instance. :param path: The root of the directory tree to search. :param kwargs: Passed to the superclass constructor, except for: * recursive - if True (the default), subdirectories are recursed into. If False, only the top-level directory is searched, """ self.recursive = kwargs.pop('recursive', True) super(DirectoryLocator, self).__init__(**kwargs) path = os.path.abspath(path) if not os.path.isdir(path): raise DistlibException('Not a directory: %r' % path) self.base_dir = path def should_include(self, filename, parent): """ Should a filename be considered as a candidate for a distribution archive? As well as the filename, the directory which contains it is provided, though not used by the current implementation. """ return filename.endswith(self.downloadable_extensions) def _get_project(self, name): result = {} for root, dirs, files in os.walk(self.base_dir): for fn in files: if self.should_include(fn, root): fn = os.path.join(root, fn) url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', '')) info = self.convert_url_to_download_info(url, name) if info: self._update_version_data(result, info) if not self.recursive: break return result def get_distribution_names(self): """ Return all the distribution names known to this locator. 
""" result = set() for root, dirs, files in os.walk(self.base_dir): for fn in files: if self.should_include(fn, root): fn = os.path.join(root, fn) url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', '')) info = self.convert_url_to_download_info(url, None) if info: result.add(info['name']) if not self.recursive: break return result class JSONLocator(Locator): """ This locator uses special extended metadata (not available on PyPI) and is the basis of performant dependency resolution in distlib. Other locators require archive downloads before dependencies can be determined! As you might imagine, that can be slow. """ def get_distribution_names(self): """ Return all the distribution names known to this locator. """ raise NotImplementedError('Not available from this locator') def _get_project(self, name): result = {} data = get_project_data(name) if data: for info in data.get('files', []): if info['ptype'] != 'sdist' or info['pyversion'] != 'source': continue dist = make_dist(data['name'], info['version'], scheme=self.scheme) md = dist.metadata md['Download-URL'] = info['url'] dist.md5_digest = info.get('digest') md.dependencies = info.get('requirements', {}) dist.exports = info.get('exports', {}) result[dist.version] = dist return result class DistPathLocator(Locator): """ This locator finds installed distributions in a path. It can be useful for adding to an :class:`AggregatingLocator`. """ def __init__(self, distpath, **kwargs): """ Initialise an instance. :param distpath: A :class:`DistributionPath` instance to search. """ super(DistPathLocator, self).__init__(**kwargs) assert isinstance(distpath, DistributionPath) self.distpath = distpath def _get_project(self, name): dist = self.distpath.get_distribution(name) if dist is None: result = {} else: result = { dist.version: dist } return result class AggregatingLocator(Locator): """ This class allows you to chain and/or merge a list of locators. 
""" def __init__(self, *locators, **kwargs): """ Initialise an instance. :param locators: The list of locators to search. :param kwargs: Passed to the superclass constructor, except for: * merge - if False (the default), the first successful search from any of the locators is returned. If True, the results from all locators are merged (this can be slow). """ self.merge = kwargs.pop('merge', False) self.locators = locators super(AggregatingLocator, self).__init__(**kwargs) def clear_cache(self): super(AggregatingLocator, self).clear_cache() for locator in self.locators: locator.clear_cache() def _set_scheme(self, value): self._scheme = value for locator in self.locators: locator.scheme = value scheme = property(Locator.scheme.fget, _set_scheme) def _get_project(self, name): result = {} for locator in self.locators: r = locator.get_project(name) if r: if self.merge: result.update(r) else: result = r break return result def get_distribution_names(self): """ Return all the distribution names known to this locator. """ result = set() for locator in self.locators: try: result |= locator.get_distribution_names() except NotImplementedError: pass return result default_locator = AggregatingLocator( JSONLocator(), SimpleScrapingLocator('https://pypi.python.org/simple/', timeout=3.0)) locate = default_locator.locate class DependencyFinder(object): """ Locate dependencies for distributions. """ def __init__(self, locator=None): """ Initialise an instance, using the specified locator to locate distributions. """ self.locator = locator or default_locator self.scheme = get_scheme(self.locator.scheme) def _get_name_and_version(self, p): """ A utility method used to get name and version from e.g. a Provides-Dist value. :param p: A value in a form foo (1.0) :return: The name and version as a tuple. 
""" comps = p.strip().rsplit(' ', 1) name = comps[0] version = None if len(comps) == 2: version = comps[1] if len(version) < 3 or version[0] != '(' or version[-1] != ')': raise DistlibException('Ill-formed provides field: %r' % p) version = version[1:-1] # trim off parentheses # Name in lower case for case-insensitivity return name.lower(), version def add_distribution(self, dist): """ Add a distribution to the finder. This will update internal information about who provides what. :param dist: The distribution to add. """ logger.debug('adding distribution %s', dist) name = dist.key self.dists_by_name[name] = dist self.dists[(name, dist.version)] = dist for p in dist.provides: name, version = self._get_name_and_version(p) logger.debug('Add to provided: %s, %s, %s', name, version, dist) self.provided.setdefault(name, set()).add((version, dist)) def remove_distribution(self, dist): """ Remove a distribution from the finder. This will update internal information about who provides what. :param dist: The distribution to remove. """ logger.debug('removing distribution %s', dist) name = dist.key del self.dists_by_name[name] del self.dists[(name, dist.version)] for p in dist.provides: name, version = self._get_name_and_version(p) logger.debug('Remove from provided: %s, %s, %s', name, version, dist) s = self.provided[name] s.remove((version, dist)) if not s: del self.provided[name] def get_matcher(self, reqt): """ Get a version matcher for a requirement. :param reqt: The requirement :type reqt: str :return: A version matcher (an instance of :class:`distlib.version.Matcher`). """ try: matcher = self.scheme.matcher(reqt) except UnsupportedVersionError: # XXX compat-mode if cannot read the version name = reqt.split()[0] matcher = self.scheme.matcher(name) return matcher def find_providers(self, reqt): """ Find the distributions which can fulfill a requirement. :param reqt: The requirement. :type reqt: str :return: A set of distribution which can fulfill the requirement. 
""" matcher = self.get_matcher(reqt) name = matcher.key # case-insensitive result = set() provided = self.provided if name in provided: for version, provider in provided[name]: try: match = matcher.match(version) except UnsupportedVersionError: match = False if match: result.add(provider) break return result def try_to_replace(self, provider, other, problems): """ Attempt to replace one provider with another. This is typically used when resolving dependencies from multiple sources, e.g. A requires (B >= 1.0) while C requires (B >= 1.1). For successful replacement, ``provider`` must meet all the requirements which ``other`` fulfills. :param provider: The provider we are trying to replace with. :param other: The provider we're trying to replace. :param problems: If False is returned, this will contain what problems prevented replacement. This is currently a tuple of the literal string 'cantreplace', ``provider``, ``other`` and the set of requirements that ``provider`` couldn't fulfill. :return: True if we can replace ``other`` with ``provider``, else False. """ rlist = self.reqts[other] unmatched = set() for s in rlist: matcher = self.get_matcher(s) if not matcher.match(provider.version): unmatched.add(s) if unmatched: # can't replace other with provider problems.add(('cantreplace', provider, other, unmatched)) result = False else: # can replace other with provider self.remove_distribution(other) del self.reqts[other] for s in rlist: self.reqts.setdefault(provider, set()).add(s) self.add_distribution(provider) result = True return result def find(self, requirement, tests=False, prereleases=False): """ Find a distribution matching requirement and all distributions it depends on. Use the ``tests`` argument to determine whether distributions used only for testing should be included in the results. Allow ``requirement`` to be either a :class:`Distribution` instance or a string expressing a requirement. 
If ``prereleases`` is True, allow pre-release versions to be returned - otherwise, don't. Return a set of :class:`Distribution` instances and a set of problems. The distributions returned should be such that they have the :attr:`required` attribute set to ``True`` if they were from the ``requirement`` passed to ``find()``, and they have the :attr:`build_time_dependency` attribute set to ``True`` unless they are post-installation dependencies of the ``requirement``. The problems should be a tuple consisting of the string ``'unsatisfied'`` and the requirement which couldn't be satisfied by any distribution known to the locator. """ self.provided = {} self.dists = {} self.dists_by_name = {} self.reqts = {} if isinstance(requirement, Distribution): dist = odist = requirement logger.debug('passed %s as requirement', odist) else: dist = odist = self.locator.locate(requirement, prereleases=prereleases) if dist is None: raise DistlibException('Unable to locate %r' % requirement) logger.debug('located %s', odist) dist.requested = True problems = set() todo = set([dist]) install_dists = set([odist]) while todo: dist = todo.pop() name = dist.key # case-insensitive if name not in self.dists_by_name: self.add_distribution(dist) else: #import pdb; pdb.set_trace() other = self.dists_by_name[name] if other != dist: self.try_to_replace(dist, other, problems) ireqts = dist.requires sreqts = dist.setup_requires ereqts = set() if not tests or dist not in install_dists: treqts = set() else: treqts = dist.test_requires all_reqts = ireqts | sreqts | treqts | ereqts for r in all_reqts: providers = self.find_providers(r) if not providers: logger.debug('No providers found for %r', r) provider = self.locator.locate(r, prereleases=prereleases) if provider is None: logger.debug('Cannot satisfy %r', r) problems.add(('unsatisfied', r)) else: n, v = provider.key, provider.version if (n, v) not in self.dists: todo.add(provider) providers.add(provider) if r in ireqts and dist in install_dists: 
install_dists.add(provider) logger.debug('Adding %s to install_dists', provider.name_and_version) for p in providers: name = p.key if name not in self.dists_by_name: self.reqts.setdefault(p, set()).add(r) else: other = self.dists_by_name[name] if other != p: # see if other can be replaced by p self.try_to_replace(p, other, problems) dists = set(self.dists.values()) for dist in dists: dist.build_time_dependency = dist not in install_dists if dist.build_time_dependency: logger.debug('%s is a build-time dependency only.', dist.name_and_version) logger.debug('find done for %s', odist) return dists, problems
gpl-2.0
newsteinking/docker-registry
docker_registry/drivers/s3.py
29
5509
# -*- coding: utf-8 -*- """ docker_registry.drivers.s3 ~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a s3 based driver. """ # This fixes an issue where boto sends unicode to gevent.socket.getaddrinfo in # an eventlet causing the event pool to hang in a deadlock state. # This initiates the unicode => idna conversion outside of getaddrinfo, # preventing the deadlock. # See https://github.com/gevent/gevent/issues/349 for context. u'fix for gevent deadlock'.encode('idna') import gevent.monkey gevent.monkey.patch_all() import docker_registry.core.boto as coreboto from docker_registry.core import compat from docker_registry.core import exceptions from docker_registry.core import lru import logging import os import re import time import boto.exception import boto.s3 import boto.s3.connection import boto.s3.key logger = logging.getLogger(__name__) class Cloudfront(object): def __init__(self, awsaccess, awssecret, base, keyid, privatekey): boto.connect_cloudfront( awsaccess, awssecret ) host = re.compile('^https?://([^/]+)').findall(base) self.dist = boto.cloudfront.distribution.Distribution(domain_name=host) self.base = base self.keyid = keyid self.privatekey = privatekey try: self.privatekey = open(privatekey).read() except Exception: logger.debug('Passed private key is not readable. 
Assume string.') def sign(self, url, expire_time=0): path = os.path.join(self.base, url) if expire_time: expire_time = time.time() + expire_time return self.dist.create_signed_url( path, self.keyid, private_key_string=self.privatekey, expire_time=int(expire_time) ) def pub(self, path): return os.path.join(self.base, path) class Storage(coreboto.Base): def __init__(self, path, config): super(Storage, self).__init__(path, config) def _build_connection_params(self): kwargs = super(Storage, self)._build_connection_params() if self._config.s3_secure is not None: kwargs['is_secure'] = (self._config.s3_secure is True) return kwargs def makeConnection(self): kwargs = self._build_connection_params() # Connect cloudfront if we are required to if self._config.cloudfront: self.signer = Cloudfront( self._config.s3_access_key, self._config.s3_secret_key, self._config.cloudfront['base'], self._config.cloudfront['keyid'], self._config.cloudfront['keysecret'] ).sign else: self.signer = None if self._config.s3_use_sigv4 is True: if self._config.boto_host is None: logger.warn("No S3 Host specified, Boto won't use SIGV4!") boto.config.add_section('s3') boto.config.set('s3', 'use-sigv4', 'True') if self._config.s3_region is not None: return boto.s3.connect_to_region( region_name=self._config.s3_region, aws_access_key_id=self._config.s3_access_key, aws_secret_access_key=self._config.s3_secret_key, **kwargs) logger.warn("No S3 region specified, using boto default region, " + "this may affect performance and stability.") return boto.s3.connection.S3Connection( self._config.s3_access_key, self._config.s3_secret_key, **kwargs) def makeKey(self, path): return boto.s3.key.Key(self._boto_bucket, path) @lru.set def put_content(self, path, content): path = self._init_path(path) key = self.makeKey(path) key.set_contents_from_string( content, encrypt_key=(self._config.s3_encrypt is True)) return path def stream_write(self, path, fp): # Minimum size of upload part size on S3 is 5MB buffer_size = 5 
* 1024 * 1024 if self.buffer_size > buffer_size: buffer_size = self.buffer_size path = self._init_path(path) mp = self._boto_bucket.initiate_multipart_upload( path, encrypt_key=(self._config.s3_encrypt is True)) num_part = 1 try: while True: buf = fp.read(buffer_size) if not buf: break io = compat.StringIO(buf) mp.upload_part_from_file(io, num_part) num_part += 1 io.close() except IOError as e: raise e mp.complete_upload() def content_redirect_url(self, path): path = self._init_path(path) key = self.makeKey(path) if not key.exists(): raise IOError('No such key: \'{0}\''.format(path)) # No cloudfront? Sign to the bucket if not self.signer: return key.generate_url( expires_in=1200, method='GET', query_auth=True) # Have cloudfront? Sign it return self.signer(path, expire_time=60) def get_content(self, path, tries=0): try: return super(Storage, self).get_content(path) except exceptions.FileNotFoundError as e: if tries <= 3: time.sleep(.1) return self.get_content(path, tries + 1) else: raise e
apache-2.0
c0710204/edx-platform
common/lib/xmodule/xmodule/tests/test_word_cloud.py
53
1794
# -*- coding: utf-8 -*- """Test for Word cloud Xmodule functional logic.""" from webob.multidict import MultiDict from xmodule.word_cloud_module import WordCloudDescriptor from . import LogicTest class WordCloudModuleTest(LogicTest): """Logic tests for Word Cloud Xmodule.""" descriptor_class = WordCloudDescriptor raw_field_data = { 'all_words': {'cat': 10, 'dog': 5, 'mom': 1, 'dad': 2}, 'top_words': {'cat': 10, 'dog': 5, 'dad': 2}, 'submitted': False } def test_bad_ajax_request(self): "Make sure that answer for incorrect request is error json" response = self.ajax_request('bad_dispatch', {}) self.assertDictEqual(response, { 'status': 'fail', 'error': 'Unknown Command!' }) def test_good_ajax_request(self): "Make shure that ajax request works correctly" post_data = MultiDict(('student_words[]', word) for word in ['cat', 'cat', 'dog', 'sun']) response = self.ajax_request('submit', post_data) self.assertEqual(response['status'], 'success') self.assertEqual(response['submitted'], True) self.assertEqual(response['total_count'], 22) self.assertDictEqual( response['student_words'], {'sun': 1, 'dog': 6, 'cat': 12} ) self.assertListEqual( response['top_words'], [{'text': 'dad', 'size': 2, 'percent': 9.0}, {'text': 'sun', 'size': 1, 'percent': 5.0}, {'text': 'dog', 'size': 6, 'percent': 27.0}, {'text': 'mom', 'size': 1, 'percent': 5.0}, {'text': 'cat', 'size': 12, 'percent': 54.0}] ) self.assertEqual( 100.0, sum(i['percent'] for i in response['top_words']))
agpl-3.0
containscafeine/hubops
exec_scripts/docker_launch.py
1
7700
#!/usr/bin/python3 from docker import Client import json, requests, sys sys.path.append("/root/hubops") from config.docker_config import docker_config from sys import argv from random import randint class ContainerClass: def __init__(self): self.name = "hubops{}".format(randint(100000, 999999)) self.hostname = self.name def host_config_defaults(self): port_bindings = {22: 4022} dns = ["8.8.8.8", "8.8.4.4"] # network_mode = 'host' # mem_limit = "512m" # memswap_limit = "700m" # command = "/bin/bash" privileged = True return locals() def host_config_variables(self, **kwargs): host_config_dict = self.host_config_defaults() del host_config_dict["self"] for config_param, config_value in kwargs.items(): host_config_dict.update({config_param: config_value}) return host_config_dict def get_image_remote(client_instance, image_repository, image_repository_tags): # User wants official or unofficial images? while True: official_or_not = input("Search for official images or unofficial images?\n" "Official(O)\n" "Unofficial(U)\n").lower() if official_or_not in list("ou"): break found_list = search_images_name_remote(image_repository, client_instance, official_or_not) if len(found_list) == 1: print("No official image found for {}".format(image_repository)) else: print("Official images on Docker Hub:") for official_name in found_list[1:]: print("{}. {}".format(found_list.index(official_name), official_name)) while True: try: image_index_to_pull = int(input("\nWhich image would you like to work on from the above list?\n" "Enter the number which appears with the image name.\n" "e.g. If you want to pull\n" "3. 
tomcat7\n" "Then type in 3 and press the Enter key.\n")) image_name_to_pull = found_list[image_index_to_pull] break except (IndexError, ValueError): pass image_tags = list(['']) image_tags_request = requests.get( "https://registry.hub.docker.com/v1/repositories/{}/tags".format(image_name_to_pull)) for image_tag_details in image_tags_request.json(): image_tags.append(image_tag_details['name']) print("\n\nThe tags available for {} on Docker Hub are:\n".format(image_name_to_pull)) for tag_name in image_tags[1:]: print("{}. {}".format(image_tags.index(tag_name), tag_name)) print("\n") if image_repository_tags in image_tags[1:]: print("Found specified {} tag in the available tags.".format(image_repository_tags)) image_tag_to_pull = image_repository_tags else: print("Could not find {} tag in the available tags.\n" "Which {} tag do you want to work with?\n".format(image_repository_tags, image_name_to_pull)) while True: try: tag_input = int(input()) image_tag_to_pull = image_tags[tag_input] break except (IndexError, ValueError): print("Enter a valid tag number!") pass image_to_pull = "{}:{}".format(image_name_to_pull, image_tag_to_pull) print("Pulling {} now...".format(image_to_pull)) pull_process(image_to_pull, client_instance) def search_images_local(opened_client, user_repotag): local_repotags = list() for image_name in opened_client.images(): for separate_tags in image_name['RepoTags']: local_repotags.append(separate_tags) if user_repotag in local_repotags: return True else: return False def search_images_name_remote(name_to_search, opened_client, controller_official): hub_results = opened_client.search(name_to_search) found_list = list(['']) for filtered_results in hub_results: if (controller_official == "o") and (filtered_results['is_official']): print("Found {}".format(filtered_results['name'])) found_list.append(filtered_results['name']) elif controller_official == "u": print("Found {}".format(filtered_results['name'])) found_list.append(filtered_results['name']) 
return found_list def pull_process(image_to_pull, opened_client): for pull_progress in opened_client.pull(image_to_pull, stream=True): for pull_element, pull_key in json.loads(pull_progress.decode()).items(): if pull_element == 'id': print("For image id {}:".format(pull_key.lower())) if pull_element == 'status': print("The current status is {}".format(pull_key.lower())) def start_container(opened_client, container_up): container_object = ContainerClass() host_config_dict_var = container_object.host_config_variables() host_config_dict = dict() for param, value in host_config_dict_var.items(): # exec(param + '=value') host_config_dict.update(opened_client.create_host_config(**host_config_dict_var)) container = opened_client.create_container(image=container_up, name=container_object.name, hostname=container_object.hostname, entrypoint="/bin/bash", stdin_open=True, tty=True, ports=[22], host_config=host_config_dict) container_id = container['Id'] print("Container with ID {} has been created.".format(container_id)) if container['Warnings'] is None: print("The container was created with no warnings.") else: print("These warning(s) popped up while creating the container.\n{}".format(container['Warnings'])) print("Starting container {}.".format(container_object.name)) opened_client.start(container= container_id) def foreground_process(image_repository, image_repository_tags): # Establishing connection client_instance = Client(base_url=docker_config()) user_repotag = "{}:{}".format(image_repository, image_repository_tags) # Searching for images locally local_exists = search_images_local(client_instance, user_repotag) if local_exists: print("Found the image {} locally, proceeding now.".format(user_repotag)) elif not local_exists: print("The image does not exist locally.\n" "HubOps will now search for images on Docker Hub.\n") get_image_remote(client_instance, image_repository, image_repository_tags) print("Setting up the container now...") start_container(client_instance, 
user_repotag) if __name__ == '__main__': try: image_name_argv = argv[1] except IndexError: image_name_argv = "centos" print("No image name entered. Setting {} by default.".format(image_name_argv)) try: image_tag_argv = argv[2] except IndexError: image_tag_argv = "latest" print("No image tag entered. Setting {} by default.".format(image_tag_argv)) foreground_process(image_name_argv, image_tag_argv)
gpl-2.0
iamjy/beaglebone-kernel
tools/perf/scripts/python/failed-syscalls-by-pid.py
11180
2058
# failed system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide failed system call totals, broken down by pid. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return if ret < 0: try: syscalls[common_comm][common_pid][id][ret] += 1 except TypeError: syscalls[common_comm][common_pid][id][ret] = 1 def print_error_totals(): if for_comm is not None: print "\nsyscall errors for %s:\n\n" % (for_comm), else: print "\nsyscall errors:\n\n", print "%-30s %10s\n" % ("comm [pid]", "count"), print "%-30s %10s\n" % ("------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): print " err = %-20s %10d\n" % (strerror(ret), val),
gpl-2.0
tuos/FlowAndCorrelations
tracking/eff/june2015/v1/recoMatch_cfg.py
1
11099
# EXTRA reconstruction and matching # # Started from: # # Auto generated configuration file # using: # Revision: 1.381.2.28 # Source: /local/reps/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v # with command line options: step1 --filein dbs:/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/HiFall13-STARTHI53_V28-v2/GEN-SIM --fileout file:step1.root --mc --eventcontent RECODEBUG --datatier GEN-SIM-RECO --conditions STARTHI53_LV1::All --step DIGI,L1,DIGI2RAW,HLT:HIon,RAW2DIGI,L1Reco,RECO --scenario HeavyIons -n 3 --no_exec import FWCore.ParameterSet.Config as cms process = cms.Process('HLT') # import of standard configurations process.load('Configuration.StandardSequences.Services_cff') process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') process.load('FWCore.MessageService.MessageLogger_cfi') process.load('Configuration.EventContent.EventContentHeavyIons_cff') process.load('SimGeneral.MixingModule.mixNoPU_cfi') process.load('Configuration.StandardSequences.GeometryRecoDB_cff') process.load('Configuration.StandardSequences.MagneticField_38T_cff') process.load('Configuration.StandardSequences.Digi_cff') process.load('Configuration.StandardSequences.SimL1Emulator_cff') process.load('Configuration.StandardSequences.DigiToRaw_cff') process.load('HLTrigger.Configuration.HLT_HIon_cff') process.load('Configuration.StandardSequences.RawToDigi_cff') process.load('Configuration.StandardSequences.L1Reco_cff') process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff') process.load('Configuration.StandardSequences.EndOfProcess_cff') process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(4) ) process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True)) # Input source process.source = cms.Source("PoolSource", secondaryFileNames = cms.untracked.vstring(), fileNames = 
cms.untracked.vstring('/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/00699AE5-5A5E-E311-83B4-008CFA007B98.root', '/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/02B3F103-995E-E311-BD92-003048C9AC48.root', '/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/02F3428A-B35E-E311-8A73-848F69FD4553.root', '/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/0444BD90-995E-E311-924E-02163E00B749.root', '/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/0486ABC5-985E-E311-8E54-02163E008CDD.root', '/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/0806398C-985E-E311-BF1C-003048FEAEF0.root', '/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/0889A20B-C35E-E311-9A53-008CFA001D7C.root', '/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/0A9BD954-985E-E311-AF53-003048FEB9F6.root', '/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/0E02A540-FF5C-E311-9950-848F69FD298E.root', '/store/himc/HiFall13/Hydjet1p8_TuneDrum_Quenched_MinBias_2760GeV/GEN-SIM/STARTHI53_V28-v2/00000/0E142734-995E-E311-8698-02163E00B50F.root') ) # Production Info process.configurationMetadata = cms.untracked.PSet( version = cms.untracked.string('$Revision: 1.381.2.28 $'), annotation = cms.untracked.string('step1 nevts:3'), name = cms.untracked.string('PyReleaseValidation') ) # Output definition #process.RECODEBUGoutput = cms.OutputModule("PoolOutputModule", # splitLevel = cms.untracked.int32(0), # eventAutoFlushCompressedSize = cms.untracked.int32(5242880), # outputCommands = process.RECODEBUGEventContent.outputCommands, # fileName = cms.untracked.string('file:step1.root'), # SelectEvents = 
cms.untracked.PSet(SelectEvents = cms.vstring('filter_step')), # dataset = cms.untracked.PSet( # filterName = cms.untracked.string(''), # dataTier = cms.untracked.string('GEN-SIM-RECO') # ) #) # Additional output definition #process.RECODEBUGoutput.outputCommands += ['keep *_hiLowPtPixelTracks_*_*'] #process.RECODEBUGoutput.outputCommands += ['keep *_tpRecoAssocHiLowPtPixelTracks_*_*'] # Other statements from Configuration.AlCa.GlobalTag import GlobalTag process.GlobalTag = GlobalTag(process.GlobalTag, 'STARTHI53_LV1::All', '') # Impact Parameter Filtering process.load('Appeltel.ImpactParameterFilter.ImpactParameterFilter_cfi') process.ipf = process.impactParameterFilter.clone( bMin = -1.0, bMax = 4.0 # bMin = 11.0, # bMax = 20.0 ) # Track Association process.load("SimTracker.TrackAssociation.trackingParticleRecoTrackAsssociation_cfi") process.tpRecoAssocHiLowPtPixelTracks = process.trackingParticleRecoTrackAsssociation.clone() process.tpRecoAssocHiLowPtPixelTracks.label_tr = cms.InputTag("hiGeneralAndPixelTracks") #process.tpRecoAssocHiLowPtPixelTracks.associator = cms.string('TrackAssociatorByHits') #process.tpRecoAssocHiLowPtPixelTracks.associator = cms.string('TrackAssociatorByChi2') process.tpRecoAssocHiLowPtPixelTracks.associator = cms.string('quickTrackAssociatorByHits') process.tpRecoAssocHiGeneralTracks = process.trackingParticleRecoTrackAsssociation.clone() process.tpRecoAssocHiGeneralTracks.label_tr = cms.InputTag("hiGeneralTracks") #process.tpRecoAssocHiGeneralTracks.associator = cms.string('TrackAssociatorByChi2') #process.load("SimTracker.TrackAssociation.TrackAssociatorByHits_cfi") #process.TrackAssociatorByHits.SimToRecoDenominator = cms.string('reco') #process.load("SimTracker.TrackAssociation.TrackAssociatorByChi2_cfi") #process.TrackAssociatorByChi2ESProducer.chi2cut = cms.double(100.0) process.load("SimTracker.TrackAssociation.quickTrackAssociatorByHits_cfi") process.quickTrackAssociatorByHits.SimToRecoDenominator = cms.string('reco') # 
Centrality from CommonFunctions_cff import * overrideCentrality(process) process.HeavyIonGlobalParameters = cms.PSet( centralityVariable = cms.string("HFtowers"), nonDefaultGlauberModel = cms.string("Hydjet_Drum"), centralitySrc = cms.InputTag("hiCentrality") ) # Analysis etabins = [] eta = -2.5 for i in range(0,51): eta = -2.5 + i*0.1 etabins.append(eta) process.load('Appeltel.RpPbAnalysis.RpPbTrackingCorrections_cfi') process.trkCorr_merged = process.pPbTrkCorr.clone( trackSrc = cms.InputTag("hiGeneralAndPixelTracks"), vertexSrc = cms.InputTag("hiSelectedVertex"), tpEffSrc = cms.InputTag('mergedtruth','MergedTrackTruth'), tpFakSrc = cms.InputTag('mergedtruth','MergedTrackTruth'), associatorMap = cms.InputTag('tpRecoAssocHiLowPtPixelTracks'), qualityString = cms.string("highPurity"), dxyErrMax = cms.double(999.0), dzErrMax = cms.double(999.0), ptErrMax = cms.double(999.0), occByPhi = cms.bool(False), occByCentrality = cms.bool(True), # occBins = cms.vdouble(-3.2, -3.0, -2.8, -2.6, -2.4, -2.2, -2.0, -1.8, # -1.6, -1.4, -1.2, -1.0, -0.8, -0.6, -0.4, -0.2, 0.0, # 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, # 2.2, 2.4, 2.6, 2.8, 3.0, 3.2), occBins = cms.vdouble(0,20,40,60,80,100,200), etaBins = cms.vdouble(-0.8,-0.48,-0.16,0.16,0.48,0.8), # etaBins = cms.vdouble( # -2.4, -2.2, -2.0, -1.8, -1.6, -1.4, # -1.2, -1.0, -0.8, -0.6, -0.4, -0.2, 0.0, # 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, # 1.6, 1.8, 2.0, 2.2, 2.4 # ), # ptBins = cms.vdouble( # 0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, # 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, # 1.0, 1.05, 1.1, 1.15, 1.2, # # 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, # 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, # 2.0, 2.05, # 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, # 2.5, 2.6, 2.7, 2.8, 2.9, # 3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.5, 5.0, 7.5, 10.0, 12.0, 15.0, # 20.0, 25.0, 30.0, 45.0, 60.0, 90.0, 120.0, # 180.0, 300.0, 500.0 # ), ptBins = cms.vdouble( 0.2,0.3,0.4,0.5,0.6,0.8,1.0,1.2, 1.4,1.6,1.8,2.0,2.2, 
2.4,2.6,2.8,3.0,3.5, 4.0,4.5,5.0,6.0,7.0, 8.0,9.0,10.0,12,14, 16,18,20 ), fillTrkPerfHistos = cms.bool(False), fillTrkPerfHistosRF = cms.bool(False) ) process.trkCorr_general = process.trkCorr_merged.clone( trackSrc = cms.InputTag("hiGeneralTracks"), associatorMap = cms.InputTag('tpRecoAssocHiGeneralTracks') ) # TFile Output process.TFileService = cms.Service("TFileService", fileName = cms.string('trackCorrections.root') ) # Path and EndPath definitions process.filter_step = cms.Path(process.ipf) process.digitisation_step = cms.Path(process.pdigi) process.L1simulation_step = cms.Path(process.SimL1Emulator) process.digi2raw_step = cms.Path(process.DigiToRaw) process.raw2digi_step = cms.Path(process.RawToDigi) process.L1Reco_step = cms.Path(process.L1Reco) process.reconstruction_step = cms.Path(process.reconstructionHeavyIons) process.pixelTracking_step = cms.Path(process.hiConformalPixelTracks*process.hiPixelOnlyStepSelector*process.hiHighPtStepSelector*process.hiGeneralAndPixelTracks) process.association_step = cms.Path(process.tpRecoAssocHiLowPtPixelTracks*process.tpRecoAssocHiGeneralTracks) process.ana_step = cms.Path(process.trkCorr_merged*process.trkCorr_general) #process.endjob_step = cms.EndPath(process.endOfProcess) #process.RECODEBUGoutput_step = cms.EndPath(process.RECODEBUGoutput) # Schedule definition #process.schedule = cms.Schedule(process.filter_step,process.digitisation_step,process.L1simulation_step,process.digi2raw_step) #process.schedule.extend(process.HLTSchedule) #process.schedule.extend([process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.pixelTracking_step,process.association_step,process.endjob_step,process.RECODEBUGoutput_step]) process.schedule = cms.Schedule(process.filter_step,process.digitisation_step,process.L1simulation_step,process.digi2raw_step) #process.schedule.extend(process.HLTSchedule) 
#process.schedule.extend([process.raw2digi_step,process.reconstruction_step,process.pixelTracking_step,process.association_step,process.ana_step,process.endjob_step,process.RECODEBUGoutput_step]) process.schedule.extend([process.raw2digi_step,process.reconstruction_step,process.pixelTracking_step,process.association_step,process.ana_step]) # customisation of the process. # Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC #call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC process = customizeHLTforMC(process) # End of customisation functions
mit
CYBAI/servo
components/script/dom/bindings/codegen/parser/tests/test_unenumerable_own_properties.py
15
1634
def WebIDLTest(parser, harness): parser.parse( """ interface Foo {}; [LegacyUnenumerableNamedProperties] interface Bar : Foo { getter long(DOMString name); }; interface Baz : Bar { getter long(DOMString name); }; """); results = parser.finish(); harness.check(len(results), 3, "Should have three interfaces") parser = parser.reset() threw = False try: parser.parse(""" [LegacyUnenumerableNamedProperties] interface NoNamedGetter { }; """) results = parser.finish() except Exception as x: threw = True harness.ok(threw, "Should have thrown.") parser = parser.reset() threw = False try: parser.parse(""" [LegacyUnenumerableNamedProperties=Foo] interface ShouldNotHaveArg { getter long(DOMString name); }; """) results = parser.finish() except Exception as x: threw = True harness.ok(threw, "Should have thrown.") parser = parser.reset() threw = False try: parser.parse(""" [LegacyUnenumerableNamedProperties] interface Foo { getter long(DOMString name); }; interface Bar : Foo {}; [LegacyUnenumerableNamedProperties] interface Baz : Bar { getter long(DOMString name); }; """) results = parser.finish() except Exception as x: threw = True harness.ok(threw, "Should have thrown.")
mpl-2.0
ProgVal/cjdns
node_build/dependencies/libuv/build/gyp/pylib/gyp/win_tool.py
395
12634
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions for Windows builds. These functions are executed via gyp-win-tool when using the ninja generator. """ import os import re import shutil import subprocess import stat import string import sys BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # A regex matching an argument corresponding to the output filename passed to # link.exe. _LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE) def main(args): executor = WinTool() exit_code = executor.Dispatch(args) if exit_code is not None: sys.exit(exit_code) class WinTool(object): """This class performs all the Windows tooling steps. The methods can either be executed directly, or dispatched from an argument list.""" def _UseSeparateMspdbsrv(self, env, args): """Allows to use a unique instance of mspdbsrv.exe per linker instead of a shared one.""" if len(args) < 1: raise Exception("Not enough arguments") if args[0] != 'link.exe': return # Use the output filename passed to the linker to generate an endpoint name # for mspdbsrv.exe. endpoint_name = None for arg in args: m = _LINK_EXE_OUT_ARG.match(arg) if m: endpoint_name = re.sub(r'\W+', '', '%s_%d' % (m.group('out'), os.getpid())) break if endpoint_name is None: return # Adds the appropriate environment variable. This will be read by link.exe # to know which instance of mspdbsrv.exe it should connect to (if it's # not set then the default endpoint is used). 
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name def Dispatch(self, args): """Dispatches a string command to a method.""" if len(args) < 1: raise Exception("Not enough arguments") method = "Exec%s" % self._CommandifyName(args[0]) return getattr(self, method)(*args[1:]) def _CommandifyName(self, name_string): """Transforms a tool name like recursive-mirror to RecursiveMirror.""" return name_string.title().replace('-', '') def _GetEnv(self, arch): """Gets the saved environment from a file for a given architecture.""" # The environment is saved as an "environment block" (see CreateProcess # and msvs_emulation for details). We convert to a dict here. # Drop last 2 NULs, one for list terminator, one for trailing vs. separator. pairs = open(arch).read()[:-2].split('\0') kvs = [item.split('=', 1) for item in pairs] return dict(kvs) def ExecStamp(self, path): """Simple stamp command.""" open(path, 'w').close() def ExecRecursiveMirror(self, source, dest): """Emulation of rm -rf out && cp -af in out.""" if os.path.exists(dest): if os.path.isdir(dest): def _on_error(fn, path, excinfo): # The operation failed, possibly because the file is set to # read-only. If that's why, make it writable and try the op again. if not os.access(path, os.W_OK): os.chmod(path, stat.S_IWRITE) fn(path) shutil.rmtree(dest, onerror=_on_error) else: if not os.access(dest, os.W_OK): # Attempt to make the file writable before deleting it. os.chmod(dest, stat.S_IWRITE) os.unlink(dest) if os.path.isdir(source): shutil.copytree(source, dest) else: shutil.copy2(source, dest) def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args): """Filter diagnostic output from link that looks like: ' Creating library ui.dll.lib and object ui.dll.exp' This happens when there are exports from the dll or exe. 
""" env = self._GetEnv(arch) if use_separate_mspdbsrv == 'True': self._UseSeparateMspdbsrv(env, args) link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]), shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = link.communicate() for line in out.splitlines(): if not line.startswith(' Creating library '): print line return link.returncode def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname, mt, rc, intermediate_manifest, *manifests): """A wrapper for handling creating a manifest resource and then executing a link command.""" # The 'normal' way to do manifests is to have link generate a manifest # based on gathering dependencies from the object files, then merge that # manifest with other manifests supplied as sources, convert the merged # manifest to a resource, and then *relink*, including the compiled # version of the manifest resource. This breaks incremental linking, and # is generally overly complicated. Instead, we merge all the manifests # provided (along with one that includes what would normally be in the # linker-generated one, see msvs_emulation.py), and include that into the # first and only link. We still tell link to generate a manifest, but we # only use that to assert that our simpler process did not miss anything. 
variables = { 'python': sys.executable, 'arch': arch, 'out': out, 'ldcmd': ldcmd, 'resname': resname, 'mt': mt, 'rc': rc, 'intermediate_manifest': intermediate_manifest, 'manifests': ' '.join(manifests), } add_to_ld = '' if manifests: subprocess.check_call( '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' '-manifest %(manifests)s -out:%(out)s.manifest' % variables) if embed_manifest == 'True': subprocess.check_call( '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest' ' %(out)s.manifest.rc %(resname)s' % variables) subprocess.check_call( '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s ' '%(out)s.manifest.rc' % variables) add_to_ld = ' %(out)s.manifest.res' % variables subprocess.check_call(ldcmd + add_to_ld) # Run mt.exe on the theoretically complete manifest we generated, merging # it with the one the linker generated to confirm that the linker # generated one does not add anything. This is strictly unnecessary for # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not # used in a #pragma comment. if manifests: # Merge the intermediate one with ours to .assert.manifest, then check # that .assert.manifest is identical to ours. subprocess.check_call( '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' '-manifest %(out)s.manifest %(intermediate_manifest)s ' '-out:%(out)s.assert.manifest' % variables) assert_manifest = '%(out)s.assert.manifest' % variables our_manifest = '%(out)s.manifest' % variables # Load and normalize the manifests. mt.exe sometimes removes whitespace, # and sometimes doesn't unfortunately. 
with open(our_manifest, 'rb') as our_f: with open(assert_manifest, 'rb') as assert_f: our_data = our_f.read().translate(None, string.whitespace) assert_data = assert_f.read().translate(None, string.whitespace) if our_data != assert_data: os.unlink(out) def dump(filename): sys.stderr.write('%s\n-----\n' % filename) with open(filename, 'rb') as f: sys.stderr.write(f.read() + '\n-----\n') dump(intermediate_manifest) dump(our_manifest) dump(assert_manifest) sys.stderr.write( 'Linker generated manifest "%s" added to final manifest "%s" ' '(result in "%s"). ' 'Were /MANIFEST switches used in #pragma statements? ' % ( intermediate_manifest, our_manifest, assert_manifest)) return 1 def ExecManifestWrapper(self, arch, *args): """Run manifest tool with environment set. Strip out undesirable warning (some XML blocks are recognized by the OS loader, but not the manifest tool).""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if line and 'manifest authoring warning 81010002' not in line: print line return popen.returncode def ExecManifestToRc(self, arch, *args): """Creates a resource file pointing a SxS assembly manifest. |args| is tuple containing path to resource file, path to manifest file and resource name which can be "1" (for executables) or "2" (for DLLs).""" manifest_path, resource_path, resource_name = args with open(resource_path, 'wb') as output: output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % ( resource_name, os.path.abspath(manifest_path).replace('\\', '/'))) def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl, *flags): """Filter noisy filenames output from MIDL compile step that isn't quietable via command line flags. 
""" args = ['midl', '/nologo'] + list(flags) + [ '/out', outdir, '/tlb', tlb, '/h', h, '/dlldata', dlldata, '/iid', iid, '/proxy', proxy, idl] env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() # Filter junk out of stdout, and write filtered versions. Output we want # to filter is pairs of lines that look like this: # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl # objidl.idl lines = out.splitlines() prefixes = ('Processing ', '64 bit Processing ') processing = set(os.path.basename(x) for x in lines if x.startswith(prefixes)) for line in lines: if not line.startswith(prefixes) and line not in processing: print line return popen.returncode def ExecAsmWrapper(self, arch, *args): """Filter logo banner from invocations of asm.exe.""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Copyright (C) Microsoft Corporation') and not line.startswith('Microsoft (R) Macro Assembler') and not line.startswith(' Assembling: ') and line): print line return popen.returncode def ExecRcWrapper(self, arch, *args): """Filter logo banner from invocations of rc.exe. Older versions of RC don't support the /nologo flag.""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and not line.startswith('Copyright (C) Microsoft Corporation') and line): print line return popen.returncode def ExecActionWrapper(self, arch, rspfile, *dir): """Runs an action command line from a response file using the environment for |arch|. 
If |dir| is supplied, use that as the working directory.""" env = self._GetEnv(arch) # TODO(scottmg): This is a temporary hack to get some specific variables # through to actions that are set after gyp-time. http://crbug.com/333738. for k, v in os.environ.iteritems(): if k not in env: env[k] = v args = open(rspfile).read() dir = dir[0] if dir else None return subprocess.call(args, shell=True, env=env, cwd=dir) def ExecClCompile(self, project_dir, selected_files): """Executed by msvs-ninja projects when the 'ClCompile' target is used to build selected C/C++ files.""" project_dir = os.path.relpath(project_dir, BASE_DIR) selected_files = selected_files.split(';') ninja_targets = [os.path.join(project_dir, filename) + '^^' for filename in selected_files] cmd = ['ninja.exe'] cmd.extend(ninja_targets) return subprocess.call(cmd, shell=True, cwd=BASE_DIR) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
gpl-3.0
harterj/moose
python/chigger/tests/clipping/clip_change.py
12
1086
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html

import vtk
import chigger

# Fixed camera so the rendered test images are reproducible across runs.
camera = vtk.vtkCamera()
camera.SetViewUp(0.2603, 0.5500, 0.7936)
camera.SetPosition(-12.3985, 5.2867, 0.8286)
camera.SetFocalPoint(0.2326, 0.0324, 0.3278)

# Clip plane along the (1,1,1) diagonal; origin is updated per step below.
clip = chigger.filters.PlaneClipper(normal=[1, 1, 1], normalized=False)

reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e')
mug = chigger.exodus.ExodusResult(reader, cmap='viridis', variable='diffused',
                                  camera=camera, range=[0, 2], filters=[clip])

# Create the window
window = chigger.RenderWindow(mug, size=[300, 300], test=True)

# Render the results, moving the clip-plane origin each pass, and write one
# image per step.  (enumerate replaces the range(len(...)) anti-pattern; the
# index is still needed for the output filename.)
steps = [-1, 0, 1]
for i, step in enumerate(steps):
    clip.setOptions(origin=[step] * 3)
    window.write('clip_change' + str(i) + '.png')
window.start()
lgpl-2.1
dydek/django
django/db/backends/base/schema.py
339
43421
import hashlib
import logging

from django.db.backends.utils import truncate_name
from django.db.transaction import atomic
from django.utils import six
from django.utils.encoding import force_bytes

logger = logging.getLogger('django.db.backends.schema')


def _related_non_m2m_objects(old_field, new_field):
    # Filters out m2m objects from reverse relations.
    # Returns (old_relation, new_relation) tuples.
    # NOTE: under Python 3 zip() is lazy; callers iterate it exactly once.
    return zip(
        (obj for obj in old_field.model._meta.related_objects if not obj.field.many_to_many),
        (obj for obj in new_field.model._meta.related_objects if not obj.field.many_to_many)
    )


class BaseDatabaseSchemaEditor(object):
    """
    This class (and its subclasses) are responsible for emitting schema-changing
    statements to the databases - model creation/removal/alteration, field
    renaming, index fiddling, and so on.

    It is intended to eventually completely replace DatabaseCreation.

    This class should be used by creating an instance for each set of schema
    changes (e.g. a migration file), and by first calling start(),
    then the relevant actions, and then commit(). This is necessary to allow
    things like circular foreign key references - FKs will only be created once
    commit() is called.
    """

    # Overrideable SQL templates
    sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
    sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
    sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
    sql_delete_table = "DROP TABLE %(table)s CASCADE"

    sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
    sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
    sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
    sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
    sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
    sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
    sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
    sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
    sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
    sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"

    sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
    sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"

    sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
    sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"

    sql_create_fk = (
        "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
        "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
    )
    sql_create_inline_fk = None
    sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"

    sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
    sql_delete_index = "DROP INDEX %(name)s"

    sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
    sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"

    def __init__(self, connection, collect_sql=False):
        self.connection = connection
        self.collect_sql = collect_sql
        if self.collect_sql:
            self.collected_sql = []

    # State-managing methods

    def __enter__(self):
        self.deferred_sql = []
        # Wrap all emitted DDL in a transaction where the backend supports it.
        if self.connection.features.can_rollback_ddl:
            self.atomic = atomic(self.connection.alias)
            self.atomic.__enter__()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Deferred statements (FKs, indexes) only run on a clean exit.
        if exc_type is None:
            for sql in self.deferred_sql:
                self.execute(sql)
        if self.connection.features.can_rollback_ddl:
            self.atomic.__exit__(exc_type, exc_value, traceback)

    # Core utility functions

    def execute(self, sql, params=[]):
        """
        Executes the given SQL statement, with optional parameters.
        """
        # NOTE(review): mutable default argument (params=[]) — harmless here
        # because it is never mutated, but callers can also pass None, which
        # the collect_sql branch below distinguishes from a real param list.
        # Log the command we're running, then run it
        logger.debug("%s; (params %r)" % (sql, params))
        if self.collect_sql:
            ending = "" if sql.endswith(";") else ";"
            if params is not None:
                self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
            else:
                self.collected_sql.append(sql + ending)
        else:
            with self.connection.cursor() as cursor:
                cursor.execute(sql, params)

    def quote_name(self, name):
        # Delegate identifier quoting to the backend's operations class.
        return self.connection.ops.quote_name(name)

    @classmethod
    def _digest(cls, *args):
        """
        Generates a 32-bit digest of a set of arguments that can be used to
        shorten identifying names.
        """
        # md5 is used purely for name shortening, not for security.
        h = hashlib.md5()
        for arg in args:
            h.update(force_bytes(arg))
        return h.hexdigest()[:8]

    # Field <-> database mapping functions

    def column_sql(self, model, field, include_default=False):
        """
        Takes a field and returns its column definition.
        The field must already have had set_attributes_from_name called.
        """
        # Get the column's type and use that as the basis of the SQL
        db_params = field.db_parameters(connection=self.connection)
        sql = db_params['type']
        params = []
        # Check for fields that aren't actually columns (e.g. M2M)
        if sql is None:
            return None, None
        # Work out nullability
        null = field.null
        # If we were told to include a default value, do so
        include_default = include_default and not self.skip_default(field)
        if include_default:
            default_value = self.effective_default(field)
            if default_value is not None:
                if self.connection.features.requires_literal_defaults:
                    # Some databases can't take defaults as a parameter (oracle)
                    # If this is the case, the individual schema backend should
                    # implement prepare_default
                    sql += " DEFAULT %s" % self.prepare_default(default_value)
                else:
                    sql += " DEFAULT %s"
                    params += [default_value]
        # Oracle treats the empty string ('') as null, so coerce the null
        # option whenever '' is a possible value.
        if (field.empty_strings_allowed and not field.primary_key and
                self.connection.features.interprets_empty_strings_as_nulls):
            null = True
        if null and not self.connection.features.implied_column_null:
            sql += " NULL"
        elif not null:
            sql += " NOT NULL"
        # Primary key/unique outputs
        if field.primary_key:
            sql += " PRIMARY KEY"
        elif field.unique:
            sql += " UNIQUE"
        # Optionally add the tablespace if it's an implicitly indexed column
        tablespace = field.db_tablespace or model._meta.db_tablespace
        if tablespace and self.connection.features.supports_tablespaces and field.unique:
            sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
        # Return the sql fragment plus the query parameters it references.
        return sql, params

    def skip_default(self, field):
        """
        Some backends don't accept default values for certain columns types
        (i.e. MySQL longtext and longblob).
""" return False def prepare_default(self, value): """ Only used for backends which have requires_literal_defaults feature """ raise NotImplementedError( 'subclasses of BaseDatabaseSchemaEditor for backends which have ' 'requires_literal_defaults must provide a prepare_default() method' ) def effective_default(self, field): """ Returns a field's effective database default value """ if field.has_default(): default = field.get_default() elif not field.null and field.blank and field.empty_strings_allowed: if field.get_internal_type() == "BinaryField": default = six.binary_type() else: default = six.text_type() else: default = None # If it's a callable, call it if six.callable(default): default = default() # Run it through the field's get_db_prep_save method so we can send it # to the database. default = field.get_db_prep_save(default, self.connection) return default def quote_value(self, value): """ Returns a quoted version of the value so it's safe to use in an SQL string. This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values for particularly tricky backends (defaults are not user-defined, though, so this is safe). """ raise NotImplementedError() # Actions def create_model(self, model): """ Takes a model and creates a table for it in the database. Will also create any accompanying indexes or unique constraints. 
""" # Create column SQL, add FK deferreds if needed column_sqls = [] params = [] for field in model._meta.local_fields: # SQL definition, extra_params = self.column_sql(model, field) if definition is None: continue # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params['check']: definition += " CHECK (%s)" % db_params['check'] # Autoincrement SQL (for backends with inline variant) col_type_suffix = field.db_type_suffix(connection=self.connection) if col_type_suffix: definition += " %s" % col_type_suffix params.extend(extra_params) # FK if field.remote_field and field.db_constraint: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column if self.connection.features.supports_foreign_keys: self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s")) elif self.sql_create_inline_fk: definition += " " + self.sql_create_inline_fk % { "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), } # Add the SQL to our big list column_sqls.append("%s %s" % ( self.quote_name(field.column), definition, )) # Autoincrement SQL (for backends with post table definition variant) if field.get_internal_type() == "AutoField": autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column) if autoinc_sql: self.deferred_sql.extend(autoinc_sql) # Add any unique_togethers (always deferred, as some fields might be # created afterwards, like geometry fields with some backends) for fields in model._meta.unique_together: columns = [model._meta.get_field(field).column for field in fields] self.deferred_sql.append(self._create_unique_sql(model, columns)) # Make the table sql = self.sql_create_table % { "table": self.quote_name(model._meta.db_table), "definition": ", ".join(column_sqls) } if model._meta.db_tablespace: tablespace_sql = 
self.connection.ops.tablespace_sql(model._meta.db_tablespace) if tablespace_sql: sql += ' ' + tablespace_sql # Prevent using [] as params, in the case a literal '%' is used in the definition self.execute(sql, params or None) # Add any field index and index_together's (deferred as SQLite3 _remake_table needs it) self.deferred_sql.extend(self._model_indexes_sql(model)) # Make M2M tables for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.create_model(field.remote_field.through) def delete_model(self, model): """ Deletes a model from the database. """ # Handle auto-created intermediary models for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # Delete the table self.execute(self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), }) def alter_unique_together(self, model, old_unique_together, new_unique_together): """ Deals with a model changing its unique_together. Note: The input unique_togethers must be doubly-nested, not the single- nested ["foo", "bar"] format. """ olds = set(tuple(fields) for fields in old_unique_together) news = set(tuple(fields) for fields in new_unique_together) # Deleted uniques for fields in olds.difference(news): self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique) # Created uniques for fields in news.difference(olds): columns = [model._meta.get_field(field).column for field in fields] self.execute(self._create_unique_sql(model, columns)) def alter_index_together(self, model, old_index_together, new_index_together): """ Deals with a model changing its index_together. Note: The input index_togethers must be doubly-nested, not the single- nested ["foo", "bar"] format. 
""" olds = set(tuple(fields) for fields in old_index_together) news = set(tuple(fields) for fields in new_index_together) # Deleted indexes for fields in olds.difference(news): self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index) # Created indexes for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_index_sql(model, fields, suffix="_idx")) def _delete_composed_index(self, model, fields, constraint_kwargs, sql): columns = [model._meta.get_field(field).column for field in fields] constraint_names = self._constraint_names(model, columns, **constraint_kwargs) if len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % ( len(constraint_names), model._meta.db_table, ", ".join(columns), )) self.execute(self._delete_constraint_sql(sql, model, constraint_names[0])) def alter_db_table(self, model, old_db_table, new_db_table): """ Renames the table a model points to. """ if old_db_table == new_db_table: return self.execute(self.sql_rename_table % { "old_table": self.quote_name(old_db_table), "new_table": self.quote_name(new_db_table), }) def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace): """ Moves a model's table between tablespaces """ self.execute(self.sql_retablespace_table % { "table": self.quote_name(model._meta.db_table), "old_tablespace": self.quote_name(old_db_tablespace), "new_tablespace": self.quote_name(new_db_tablespace), }) def add_field(self, model, field): """ Creates a field on a model. 
        Usually involves adding a column, but may involve adding a
        table instead (for M2M fields)
        """
        # Special-case implicit M2M tables
        if field.many_to_many and field.remote_field.through._meta.auto_created:
            return self.create_model(field.remote_field.through)
        # Get the column's definition
        definition, params = self.column_sql(model, field, include_default=True)
        # It might not actually have a column behind it
        if definition is None:
            return
        # Check constraints can go on the column SQL here
        db_params = field.db_parameters(connection=self.connection)
        if db_params['check']:
            definition += " CHECK (%s)" % db_params['check']
        # Build the SQL and run it
        sql = self.sql_create_column % {
            "table": self.quote_name(model._meta.db_table),
            "column": self.quote_name(field.column),
            "definition": definition,
        }
        self.execute(sql, params)
        # Drop the default if we need to
        # (Django usually does not use in-database defaults; the default above
        # only exists to populate the new column for existing rows)
        if not self.skip_default(field) and field.default is not None:
            sql = self.sql_alter_column % {
                "table": self.quote_name(model._meta.db_table),
                "changes": self.sql_alter_column_no_default % {
                    "column": self.quote_name(field.column),
                }
            }
            self.execute(sql)
        # Add an index, if required
        if field.db_index and not field.unique:
            self.deferred_sql.append(self._create_index_sql(model, [field]))
        # Add any FK constraints later
        if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint:
            self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
        # Reset connection if required
        if self.connection.features.connection_persists_old_columns:
            self.connection.close()

    def remove_field(self, model, field):
        """
        Removes a field from a model. Usually involves deleting a column,
        but for M2Ms may involve deleting a table.
""" # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.delete_model(field.remote_field.through) # It might not actually have a column behind it if field.db_parameters(connection=self.connection)['type'] is None: return # Drop any FK constraints, MySQL requires explicit deletion if field.remote_field: fk_names = self._constraint_names(model, [field.column], foreign_key=True) for fk_name in fk_names: self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name)) # Delete the column sql = self.sql_delete_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def alter_field(self, model, old_field, new_field, strict=False): """ Allows a field's type, uniqueness, nullability, default, column, constraints etc. to be modified. Requires a copy of the old field as well so we can only perform changes that are required. If strict is true, raises errors if the old column does not match old_field precisely. 
""" # Ensure this field is even column-based old_db_params = old_field.db_parameters(connection=self.connection) old_type = old_db_params['type'] new_db_params = new_field.db_parameters(connection=self.connection) new_type = new_db_params['type'] if ((old_type is None and old_field.remote_field is None) or (new_type is None and new_field.remote_field is None)): raise ValueError( "Cannot alter field %s into %s - they do not properly define " "db_type (are you using a badly-written custom field?)" % (old_field, new_field), ) elif old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and old_field.remote_field.through._meta.auto_created and new_field.remote_field.through._meta.auto_created): return self._alter_many_to_many(model, old_field, new_field, strict) elif old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and not old_field.remote_field.through._meta.auto_created and not new_field.remote_field.through._meta.auto_created): # Both sides have through models; this is a no-op. 
return elif old_type is None or new_type is None: raise ValueError( "Cannot alter field %s into %s - they are not compatible types " "(you cannot alter to or from M2M fields, or add or remove " "through= on M2M fields)" % (old_field, new_field) ) self._alter_field(model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): """Actually perform a "physical" (non-ManyToMany) field update.""" # Drop any FK constraints, we'll remake them later fks_dropped = set() if old_field.remote_field and old_field.db_constraint: fk_names = self._constraint_names(model, [old_field.column], foreign_key=True) if strict and len(fk_names) != 1: raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % ( len(fk_names), model._meta.db_table, old_field.column, )) for fk_name in fk_names: fks_dropped.add((old_field.column,)) self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name)) # Has unique been removed? if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)): # Find the unique constraint for this field constraint_names = self._constraint_names(model, [old_field.column], unique=True) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, )) for constraint_name in constraint_names: self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name)) # Drop incoming FK constraints if we're a primary key and things are going # to change. 
        if old_field.primary_key and new_field.primary_key and old_type != new_type:
            # '_meta.related_field' also contains M2M reverse fields, these
            # will be filtered out
            for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
                rel_fk_names = self._constraint_names(
                    new_rel.related_model, [new_rel.field.column], foreign_key=True
                )
                for fk_name in rel_fk_names:
                    self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name))
        # Removed an index? (no strict check, as multiple indexes are possible)
        if (old_field.db_index and not new_field.db_index and
                not old_field.unique and not
                (not new_field.unique and old_field.unique)):
            # Find the index for this field
            index_names = self._constraint_names(model, [old_field.column], index=True)
            for index_name in index_names:
                self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
        # Change check constraints?
        if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
            constraint_names = self._constraint_names(model, [old_field.column], check=True)
            if strict and len(constraint_names) != 1:
                raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
                    len(constraint_names),
                    model._meta.db_table,
                    old_field.column,
                ))
            for constraint_name in constraint_names:
                self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name))
        # Have they renamed the column?
        if old_field.column != new_field.column:
            self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
        # Next, start accumulating actions to do
        actions = []
        null_actions = []
        post_actions = []
        # Type change?
        if old_type != new_type:
            fragment, other_actions = self._alter_column_type_sql(
                model._meta.db_table, old_field, new_field, new_type
            )
            actions.append(fragment)
            post_actions.extend(other_actions)
        # When changing a column NULL constraint to NOT NULL with a given
        # default value, we need to perform 4 steps:
        #  1. Add a default for new incoming writes
        #  2. Update existing NULL rows with new default
        #  3. Replace NULL constraint with NOT NULL
        #  4. Drop the default again.
        # Default change?
        old_default = self.effective_default(old_field)
        new_default = self.effective_default(new_field)
        needs_database_default = (
            old_default != new_default and
            new_default is not None and
            not self.skip_default(new_field)
        )
        if needs_database_default:
            if self.connection.features.requires_literal_defaults:
                # Some databases can't take defaults as a parameter (oracle)
                # If this is the case, the individual schema backend should
                # implement prepare_default
                actions.append((
                    self.sql_alter_column_default % {
                        "column": self.quote_name(new_field.column),
                        "default": self.prepare_default(new_default),
                    },
                    [],
                ))
            else:
                actions.append((
                    self.sql_alter_column_default % {
                        "column": self.quote_name(new_field.column),
                        "default": "%s",
                    },
                    [new_default],
                ))
        # Nullability change?
        if old_field.null != new_field.null:
            if (self.connection.features.interprets_empty_strings_as_nulls and
                    new_field.get_internal_type() in ("CharField", "TextField")):
                # The field is nullable in the database anyway, leave it alone
                pass
            elif new_field.null:
                null_actions.append((
                    self.sql_alter_column_null % {
                        "column": self.quote_name(new_field.column),
                        "type": new_type,
                    },
                    [],
                ))
            else:
                null_actions.append((
                    self.sql_alter_column_not_null % {
                        "column": self.quote_name(new_field.column),
                        "type": new_type,
                    },
                    [],
                ))
        # Only if we have a default and there is a change from NULL to NOT NULL
        four_way_default_alteration = (
            new_field.has_default() and
            (old_field.null and not new_field.null)
        )
        if actions or null_actions:
            if not four_way_default_alteration:
                # If we don't have to do a 4-way default alteration we can
                # directly run a (NOT) NULL alteration
                actions = actions + null_actions
            # Combine actions together if we can (e.g. postgres)
            if self.connection.features.supports_combined_alters and actions:
                # Fold every (sql, params) pair into one ALTER TABLE statement.
                sql, params = tuple(zip(*actions))
                actions = [(", ".join(sql), sum(params, []))]
            # Apply those actions
            for sql, params in actions:
                self.execute(
                    self.sql_alter_column % {
                        "table": self.quote_name(model._meta.db_table),
                        "changes": sql,
                    },
                    params,
                )
            if four_way_default_alteration:
                # Update existing rows with default value
                self.execute(
                    self.sql_update_with_default % {
                        "table": self.quote_name(model._meta.db_table),
                        "column": self.quote_name(new_field.column),
                        "default": "%s",
                    },
                    [new_default],
                )
                # Since we didn't run a NOT NULL change before we need to do it
                # now
                for sql, params in null_actions:
                    self.execute(
                        self.sql_alter_column % {
                            "table": self.quote_name(model._meta.db_table),
                            "changes": sql,
                        },
                        params,
                    )
        if post_actions:
            for sql, params in post_actions:
                self.execute(sql, params)
        # Added a unique?
        if (not old_field.unique and new_field.unique) or (
            old_field.primary_key and not new_field.primary_key and new_field.unique
        ):
            self.execute(self._create_unique_sql(model, [new_field.column]))
        # Added an index?
        if (not old_field.db_index and new_field.db_index and
                not new_field.unique and not
                (not old_field.unique and new_field.unique)):
            self.execute(self._create_index_sql(model, [new_field], suffix="_uniq"))
        # Type alteration on primary key? Then we need to alter the column
        # referring to us.
        rels_to_update = []
        if old_field.primary_key and new_field.primary_key and old_type != new_type:
            rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
        # Changed to become primary key?
        # Note that we don't detect unsetting of a PK, as we assume another field
        # will always come along and replace it.
        if not old_field.primary_key and new_field.primary_key:
            # First, drop the old PK
            constraint_names = self._constraint_names(model, primary_key=True)
            if strict and len(constraint_names) != 1:
                raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
                    len(constraint_names),
                    model._meta.db_table,
                ))
            for constraint_name in constraint_names:
                self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name))
            # Make the new one
            self.execute(
                self.sql_create_pk % {
                    "table": self.quote_name(model._meta.db_table),
                    "name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")),
                    "columns": self.quote_name(new_field.column),
                }
            )
            # Update all referencing columns
            rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
        # Handle our type alters on the other end of rels from the PK stuff above
        for old_rel, new_rel in rels_to_update:
            rel_db_params = new_rel.field.db_parameters(connection=self.connection)
            rel_type = rel_db_params['type']
            fragment, other_actions = self._alter_column_type_sql(
                new_rel.related_model._meta.db_table, old_rel.field, new_rel.field, rel_type
            )
            self.execute(
                self.sql_alter_column % {
                    "table": self.quote_name(new_rel.related_model._meta.db_table),
                    "changes": fragment[0],
                },
                fragment[1],
            )
            for sql, params in other_actions:
                self.execute(sql, params)
        # Does it have a foreign key?
        if (new_field.remote_field and
                (fks_dropped or not old_field.remote_field or not old_field.db_constraint) and
                new_field.db_constraint):
            self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
        # Rebuild FKs that pointed to us if we previously had to drop them
        if old_field.primary_key and new_field.primary_key and old_type != new_type:
            for rel in new_field.model._meta.related_objects:
                if not rel.many_to_many:
                    self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk"))
        # Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']: self.execute( self.sql_create_check % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")), "column": self.quote_name(new_field.column), "check": new_db_params['check'], } ) # Drop the default if we need to # (Django usually does not use in-database defaults) if needs_database_default: sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": self.sql_alter_column_no_default % { "column": self.quote_name(new_field.column), } } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def _alter_column_type_sql(self, table, old_field, new_field, new_type): """ Hook to specialize column type alteration for different backends, for cases when a creation type is different to an alteration type (e.g. SERIAL in PostgreSQL, PostGIS fields). Should return two things; an SQL fragment of (sql, params) to insert into an ALTER TABLE statement, and a list of extra (sql, params) tuples to run once the field is altered. """ return ( ( self.sql_alter_column_type % { "column": self.quote_name(new_field.column), "type": new_type, }, [], ), [], ) def _alter_many_to_many(self, model, old_field, new_field, strict): """ Alters M2Ms to repoint their to= endpoints. 
""" # Rename the through table if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table: self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table) # Repoint the FK to the other side self.alter_field( new_field.remote_field.through, # We need the field that points to the target model, so we can tell alter_field to change it - # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()), ) self.alter_field( new_field.remote_field.through, # for self-referential models we need to alter field from the other end too old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()), ) def _create_index_name(self, model, column_names, suffix=""): """ Generates a unique name for an index/unique constraint. 
""" # If there is just one column in the index, use a default algorithm from Django if len(column_names) == 1 and not suffix: return truncate_name( '%s_%s' % (model._meta.db_table, self._digest(column_names[0])), self.connection.ops.max_name_length() ) # Else generate the name for the index using a different algorithm table_name = model._meta.db_table.replace('"', '').replace('.', '_') index_unique_name = '_%s' % self._digest(table_name, *column_names) max_length = self.connection.ops.max_name_length() or 200 # If the index name is too long, truncate it index_name = ('%s_%s%s%s' % ( table_name, column_names[0], index_unique_name, suffix, )).replace('"', '').replace('.', '_') if len(index_name) > max_length: part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix)) index_name = '%s%s' % (table_name[:(max_length - len(part))], part) # It shouldn't start with an underscore (Oracle hates this) if index_name[0] == "_": index_name = index_name[1:] # If it's STILL too long, just hash it down if len(index_name) > max_length: index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length] # It can't start with a number on Oracle, so prepend D if we need to if index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name def _create_index_sql(self, model, fields, suffix="", sql=None): """ Return the SQL statement to create the index for one or several fields. `sql` can be specified if the syntax differs from the standard (GIS indexes, ...). 
""" if len(fields) == 1 and fields[0].db_tablespace: tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace) elif model._meta.db_tablespace: tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace) else: tablespace_sql = "" if tablespace_sql: tablespace_sql = " " + tablespace_sql columns = [field.column for field in fields] sql_create_index = sql or self.sql_create_index return sql_create_index % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)), "columns": ", ".join(self.quote_name(column) for column in columns), "extra": tablespace_sql, } def _model_indexes_sql(self, model): """ Return all index SQL statements (field indexes, index_together) for the specified model, as a list. """ if not model._meta.managed or model._meta.proxy or model._meta.swapped: return [] output = [] for field in model._meta.local_fields: if field.db_index and not field.unique: output.append(self._create_index_sql(model, [field], suffix="")) for field_names in model._meta.index_together: fields = [model._meta.get_field(field) for field in field_names] output.append(self._create_index_sql(model, fields, suffix="_idx")) return output def _rename_field_sql(self, table, old_field, new_field, new_type): return self.sql_rename_column % { "table": self.quote_name(table), "old_column": self.quote_name(old_field.column), "new_column": self.quote_name(new_field.column), "type": new_type, } def _create_fk_sql(self, model, field, suffix): from_table = model._meta.db_table from_column = field.column to_table = field.target_field.model._meta.db_table to_column = field.target_field.column suffix = suffix % { "to_table": to_table, "to_column": to_column, } return self.sql_create_fk % { "table": self.quote_name(from_table), "name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)), "column": self.quote_name(from_column), "to_table": self.quote_name(to_table), 
"to_column": self.quote_name(to_column), } def _create_unique_sql(self, model, columns): return self.sql_create_unique % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")), "columns": ", ".join(self.quote_name(column) for column in columns), } def _delete_constraint_sql(self, template, model, name): return template % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(name), } def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None): """ Returns all constraint names matching the columns and conditions """ column_names = list(column_names) if column_names else None with self.connection.cursor() as cursor: constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table) result = [] for name, infodict in constraints.items(): if column_names is None or column_names == infodict['columns']: if unique is not None and infodict['unique'] != unique: continue if primary_key is not None and infodict['primary_key'] != primary_key: continue if index is not None and infodict['index'] != index: continue if check is not None and infodict['check'] != check: continue if foreign_key is not None and not infodict['foreign_key']: continue result.append(name) return result
bsd-3-clause
nicolas-petit/clouder
clouder_website_payment/clouder_website_payment.py
1
9393
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron, Nicolas Petit
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, fields, api, http, _
from openerp.exceptions import except_orm
import openerp
import threading
import logging

_logger = logging.getLogger(__name__)


class ClouderApplication(models.Model):
    """
    Checks that the web create type has pricegrids
    """
    _inherit = 'clouder.application'

    @api.one
    @api.constrains('pricegrid_ids', 'web_create_type')
    def _check_create_type_pricegrids(self):
        # A web-creatable application must be priceable, otherwise the
        # payment step of the public signup flow cannot compute an amount.
        if self.web_create_type != 'disabled' and not self.pricegrid_ids:
            raise except_orm(
                _('Application error!'),
                _("You cannot define a web creation type without defining price grids.")
            )

    @api.multi
    def create_instance_from_request(self, session_id):
        """
        Overwrite instance creation to set session status.

        Delegates to the parent implementation, then records the outcome on
        the ``clouder.web.session`` record: 'done' on success, 'error'
        otherwise. On exception the session is flagged 'error' from a
        separate thread/cursor so the update survives the rollback of the
        failing transaction, and the exception is re-raised.
        """
        created_id = False
        try:
            created_id = super(ClouderApplication, self).create_instance_from_request(session_id)
        except Exception as e:
            def thread_session_update_state(dbname, uid, context):
                # Creating a separate cursor to commit errors in case of exception thrown
                with openerp.api.Environment.manage():
                    with openerp.registry(dbname).cursor() as new_cr:
                        new_env = api.Environment(new_cr, uid, context)
                        orm_clws = new_env['clouder.web.session']
                        session = orm_clws.browse([session_id])[0]
                        session.state = 'error'
                        # Commit the change we just made
                        new_env.cr.commit()
                # Return to avoid getting back to the other instructions after this thread
                return

            # Making a thread to avoid having a deadlock when updating the session
            sess_update = threading.Thread(
                target=thread_session_update_state,
                args=(self.env.cr.dbname, self.env.uid, self.env.context)
            )
            sess_update.start()
            # Throw the error to finish the process and have it display in logs/screen
            raise e

        # Checking the results
        state = 'error'
        # If the session was created successfully: change state to done
        if created_id:
            state = 'done'

        # Update the session
        session = self.env['clouder.web.session'].browse([session_id])[0]
        session.state = state

        return created_id


class ClouderWebSession(models.Model):
    """
    Adds invoice reference to the new instance session
    """
    _inherit = 'clouder.web.session'

    # Amount the visitor has to pay for this instance request.
    amount = fields.Float('Amount to pay')
    # Payment/invoice reference used to match payment.transaction records.
    reference = fields.Char('Invoice Reference', required=False)
    state = fields.Selection(
        [
            ('started', 'Started'),
            ('pending', 'Pending'),
            ('canceled', 'Cancelled'),
            ('payment_processed', 'Payment Processed'),
            ('error', 'Error'),
            ('done', 'Done')
        ],
        'State',
        default='started'
    )
    invoice_id = fields.Many2one('account.invoice', 'Invoice', required=False)

    @api.model
    def launch_update_with_invoice(self):
        """
        Search for sessions that have been paid and launch invoice creation
        """
        sessions = self.search([
            ('state', '=', 'payment_processed'),
            ('invoice_id', '=', False),
            ('amount', '!=', False)
        ])

        # No session meets the criteria: do nothing
        if not sessions:
            return

        orm_trans = self.env['payment.transaction']

        # Make an empty recordset
        sessions_to_update = sessions[0]
        sessions_to_update = sessions_to_update - sessions[0]

        for session in sessions:
            transac = orm_trans.search([('reference', '=', session.reference)])[0]
            # Add to the sessions to update if the transaction has been completed
            if transac.state == 'done':
                sessions_to_update = sessions_to_update + session

        # Launch invoice creation
        sessions_to_update.make_invoice()

    @api.multi
    def make_invoice(self):
        """
        Creates invoice and links it to the session
        """
        orm_inv = self.env['account.invoice']
        for session in self:
            # Check that the function isn't called with unsuitable sessions
            if session.state != 'payment_processed' or session.invoice_id:
                raise except_orm(
                    _('Clouder Web Session error!'),
                    _("You cannot launch invoice creation when a session is not process or already has an invoice")
                )

            # Creating invoice
            inv_desc = "{0} {1}".format(
                session.application_id.invoicing_product_id.description_sale,
                session.name
            )
            invoice_data = {
                'amount': session.amount,
                'partner_id': session.partner_id.id,
                'account_id': session.partner_id.property_account_receivable.id,
                'product_id': session.application_id.invoicing_product_id.id,
                'name': inv_desc,
                'origin': session.reference
            }
            invoice_id = orm_inv.clouder_make_invoice(invoice_data)
            invoice = orm_inv.browse([invoice_id])[0]
            session.write({'invoice_id': invoice.id})
            # Validating invoice to create reference number
            invoice.signal_workflow('invoice_open')

    @api.model
    def create_instances(self):
        """
        Creates an instance for suitable sessions
        """
        # Search for sessions that generated an invoice (payment is "done")
        sessions = self.search([
            ('invoice_id', '!=', False),
            ('state', '=', 'payment_processed')
        ])

        # No session meets the criteria: do nothing
        if not sessions:
            return

        # Launch instance creation
        orm_app = self.env['clouder.application']
        for session in sessions:
            orm_app.create_instance_from_request(session.id)

    @property
    def should_unlink(self):
        """
        Returns true if the session should be pruned from the database
        """
        d_from_str = fields.Datetime.from_string
        last_access_days = (d_from_str(fields.Datetime.now()) - d_from_str(self.write_date)).days
        if self.state == 'started' and last_access_days > 9:
            return True
        # BUG FIX: the selection value is 'canceled' (one "l"); the original
        # compared against 'cancelled', so cancelled sessions were never pruned.
        elif self.state == 'canceled' and last_access_days > 2:
            return True
        elif self.state == 'done':
            return True
        return False

    @api.model
    def prune_records(self):
        """
        Prune records that are marked as such
        Should not be called from a recordset!
        """
        for record in self.search([]):
            if record.should_unlink:
                record.unlink()


class PaymentTransaction(models.Model):
    """
    Override payment-form feedback processing so that the matching
    clouder.web.session tracks the transaction outcome.
    """
    _inherit = 'payment.transaction'

    def form_feedback(self, cr, uid, data, acquirer_name, context=None):
        # Process payment
        result = super(PaymentTransaction, self).form_feedback(cr, uid, data, acquirer_name, context=context)

        # Since this is an old-api definition we need to make the new environment ourselves
        env = api.Environment(cr, uid, context)

        # Search for corresponding web session
        orm_clws = env['clouder.web.session'].sudo()
        session = orm_clws.search([('reference', '=', data['item_number'])])

        # If no session is found, skip and proceed as usual
        if not session:
            return result
        session = session[0]

        # Finding transaction
        tx = None
        tx_find_method_name = '_%s_form_get_tx_from_data' % acquirer_name
        if hasattr(self, tx_find_method_name):
            tx = getattr(self, tx_find_method_name)(cr, uid, data, context=context)

        if tx and tx.state in ['cancel', 'error']:
            # Cancel session
            # BUG FIX: the original passed a set literal {'state', 'canceled'}
            # to write(), so the state was never updated; it must be a dict.
            session.write({'state': 'canceled'})
        elif tx and tx.state in ['pending', 'done']:
            # Change session state
            session.write({'state': 'payment_processed'})

        # Return the result from super at the end
        return result
gpl-3.0
weeksghost/dj-nodb
djnodb/runner.py
1
1164
from django.test import TransactionTestCase
from django.test.runner import DiscoverRunner
from mock import patch


class NoDbMixin(object):
    """
    Test-runner mixin that skips test-database creation when the collected
    suite contains no database-backed tests.

    Only ``TransactionTestCase`` subclasses are treated as needing a
    database; if none are present, DB setup is replaced with a patch on the
    cursor wrapper so any accidental query raises instead of hitting a
    real database.
    """

    def build_suite(self, *args, **kwargs):
        """Build the suite and remember whether any test needs the DB."""
        suite = super(NoDbMixin, self).build_suite(*args, **kwargs)
        # Generator expression instead of a throwaway list for any().
        self._needs_db = any(
            isinstance(test, TransactionTestCase) for test in suite
        )
        return suite

    def setup_databases(self, **kwargs):
        """Create the test databases only when the suite requires them."""
        if self._needs_db:
            # BUG FIX: the original forwarded an undefined ``*args`` here,
            # which raised NameError whenever a DB test was present.
            return super(NoDbMixin, self).setup_databases(**kwargs)
        if self.verbosity >= 1:
            print('No DB tests detected. Skipping DB creation...')
        # Make any stray database access fail loudly.
        self._db_patch = patch('django.db.backends.util.CursorWrapper')
        # BUG FIX: the patch was never started, so ``self._db_mock`` did not
        # exist (AttributeError) and queries were not actually blocked.
        self._db_mock = self._db_patch.start()
        self._db_mock.side_effect = RuntimeError('No testing the database!')
        return None

    def teardown_databases(self, old_config, **kwargs):
        """Undo whatever setup_databases() did."""
        if self._needs_db:
            # BUG FIX: the original forwarded an undefined ``*args`` and
            # dropped ``old_config``, which the parent implementation needs.
            return super(NoDbMixin, self).teardown_databases(old_config, **kwargs)
        self._db_patch.stop()
        return None


class NoDbTestRunner(NoDbMixin, DiscoverRunner):
    """Discover-based runner that never creates a test database."""

    def setup_databases(self, **kwargs):
        # Deliberate no-op: this runner unconditionally skips DB setup.
        pass

    def teardown_databases(self, old_config, **kwargs):
        # Nothing was set up, so there is nothing to tear down.
        pass
apache-2.0
hufsm/tu_gen2_libsigrokdecode
decoders/ds28ea00/pd.py
1
3077
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Iztok Jeras <iztok.jeras@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##

import sigrokdecode as srd

# Dictionary of FUNCTION commands and their names.
# Keys are DS28EA00 1-Wire function command bytes; values are the
# human-readable names emitted in annotations (and upper-cased to form
# the decoder's state names).
command = {
    # Scratchpad
    0x4e: 'Write scratchpad',
    0xbe: 'Read scratchpad',
    0x48: 'Copy scratchpad',
    # Thermometer
    0x44: 'Convert temperature',
    0xb4: 'Read power mode',
    0xb8: 'Recall EEPROM',
    0xf5: 'PIO access read',
    0xA5: 'PIO access write',
    0x99: 'Chain',
}

class Decoder(srd.Decoder):
    # Stacked decoder: consumes 'onewire_network' events and annotates
    # DS28EA00-specific commands and data.
    api_version = 3
    id = 'ds28ea00'
    name = 'DS28EA00'
    longname = 'Maxim DS28EA00 1-Wire digital thermometer'
    desc = '1-Wire digital thermometer with Sequence Detect and PIO.'
    license = 'gplv2+'
    inputs = ['onewire_network']
    outputs = ['ds28ea00']
    annotations = (
        ('text', 'Human-readable text'),
    )

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset decoder state to the start of a 1-Wire transaction."""
        self.trn_beg = 0
        self.trn_end = 0
        # State machine starts expecting a ROM (addressing) phase.
        self.state = 'ROM'
        self.rom = 0x0000000000000000

    def start(self):
        self.out_ann = self.register(srd.OUTPUT_ANN)

    def putx(self, data):
        # Emit an annotation spanning the current event's sample range.
        self.put(self.ss, self.es, self.out_ann, data)

    def decode(self, ss, es, data):
        """Handle one (code, value) event from the onewire_network decoder."""
        code, val = data
        self.ss, self.es = ss, es
        # State machine.
        if code == 'RESET/PRESENCE':
            # A bus reset always restarts the transaction at the ROM phase.
            self.putx([0, ['Reset/presence: %s' % ('true' if val else 'false')]])
            self.state = 'ROM'
        elif code == 'ROM':
            # 64-bit ROM code received; a function command must follow.
            self.rom = val
            self.putx([0, ['ROM: 0x%016x' % (val)]])
            self.state = 'COMMAND'
        elif code == 'DATA':
            if self.state == 'COMMAND':
                if val not in command:
                    self.putx([0, ['Unrecognized command: 0x%02x' % val]])
                    return
                self.putx([0, ['Function command: 0x%02x \'%s\'' % (val, command[val])]])
                # Subsequent data bytes are interpreted per this command.
                self.state = command[val].upper()
            elif self.state == 'READ SCRATCHPAD':
                self.putx([0, ['Scratchpad data: 0x%02x' % val]])
            elif self.state == 'CONVERT TEMPERATURE':
                self.putx([0, ['Temperature conversion status: 0x%02x' % val]])
            elif self.state in [s.upper() for s in command.values()]:
                # Recognized command whose payload decoding is not yet
                # implemented; dump raw bytes.
                self.putx([0, ['TODO \'%s\': 0x%02x' % (self.state, val)]])
gpl-3.0
ashhher3/invenio
modules/webcomment/lib/webcomment_regression_tests.py
16
40759
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012, 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """WebComment Regression Test Suite.""" __revision__ = "$Id$" from invenio.testutils import InvenioTestCase import shutil from mechanize import Browser, LinkNotFoundError, HTTPError from invenio.config import \ CFG_SITE_URL, \ CFG_WEBDIR, \ CFG_TMPDIR, \ CFG_SITE_RECORD from invenio.testutils import make_test_suite, run_test_suite, \ test_web_page_content, merge_error_messages from invenio.dbquery import run_sql from invenio.webcomment import query_add_comment_or_remark from invenio.webcommentadminlib import query_delete_comment_auth from invenio.webcomment_washer import EmailWasher def prepare_attachments(): """ We copy necessary files to temporary directory. Every time we will attach files to a comment, these files get moved, so this function must be called again. 
""" shutil.copy(CFG_WEBDIR + '/img/journal_water_dog.gif', CFG_TMPDIR) shutil.copy(CFG_WEBDIR + '/img/invenio.css', CFG_TMPDIR) class WebCommentWebPagesAvailabilityTest(InvenioTestCase): """Check WebComment web pages whether they are up or not.""" def test_your_baskets_pages_availability(self): """webcomment - availability of comments pages""" baseurl = CFG_SITE_URL + '/%s/10/comments/' % CFG_SITE_RECORD _exports = ['', 'display', 'add', 'vote', 'report'] error_messages = [] for url in [baseurl + page for page in _exports]: error_messages.extend(test_web_page_content(url)) if error_messages: self.fail(merge_error_messages(error_messages)) return def test_webcomment_admin_interface_availability(self): """webcomment - availability of WebComment Admin interface pages""" baseurl = CFG_SITE_URL + '/admin/webcomment/webcommentadmin.py/' _exports = ['', 'comments', 'delete', 'users'] error_messages = [] for url in [baseurl + page for page in _exports]: # first try as guest: error_messages.extend(test_web_page_content(url, username='guest', expected_text= 'Authorization failure')) # then try as admin: error_messages.extend(test_web_page_content(url, username='admin')) if error_messages: self.fail(merge_error_messages(error_messages)) return def test_webcomment_admin_guide_availability(self): """webcomment - availability of WebComment Admin Guide""" self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/help/admin/webcomment-admin-guide', expected_text="WebComment Admin Guide")) return def test_webcomment_mini_review_availability(self): """webcomment - availability of mini-review panel on detailed record page""" url = CFG_SITE_URL + '/%s/12' % CFG_SITE_RECORD error_messages = test_web_page_content(url, expected_text="(Not yet reviewed)") class WebCommentRestrictionsTest(InvenioTestCase): """Check WebComment restrictions""" def setUp(self): """Insert some comments in some records""" # Comments have access restrictions when: # - the comment is in a restricted collection 
('viewrestrcoll' action) # - the comment is in a restricted discussion page ('viewcomment' action) # - the comment itself is restricted ('viewrestrcomment' # action), either because of the markup of the record, or # because it is a reply to a restricted comment. self.public_record = 5 self.public_record_restr_comment = 6 self.restr_record = 42 self.restr_record_restr_comment = 41 self.restricted_discussion = 76 self.romeo_uid = 5 self.jekyll_uid = 2 self.attached_files = {'file1': CFG_TMPDIR + '/journal_water_dog.gif', 'file2': CFG_TMPDIR + '/invenio.css'} # Load content of texual file2 prepare_attachments() fp = file(self.attached_files['file2']) self.attached_file2_content = fp.read() fp.close() # Insert a public comment in a public record (public collection) self.msg1 = "A test comment 1" self.public_comid = query_add_comment_or_remark(reviews=0, recID=self.public_record, uid=self.romeo_uid, msg=self.msg1, editor_type='textarea', attached_files=self.attached_files) # Insert a public comment in a restricted record (restricted collection) self.msg2 = "A test comment 2" prepare_attachments() self.restr_comid_1 = \ query_add_comment_or_remark(reviews=0, recID=self.restr_record, uid=self.jekyll_uid, msg=self.msg2, editor_type='textarea', attached_files=self.attached_files) # Insert a restricted comment in a public collection self.msg3 = "A test comment 3" prepare_attachments() self.restr_comid_2 = \ query_add_comment_or_remark(reviews=0, recID=self.public_record_restr_comment, uid=self.jekyll_uid, msg=self.msg3, editor_type='textarea', attached_files=self.attached_files) # Insert a restricted comment, in a restricted collection self.msg5 = "A test comment 5" prepare_attachments() self.restr_comid_4 = \ query_add_comment_or_remark(reviews=0, recID=self.restr_record_restr_comment, uid=self.romeo_uid, msg=self.msg5, editor_type='textarea', attached_files=self.attached_files) # Insert a public comment in a restricted discussion self.msg6 = "A test comment 6" 
prepare_attachments() self.restr_comid_5 = \ query_add_comment_or_remark(reviews=0, recID=self.restricted_discussion, uid=self.romeo_uid, msg=self.msg6, editor_type='textarea', attached_files=self.attached_files) self.restr_comid_3 = None # Insert a public, deleted comment in a public record (public collection) self.msg7 = "A test comment 7" prepare_attachments() self.deleted_comid = query_add_comment_or_remark(reviews=0, recID=self.public_record, uid=self.romeo_uid, msg=self.msg7, editor_type='textarea', attached_files=self.attached_files) query_delete_comment_auth(self.deleted_comid) def tearDown(self): """Remove inserted comments""" run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.public_comid,)) run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_1,)) run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_2,)) if self.restr_comid_3: run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_3,)) run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_4,)) run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_5,)) run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.deleted_comid,)) pass def test_access_public_record_public_discussion_public_comment(self): """webcomment - accessing "public" comment in a "public" discussion of a restricted record""" # Guest user should not be able to access it self.assertNotEqual([], test_web_page_content("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record), expected_text=self.msg2)) # Accessing a non existing file for a restricted comment should also ask to login self.assertEqual([], test_web_page_content("%s/%s/%i/comments/attachments/get/%i/not_existing_file" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1), expected_text='You can use your nickname or your email address to login')) # Check accessing file of a restricted comment self.assertEqual([], 
test_web_page_content("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1), expected_text='You can use your nickname or your email address to login')) def test_access_restricted_record_public_discussion_public_comment(self): """webcomment - accessing "public" comment in a "public" discussion of a restricted record""" # Guest user should not be able to access it self.assertNotEqual([], test_web_page_content("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record), expected_text=self.msg2)) # Accessing a non existing file for a restricted comment should also ask to login self.assertEqual([], test_web_page_content("%s/%s/%i/comments/attachments/get/%i/not_existing_file" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1), expected_text='You can use your nickname or your email address to login')) # Check accessing file of a restricted comment self.assertEqual([], test_web_page_content("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1), expected_text='You can use your nickname or your email address to login')) # Juliet should not be able to access the comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record)) response = br.response().read() if not self.msg2 in response: pass else: self.fail("Oops, this user should not have access to this comment") # Juliet should not be able to access the attached files br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1)) response = br.response().read() if "You are not authorized" in response: pass else: self.fail("Oops, this user should not have access to this comment attachment") # Jekyll should be able to access the 
comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'jekyll' br['p_pw'] = 'j123ekyll' br.submit() br.open("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record)) response = br.response().read() if not self.msg2 in response: self.fail("Oops, this user should have access to this comment") # Jekyll should be able to access the attached files br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1)) response = br.response().read() self.assertEqual(self.attached_file2_content, response) def test_access_public_record_restricted_discussion_public_comment(self): """webcomment - accessing "public" comment in a restricted discussion of a public record""" # Guest user should not be able to access it self.assertNotEqual([], test_web_page_content("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion), expected_text=self.msg2)) # Accessing a non existing file for a restricted comment should also ask to login self.assertEqual([], test_web_page_content("%s/%s/%i/comments/attachments/get/%i/not_existing_file" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5), expected_text='You can use your nickname or your email address to login')) # Check accessing file of a restricted comment self.assertEqual([], test_web_page_content("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5), expected_text='You can use your nickname or your email address to login')) # Juliet should not be able to access the comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion)) response = br.response().read() if not self.msg6 in response: pass else: 
self.fail("Oops, this user should not have access to this comment") # Juliet should not be able to access the attached files br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5)) response = br.response().read() if "You are not authorized" in response: pass else: self.fail("Oops, this user should not have access to this comment attachment") # Romeo should be able to access the comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'romeo' br['p_pw'] = 'r123omeo' br.submit() br.open("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion)) response = br.response().read() if not self.msg6 in response: self.fail("Oops, this user should have access to this comment") # Romeo should be able to access the attached files br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5)) response = br.response().read() self.assertEqual(self.attached_file2_content, response) def test_comment_replies_inherit_restrictions(self): """webcomment - a reply to a comment inherits restrictions""" # In this test we reply to a restricted comment, and check if # the restriction is inherited. However, in order to make sure # that the comment restriction is inherited, and not the # record restriction, we temporary change the restriction of # the parent. 
self.public_record_restr_comment original_restriction = run_sql("SELECT restriction FROM cmtRECORDCOMMENT WHERE id=%s", (self.restr_comid_2,))[0][0] restriction_to_inherit = 'juliet_only' run_sql("UPDATE cmtRECORDCOMMENT SET restriction=%s WHERE id=%s", (restriction_to_inherit, self.restr_comid_2)) # Reply to a restricted comment self.msg4 = "A test comment 4" prepare_attachments() self.restr_comid_3 = \ query_add_comment_or_remark(reviews=0, recID=self.public_record_restr_comment, uid=self.jekyll_uid, msg=self.msg4, editor_type='textarea', attached_files=self.attached_files, reply_to=self.restr_comid_2) inherited_restriction = run_sql("SELECT restriction FROM cmtRECORDCOMMENT WHERE id=%s", (self.restr_comid_3,))[0][0] self.assertEqual(restriction_to_inherit, inherited_restriction) # Restore original restriction run_sql("UPDATE cmtRECORDCOMMENT SET restriction=%s WHERE id=%s", (original_restriction, self.restr_comid_2)) def test_comment_reply_with_wrong_record(self): """webcomment - replying to comment using mismatching recid""" # Juliet should not be able to reply to the comment, even through a public record br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/add?action=REPLY&comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1)) response = br.response().read() if not self.msg2 in response and \ "Authorization failure" in response: pass else: self.fail("Oops, users should not be able to reply to comment using mismatching recid") # Jekyll should also not be able to reply the comment using the wrong recid br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'jekyll' br['p_pw'] = 'j123ekyll' br.submit() br.open("%s/%s/%i/comments/add?action=REPLY&comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1)) response = br.response().read() if not 
self.msg2 in response and \ "Authorization failure" in response: pass else: self.fail("Oops, users should not be able to reply to comment using mismatching recid") def test_comment_access_attachment_with_wrong_record(self): """webcomment - accessing attachments using mismatching recid""" # Juliet should not be able to access these files, especially with wrong recid br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() try: br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1)) response = br.response().read() except HTTPError: pass else: self.fail("Oops, users should not be able to access comment attachment using mismatching recid") # Jekyll should also not be able to access these files when using wrong recid br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'jekyll' br['p_pw'] = 'j123ekyll' br.submit() try: br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1)) response = br.response().read() response = br.response().read() except HTTPError: pass else: self.fail("Oops, users should not be able to access comment attachment using mismatching recid") def test_comment_reply_to_deleted_comment(self): """webcomment - replying to a deleted comment""" # Juliet should not be able to reply to the deleted comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/add?action=REPLY&comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid)) response = br.response().read() if not self.msg7 in response: # There should be no authorization failure, in case the # comment was deleted in between. 
We'll simply go on but # the orginal comment will not be included pass else: self.fail("Oops, users should not be able to reply to a deleted comment") # Jekyll should also not be able to reply the deleted comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'jekyll' br['p_pw'] = 'j123ekyll' br.submit() br.open("%s/%s/%i/comments/add?action=REPLY&comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid)) response = br.response().read() if not self.msg7 in response: # There should be no authorization failure, in case the # comment was deleted in between. We'll simply go on but # the orginal comment will not be included pass else: self.fail("Oops, users should not be able to reply to a deleted comment") def test_comment_access_files_deleted_comment(self): """webcomment - access files of a deleted comment""" # Juliet should not be able to access the files br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid)) response = br.response().read() if "You cannot access files of a deleted comment" in response: pass else: self.fail("Oops, users should not have access to this deleted comment attachment") # Jekyll should also not be able to access the files br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'jekyll' br['p_pw'] = 'j123ekyll' br.submit() br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid)) response = br.response().read() if "Authorization failure" in response: pass else: self.fail("Oops, users should not have access to this deleted comment attachment") def test_comment_report_deleted_comment(self): """webcomment - report a deleted comment""" # Juliet 
should not be able to report a the deleted comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/report?comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, users should not be able to report a deleted comment") def test_comment_vote_deleted_comment(self): """webcomment - report a deleted comment""" # Juliet should not be able to vote for a the deleted comment/review br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, users should not be able to vote for a deleted comment") def test_comment_report_with_wrong_record(self): """webcomment - report a comment using mismatching recid""" # Juliet should not be able to report a comment she cannot access, even through public recid br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/report?comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, users should not be able to report using mismatching recid") # Jekyll should also not be able to report the comment using the wrong recid br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'jekyll' br['p_pw'] = 'j123ekyll' br.submit() br.open("%s/%s/%i/comments/report?comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, 
self.restr_comid_1)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, users should not be able to report using mismatching recid") def test_comment_vote_with_wrong_record(self): """webcomment - vote for a comment using mismatching recid""" # Juliet should not be able to vote for a comment she cannot access, especially through public recid br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, this user should not be able to report a deleted comment") # Jekyll should also not be able to vote for the comment using the wrong recid br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'jekyll' br['p_pw'] = 'j123ekyll' br.submit() br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, users should not be able to report using mismatching recid") def test_report_restricted_record_public_discussion_public_comment(self): """webcomment - report a comment restricted by 'viewrestrcoll'""" # Juliet should not be able to report the comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/report?comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, this user should not be able to report this comment") def 
test_report_public_record_restricted_discussion_public_comment(self): """webcomment - report a comment restricted by 'viewcomment'""" # Juliet should not be able to report the comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/report?comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, this user should not be able to report this comment") def test_report_public_record_public_discussion_restricted_comment(self): """webcomment - report a comment restricted by 'viewrestrcomment'""" # Juliet should not be able to report the comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/report?comid=%s&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record_restr_comment, self.restr_comid_2)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, this user should not be able to report this comment") def test_vote_restricted_record_public_discussion_public_comment(self): """webcomment - vote for a comment restricted by 'viewrestrcoll'""" # Juliet should not be able to vote for the comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, this user should not be able to report this comment") def test_vote_public_record_restricted_discussion_public_comment(self): """webcomment - vote for a comment restricted by 'viewcomment'""" # Juliet should not 
be able to vote for the comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, this user should not be able to report this comment") def test_vote_public_record_public_discussion_restricted_comment(self): """webcomment - vote for a comment restricted by 'viewrestrcomment'""" # Juliet should not be able to vote for the comment br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record_restr_comment, self.restr_comid_2)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, this user should not be able to report this comment") def test_comment_subscribe_restricted_record_public_discussion(self): """webcomment - subscribe to a discussion restricted with 'viewrestrcoll'""" # Juliet should not be able to subscribe to the discussion br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/subscribe?ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, this user should not be able to subscribe to this discussion") # Jekyll should be able to subscribe br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'jekyll' br['p_pw'] = 'j123ekyll' br.submit() br.open("%s/%s/%i/comments/subscribe?ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record)) response = 
br.response().read() if not "You have been subscribed" in response or \ "Authorization failure" in response: self.fail("Oops, this user should be able to subscribe to this discussion") def test_comment_subscribe_public_record_restricted_discussion(self): """webcomment - subscribe to a discussion restricted with 'viewcomment'""" # Juliet should not be able to subscribe to the discussion br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'juliet' br['p_pw'] = 'j123uliet' br.submit() br.open("%s/%s/%i/comments/subscribe?ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion)) response = br.response().read() if not "Authorization failure" in response: self.fail("Oops, this user should not be able to subscribe to this discussion") # Romeo should be able to subscribe br = Browser() br.open(CFG_SITE_URL + '/youraccount/login') br.select_form(nr=0) br['p_un'] = 'romeo' br['p_pw'] = 'r123omeo' br.submit() br.open("%s/%s/%i/comments/subscribe?ln=en" % \ (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion)) response = br.response().read() if not "You have been subscribed" in response or \ "Authorization failure" in response: print response self.fail("Oops, this user should be able to subscribe to this discussion") class WebCommentTransformationHTMLMarkupTest(InvenioTestCase): """ Test functions related to transforming HTML markup.""" def test_unordered_lists_markup_transformation(self): """webcomment - unordered lists markup transformation """ washer = EmailWasher() body_input = """<ul> <li>foo</li> <li>bar</li> </ul>""" body_expected = """ * foo * bar """ self.assertEqual(washer.wash(body_input), body_expected) # Without spaces and EOL body_input = '<ul><li>foo</li><li>bar</li></ul>' self.assertEqual(washer.wash(body_input), body_expected) def test_ordered_lists_markup_transformation(self): """ webcomment - ordered lists markup transformation """ washer = EmailWasher() body_input = """<ol> <li>foo</li> 
<li>bar</li> </ol>""" body_expected = """ 1. foo 2. bar """ self.assertEqual(washer.wash(body_input), body_expected) # Without spaces and EOL body_input = '<ol><li>foo</li><li>bar</li></ol>' self.assertEqual(washer.wash(body_input), body_expected) def test_nested_lists_markup_transformation(self): """ webcomment - nested lists markup transformation """ washer = EmailWasher() body_input = """<ol> <li>foo <ol> <li>nested foo</li> </ol> </li> <li>bar</li> </ol>""" body_expected = """ 1. foo 1. nested foo 2. bar """ self.assertEqual(washer.wash(body_input), body_expected) # Without spaces and EOL body_input = '<ol><li>foo<ol><li>nested foo</li></ol></li><li>bar</li></ol>' self.assertEqual(washer.wash(body_input), body_expected) def test_links_markup_transformation(self): """ webcomment - links markup transformation """ washer = EmailWasher() body_input = 'text http://foo.com some more text' body_expected = 'text http://foo.com some more text' self.assertEqual(washer.wash(body_input), body_expected) washer = EmailWasher() body_input = '<a href="https://cds.cern.ch/">CDS</a>' body_expected = '<https://cds.cern.ch/>(CDS)' self.assertEqual(washer.wash(body_input), body_expected) washer = EmailWasher() body_input = '<a href="https://cds.cern.ch/">https://cds.cern.ch/</a>' body_expected = '<https://cds.cern.ch/>' self.assertEqual(washer.wash(body_input), body_expected) def test_global_markup_transformation(self): """ webcomment - global transformation """ washer = EmailWasher() body_input = """<a href="http://foo.com">http://foo.com</a> <ol> <li>Main Ordered List item</li> <li>Below is an example of HTML nested unordered list <ul> <li>nested list item 1</li> <li>nested list item 2</li> <li>Sub nested ordered list <ol> <li>sub nested list item A</li> <li>sub nested list item B</li> </ol> </li> </ul> </li> <li>The last line in the main ordered list</li> </ol> <a href="http://foo.com">bar</a>""" body_expected = """<http://foo.com> 1. Main Ordered List item 2. 
Below is an example of HTML nested unordered list * nested list item 1 * nested list item 2 * Sub nested ordered list 1. sub nested list item A 2. sub nested list item B 3. The last line in the main ordered list <http://foo.com>(bar)""" self.assertEqual(washer.wash(body_input), body_expected) # Without spaces and EOL body_input = '<a href="http://foo.com">http://foo.com</a><ol><li>Main Ordered List item</li><li>Below is an example of HTML nested unordered list<ul><li>nested list item 1</li><li>nested list item 2</li><li>Sub nested ordered list<ol><li>sub nested list item A</li><li>sub nested list item B</li></ol></li></ul></li><li>The last line in the main ordered list</li></ol> <a href="http://foo.com">bar</a>' self.assertEqual(washer.wash(body_input), body_expected) TEST_SUITE = make_test_suite(WebCommentWebPagesAvailabilityTest, WebCommentRestrictionsTest, WebCommentTransformationHTMLMarkupTest) if __name__ == "__main__": run_test_suite(TEST_SUITE, warn_user=True)
gpl-2.0
wbc2010/django1.2.5
django/contrib/contenttypes/models.py
307
4052
from django.db import models from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import smart_unicode class ContentTypeManager(models.Manager): # Cache to avoid re-looking up ContentType objects all over the place. # This cache is shared by all the get_for_* methods. _cache = {} def get_by_natural_key(self, app_label, model): try: ct = self.__class__._cache[self.db][(app_label, model)] except KeyError: ct = self.get(app_label=app_label, model=model) return ct def get_for_model(self, model): """ Returns the ContentType object for a given model, creating the ContentType if necessary. Lookups are cached so that subsequent lookups for the same model don't hit the database. """ opts = model._meta while opts.proxy: model = opts.proxy_for_model opts = model._meta key = (opts.app_label, opts.object_name.lower()) try: ct = self.__class__._cache[self.db][key] except KeyError: # Load or create the ContentType entry. The smart_unicode() is # needed around opts.verbose_name_raw because name_raw might be a # django.utils.functional.__proxy__ object. ct, created = self.get_or_create( app_label = opts.app_label, model = opts.object_name.lower(), defaults = {'name': smart_unicode(opts.verbose_name_raw)}, ) self._add_to_cache(self.db, ct) return ct def get_for_id(self, id): """ Lookup a ContentType by ID. Uses the same shared cache as get_for_model (though ContentTypes are obviously not created on-the-fly by get_by_id). """ try: ct = self.__class__._cache[self.db][id] except KeyError: # This could raise a DoesNotExist; that's correct behavior and will # make sure that only correct ctypes get stored in the cache dict. ct = self.get(pk=id) self._add_to_cache(self.db, ct) return ct def clear_cache(self): """ Clear out the content-type cache. This needs to happen during database flushes to prevent caching of "stale" content type IDs (see django.contrib.contenttypes.management.update_contenttypes for where this gets called). 
""" self.__class__._cache.clear() def _add_to_cache(self, using, ct): """Insert a ContentType into the cache.""" model = ct.model_class() key = (model._meta.app_label, model._meta.object_name.lower()) self.__class__._cache.setdefault(using, {})[key] = ct self.__class__._cache.setdefault(using, {})[ct.id] = ct class ContentType(models.Model): name = models.CharField(max_length=100) app_label = models.CharField(max_length=100) model = models.CharField(_('python model class name'), max_length=100) objects = ContentTypeManager() class Meta: verbose_name = _('content type') verbose_name_plural = _('content types') db_table = 'django_content_type' ordering = ('name',) unique_together = (('app_label', 'model'),) def __unicode__(self): return self.name def model_class(self): "Returns the Python model class for this type of content." from django.db import models return models.get_model(self.app_label, self.model) def get_object_for_this_type(self, **kwargs): """ Returns an object of this type for the keyword arguments given. Basically, this is a proxy around this object_type's get_object() model method. The ObjectNotExist exception, if thrown, will not be caught, so code that calls this method should catch it. """ return self.model_class()._default_manager.using(self._state.db).get(**kwargs) def natural_key(self): return (self.app_label, self.model)
bsd-3-clause
inscriptionweb/soundconverter
soundconverter/utils.py
2
1263
#!/usr/bin/python # -*- coding: utf-8 -*- # # SoundConverter - GNOME application for converting between audio formats. # Copyright 2004 Lars Wirzenius # Copyright 2005-2014 Gautier Portet # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 # USA # logging & debugging from settings import settings def log(*args): """ Display a message. Can be disabled with 'quiet' option """ if not settings['quiet']: print( ' '.join([str(msg) for msg in args]) ) def debug(*args): """ Display a debug message. Only when activated by 'debug' option """ if settings['debug']: print( ' '.join([str(msg) for msg in args]) )
gpl-3.0
angelblue05/plugin.video.emby
libraries/requests/packages/chardet/latin1prober.py
1778
5232
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .charsetprober import CharSetProber from .constants import eNotMe from .compat import wrap_ord FREQ_CAT_NUM = 4 UDF = 0 # undefined OTH = 1 # other ASC = 2 # ascii capital letter ASS = 3 # ascii small letter ACV = 4 # accent capital vowel ACO = 5 # accent capital other ASV = 6 # accent small vowel ASO = 7 # accent small other CLASS_NUM = 8 # total classes Latin1_CharToClass = ( OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F 
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47 ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57 ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67 ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77 ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87 OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97 OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7 OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7 ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7 ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7 ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7 ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF ) # 0 : illegal # 1 : very unlikely # 2 : normal # 3 : very likely Latin1ClassModel = ( # UDF OTH ASC ASS ACV ACO ASV ASO 0, 0, 0, 0, 0, 0, 0, 0, # UDF 0, 3, 3, 3, 3, 3, 3, 3, # OTH 0, 3, 3, 3, 3, 3, 3, 3, # ASC 0, 3, 3, 3, 1, 1, 3, 3, # ASS 0, 3, 3, 3, 1, 2, 1, 2, # ACV 0, 3, 3, 3, 3, 3, 3, 3, # ACO 0, 3, 1, 3, 1, 1, 1, 3, # ASV 0, 3, 1, 3, 1, 1, 3, 3, # ASO ) class Latin1Prober(CharSetProber): def __init__(self): CharSetProber.__init__(self) self.reset() def reset(self): self._mLastCharClass = OTH self._mFreqCounter = [0] * FREQ_CAT_NUM CharSetProber.reset(self) def get_charset_name(self): return "windows-1252" def feed(self, aBuf): aBuf = self.filter_with_english_letters(aBuf) for c in aBuf: charClass = Latin1_CharToClass[wrap_ord(c)] freq = Latin1ClassModel[(self._mLastCharClass * 
CLASS_NUM) + charClass] if freq == 0: self._mState = eNotMe break self._mFreqCounter[freq] += 1 self._mLastCharClass = charClass return self.get_state() def get_confidence(self): if self.get_state() == eNotMe: return 0.01 total = sum(self._mFreqCounter) if total < 0.01: confidence = 0.0 else: confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0) / total) if confidence < 0.0: confidence = 0.0 # lower the confidence of latin1 so that other more accurate # detector can take priority. confidence = confidence * 0.73 return confidence
gpl-3.0
alexsmx/djangoAppengineSrcTemplate
django/contrib/messages/tests/base.py
152
17772
import warnings from django import http from django.test import TestCase from django.conf import settings from django.utils.translation import ugettext_lazy from django.utils.unittest import skipIf from django.contrib.messages import constants, utils, get_level, set_level from django.contrib.messages.api import MessageFailure from django.contrib.messages.storage import default_storage, base from django.contrib.messages.storage.base import Message from django.core.urlresolvers import reverse from django.contrib.auth.models import User def skipUnlessAuthIsInstalled(func): return skipIf( 'django.contrib.auth' not in settings.INSTALLED_APPS, "django.contrib.auth isn't installed")(func) def add_level_messages(storage): """ Adds 6 messages from different levels (including a custom one) to a storage instance. """ storage.add(constants.INFO, 'A generic info message') storage.add(29, 'Some custom level') storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag') storage.add(constants.WARNING, 'A warning') storage.add(constants.ERROR, 'An error') storage.add(constants.SUCCESS, 'This was a triumph.') class BaseTest(TestCase): storage_class = default_storage restore_settings = ['MESSAGE_LEVEL', 'MESSAGE_TAGS'] urls = 'django.contrib.messages.tests.urls' levels = { 'debug': constants.DEBUG, 'info': constants.INFO, 'success': constants.SUCCESS, 'warning': constants.WARNING, 'error': constants.ERROR, } def setUp(self): self._remembered_settings = {} for setting in self.restore_settings: if hasattr(settings, setting): self._remembered_settings[setting] = getattr(settings, setting) delattr(settings._wrapped, setting) # Backup these manually because we do not want them deleted. 
self._middleware_classes = settings.MIDDLEWARE_CLASSES self._template_context_processors = \ settings.TEMPLATE_CONTEXT_PROCESSORS self._installed_apps = settings.INSTALLED_APPS self._message_storage = settings.MESSAGE_STORAGE settings.MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__, self.storage_class.__name__) self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS settings.TEMPLATE_DIRS = () self.save_warnings_state() warnings.filterwarnings('ignore', category=DeprecationWarning, module='django.contrib.auth.models') def tearDown(self): for setting in self.restore_settings: self.restore_setting(setting) # Restore these manually (see above). settings.MIDDLEWARE_CLASSES = self._middleware_classes settings.TEMPLATE_CONTEXT_PROCESSORS = \ self._template_context_processors settings.INSTALLED_APPS = self._installed_apps settings.MESSAGE_STORAGE = self._message_storage settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS self.restore_warnings_state() def restore_setting(self, setting): if setting in self._remembered_settings: value = self._remembered_settings.pop(setting) setattr(settings, setting, value) elif hasattr(settings, setting): delattr(settings._wrapped, setting) def get_request(self): return http.HttpRequest() def get_response(self): return http.HttpResponse() def get_storage(self, data=None): """ Returns the storage backend, setting its loaded data to the ``data`` argument. This method avoids the storage ``_get`` method from getting called so that other parts of the storage backend can be tested independent of the message retrieval logic. 
""" storage = self.storage_class(self.get_request()) storage._loaded_data = data or [] return storage def test_add(self): storage = self.get_storage() self.assertFalse(storage.added_new) storage.add(constants.INFO, 'Test message 1') self.assertTrue(storage.added_new) storage.add(constants.INFO, 'Test message 2', extra_tags='tag') self.assertEqual(len(storage), 2) def test_add_lazy_translation(self): storage = self.get_storage() response = self.get_response() storage.add(constants.INFO, ugettext_lazy('lazy message')) storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 1) def test_no_update(self): storage = self.get_storage() response = self.get_response() storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 0) def test_add_update(self): storage = self.get_storage() response = self.get_response() storage.add(constants.INFO, 'Test message 1') storage.add(constants.INFO, 'Test message 1', extra_tags='tag') storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 2) def test_existing_add_read_update(self): storage = self.get_existing_storage() response = self.get_response() storage.add(constants.INFO, 'Test message 3') list(storage) # Simulates a read storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 0) def test_existing_read_add_update(self): storage = self.get_existing_storage() response = self.get_response() list(storage) # Simulates a read storage.add(constants.INFO, 'Test message 3') storage.update(response) storing = self.stored_messages_count(storage, response) self.assertEqual(storing, 1) def test_full_request_response_cycle(self): """ With the message middleware enabled, tests that messages are properly stored and then retrieved across the full request/redirect/response cycle. 
""" settings.MESSAGE_LEVEL = constants.DEBUG data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show') for level in ('debug', 'info', 'success', 'warning', 'error'): add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) response = self.client.post(add_url, data, follow=True) self.assertRedirects(response, show_url) self.assertTrue('messages' in response.context) messages = [Message(self.levels[level], msg) for msg in data['messages']] self.assertEqual(list(response.context['messages']), messages) for msg in data['messages']: self.assertContains(response, msg) def test_with_template_response(self): settings.MESSAGE_LEVEL = constants.DEBUG data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show_template_response') for level in self.levels.keys(): add_url = reverse('django.contrib.messages.tests.urls.add_template_response', args=(level,)) response = self.client.post(add_url, data, follow=True) self.assertRedirects(response, show_url) self.assertTrue('messages' in response.context) for msg in data['messages']: self.assertContains(response, msg) # there shouldn't be any messages on second GET request response = self.client.get(show_url) for msg in data['messages']: self.assertNotContains(response, msg) def test_multiple_posts(self): """ Tests that messages persist properly when multiple POSTs are made before a GET. 
""" settings.MESSAGE_LEVEL = constants.DEBUG data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show') messages = [] for level in ('debug', 'info', 'success', 'warning', 'error'): messages.extend([Message(self.levels[level], msg) for msg in data['messages']]) add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) self.client.post(add_url, data) response = self.client.get(show_url) self.assertTrue('messages' in response.context) self.assertEqual(list(response.context['messages']), messages) for msg in data['messages']: self.assertContains(response, msg) @skipUnlessAuthIsInstalled def test_middleware_disabled_auth_user(self): """ Tests that the messages API successfully falls back to using user.message_set to store messages directly when the middleware is disabled. """ settings.MESSAGE_LEVEL = constants.DEBUG user = User.objects.create_user('test', 'test@example.com', 'test') self.client.login(username='test', password='test') settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) settings.INSTALLED_APPS.remove( 'django.contrib.messages', ) settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES) settings.MIDDLEWARE_CLASSES.remove( 'django.contrib.messages.middleware.MessageMiddleware', ) settings.TEMPLATE_CONTEXT_PROCESSORS = \ list(settings.TEMPLATE_CONTEXT_PROCESSORS) settings.TEMPLATE_CONTEXT_PROCESSORS.remove( 'django.contrib.messages.context_processors.messages', ) data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show') for level in ('debug', 'info', 'success', 'warning', 'error'): add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) response = self.client.post(add_url, data, follow=True) self.assertRedirects(response, show_url) self.assertTrue('messages' in response.context) context_messages = list(response.context['messages']) for msg in data['messages']: 
self.assertTrue(msg in context_messages) self.assertContains(response, msg) def test_middleware_disabled_anon_user(self): """ Tests that, when the middleware is disabled and a user is not logged in, an exception is raised when one attempts to store a message. """ settings.MESSAGE_LEVEL = constants.DEBUG settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) settings.INSTALLED_APPS.remove( 'django.contrib.messages', ) settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES) settings.MIDDLEWARE_CLASSES.remove( 'django.contrib.messages.middleware.MessageMiddleware', ) settings.TEMPLATE_CONTEXT_PROCESSORS = \ list(settings.TEMPLATE_CONTEXT_PROCESSORS) settings.TEMPLATE_CONTEXT_PROCESSORS.remove( 'django.contrib.messages.context_processors.messages', ) data = { 'messages': ['Test message %d' % x for x in xrange(10)], } show_url = reverse('django.contrib.messages.tests.urls.show') for level in ('debug', 'info', 'success', 'warning', 'error'): add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) self.assertRaises(MessageFailure, self.client.post, add_url, data, follow=True) def test_middleware_disabled_anon_user_fail_silently(self): """ Tests that, when the middleware is disabled and a user is not logged in, an exception is not raised if 'fail_silently' = True """ settings.MESSAGE_LEVEL = constants.DEBUG settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) settings.INSTALLED_APPS.remove( 'django.contrib.messages', ) settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES) settings.MIDDLEWARE_CLASSES.remove( 'django.contrib.messages.middleware.MessageMiddleware', ) settings.TEMPLATE_CONTEXT_PROCESSORS = \ list(settings.TEMPLATE_CONTEXT_PROCESSORS) settings.TEMPLATE_CONTEXT_PROCESSORS.remove( 'django.contrib.messages.context_processors.messages', ) data = { 'messages': ['Test message %d' % x for x in xrange(10)], 'fail_silently': True, } show_url = reverse('django.contrib.messages.tests.urls.show') for level in ('debug', 'info', 
'success', 'warning', 'error'): add_url = reverse('django.contrib.messages.tests.urls.add', args=(level,)) response = self.client.post(add_url, data, follow=True) self.assertRedirects(response, show_url) self.assertTrue('messages' in response.context) self.assertEqual(list(response.context['messages']), []) def stored_messages_count(self, storage, response): """ Returns the number of messages being stored after a ``storage.update()`` call. """ raise NotImplementedError('This method must be set by a subclass.') def test_get(self): raise NotImplementedError('This method must be set by a subclass.') def get_existing_storage(self): return self.get_storage([Message(constants.INFO, 'Test message 1'), Message(constants.INFO, 'Test message 2', extra_tags='tag')]) def test_existing_read(self): """ Tests that reading the existing storage doesn't cause the data to be lost. """ storage = self.get_existing_storage() self.assertFalse(storage.used) # After iterating the storage engine directly, the used flag is set. data = list(storage) self.assertTrue(storage.used) # The data does not disappear because it has been iterated. self.assertEqual(data, list(storage)) def test_existing_add(self): storage = self.get_existing_storage() self.assertFalse(storage.added_new) storage.add(constants.INFO, 'Test message 3') self.assertTrue(storage.added_new) def test_default_level(self): # get_level works even with no storage on the request. request = self.get_request() self.assertEqual(get_level(request), constants.INFO) # get_level returns the default level if it hasn't been set. storage = self.get_storage() request._messages = storage self.assertEqual(get_level(request), constants.INFO) # Only messages of sufficient level get recorded. 
add_level_messages(storage) self.assertEqual(len(storage), 5) def test_low_level(self): request = self.get_request() storage = self.storage_class(request) request._messages = storage self.assertTrue(set_level(request, 5)) self.assertEqual(get_level(request), 5) add_level_messages(storage) self.assertEqual(len(storage), 6) def test_high_level(self): request = self.get_request() storage = self.storage_class(request) request._messages = storage self.assertTrue(set_level(request, 30)) self.assertEqual(get_level(request), 30) add_level_messages(storage) self.assertEqual(len(storage), 2) def test_settings_level(self): request = self.get_request() storage = self.storage_class(request) settings.MESSAGE_LEVEL = 29 self.assertEqual(get_level(request), 29) add_level_messages(storage) self.assertEqual(len(storage), 3) def test_tags(self): storage = self.get_storage() storage.level = 0 add_level_messages(storage) tags = [msg.tags for msg in storage] self.assertEqual(tags, ['info', '', 'extra-tag debug', 'warning', 'error', 'success']) def test_custom_tags(self): settings.MESSAGE_TAGS = { constants.INFO: 'info', constants.DEBUG: '', constants.WARNING: '', constants.ERROR: 'bad', 29: 'custom', } # LEVEL_TAGS is a constant defined in the # django.contrib.messages.storage.base module, so after changing # settings.MESSAGE_TAGS, we need to update that constant too. base.LEVEL_TAGS = utils.get_level_tags() try: storage = self.get_storage() storage.level = 0 add_level_messages(storage) tags = [msg.tags for msg in storage] self.assertEqual(tags, ['info', 'custom', 'extra-tag', '', 'bad', 'success']) finally: # Ensure the level tags constant is put back like we found it. self.restore_setting('MESSAGE_TAGS') base.LEVEL_TAGS = utils.get_level_tags()
bsd-3-clause
agconti/njode
env/lib/python2.7/site-packages/tests/etree13/HTMLTreeBuilder.py
25
7771
# # ElementTree # $Id$ # # a simple tree builder, for HTML input # # history: # 2002-04-06 fl created # 2002-04-07 fl ignore IMG and HR end tags # 2002-04-07 fl added support for 1.5.2 and later # 2003-04-13 fl added HTMLTreeBuilder alias # 2004-12-02 fl don't feed non-ASCII charrefs/entities as 8-bit strings # 2004-12-05 fl don't feed non-ASCII CDATA as 8-bit strings # # Copyright (c) 1999-2004 by Fredrik Lundh. All rights reserved. # # fredrik@pythonware.com # http://www.pythonware.com # # -------------------------------------------------------------------- # The ElementTree toolkit is # # Copyright (c) 1999-2007 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. # -------------------------------------------------------------------- ## # Tools to build element trees from HTML files. 
## import htmlentitydefs import re, string, sys import mimetools, StringIO import ElementTree AUTOCLOSE = "p", "li", "tr", "th", "td", "head", "body" IGNOREEND = "img", "hr", "meta", "link", "br" if sys.version[:3] == "1.5": is_not_ascii = re.compile(r"[\x80-\xff]").search # 1.5.2 else: is_not_ascii = re.compile(eval(r'u"[\u0080-\uffff]"')).search try: from HTMLParser import HTMLParser except ImportError: from sgmllib import SGMLParser # hack to use sgmllib's SGMLParser to emulate 2.2's HTMLParser class HTMLParser(SGMLParser): # the following only works as long as this class doesn't # provide any do, start, or end handlers def unknown_starttag(self, tag, attrs): self.handle_starttag(tag, attrs) def unknown_endtag(self, tag): self.handle_endtag(tag) ## # ElementTree builder for HTML source code. This builder converts an # HTML document or fragment to an ElementTree. # <p> # The parser is relatively picky, and requires balanced tags for most # elements. However, elements belonging to the following group are # automatically closed: P, LI, TR, TH, and TD. In addition, the # parser automatically inserts end tags immediately after the start # tag, and ignores any end tags for the following group: IMG, HR, # META, and LINK. # # @keyparam builder Optional builder object. If omitted, the parser # uses the standard <b>elementtree</b> builder. # @keyparam encoding Optional character encoding, if known. If omitted, # the parser looks for META tags inside the document. If no tags # are found, the parser defaults to ISO-8859-1. Note that if your # document uses a non-ASCII compatible encoding, you must decode # the document before parsing. # # @see elementtree.ElementTree class HTMLTreeBuilder(HTMLParser): # FIXME: shouldn't this class be named Parser, not Builder? 
def __init__(self, builder=None, encoding=None): self.__stack = [] if builder is None: builder = ElementTree.TreeBuilder() self.__builder = builder self.encoding = encoding or "iso-8859-1" HTMLParser.__init__(self) ## # Flushes parser buffers, and return the root element. # # @return An Element instance. def close(self): HTMLParser.close(self) return self.__builder.close() ## # (Internal) Handles start tags. def handle_starttag(self, tag, attrs): if tag == "meta": # look for encoding directives http_equiv = content = None for k, v in attrs: if k == "http-equiv": http_equiv = string.lower(v) elif k == "content": content = v if http_equiv == "content-type" and content: # use mimetools to parse the http header header = mimetools.Message( StringIO.StringIO("%s: %s\n\n" % (http_equiv, content)) ) encoding = header.getparam("charset") if encoding: self.encoding = encoding if tag in AUTOCLOSE: if self.__stack and self.__stack[-1] == tag: self.handle_endtag(tag) self.__stack.append(tag) attrib = {} if attrs: for k, v in attrs: attrib[string.lower(k)] = v self.__builder.start(tag, attrib) if tag in IGNOREEND: self.__stack.pop() self.__builder.end(tag) ## # (Internal) Handles end tags. def handle_endtag(self, tag): if tag in IGNOREEND: return lasttag = self.__stack.pop() if tag != lasttag and lasttag in AUTOCLOSE: self.handle_endtag(lasttag) self.__builder.end(tag) ## # (Internal) Handles character references. def handle_charref(self, char): if char[:1] == "x": char = int(char[1:], 16) else: char = int(char) if 0 <= char < 128: self.__builder.data(chr(char)) else: self.__builder.data(unichr(char)) ## # (Internal) Handles entity references. def handle_entityref(self, name): entity = htmlentitydefs.entitydefs.get(name) if entity: if len(entity) == 1: entity = ord(entity) else: entity = int(entity[2:-1]) if 0 <= entity < 128: self.__builder.data(chr(entity)) else: self.__builder.data(unichr(entity)) else: self.unknown_entityref(name) ## # (Internal) Handles character data. 
def handle_data(self, data): if isinstance(data, type('')) and is_not_ascii(data): # convert to unicode, but only if necessary data = unicode(data, self.encoding, "ignore") self.__builder.data(data) ## # (Hook) Handles unknown entity references. The default action # is to ignore unknown entities. def unknown_entityref(self, name): pass # ignore by default; override if necessary ## # An alias for the <b>HTMLTreeBuilder</b> class. TreeBuilder = HTMLTreeBuilder ## # Parse an HTML document or document fragment. # # @param source A filename or file object containing HTML data. # @param encoding Optional character encoding, if known. If omitted, # the parser looks for META tags inside the document. If no tags # are found, the parser defaults to ISO-8859-1. # @return An ElementTree instance def parse(source, encoding=None): return ElementTree.parse(source, HTMLTreeBuilder(encoding=encoding)) if __name__ == "__main__": import sys ElementTree.dump(parse(open(sys.argv[1])))
bsd-3-clause
quentinsf/ansible
v1/ansible/module_utils/rax.py
280
11974
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by # Ansible still belong to the author of the module, and may assign their own # license to the complete work. # # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
from uuid import UUID FINAL_STATUSES = ('ACTIVE', 'ERROR') VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', 'error', 'error_deleting') CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN'] CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS', 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP'] NON_CALLABLES = (basestring, bool, dict, int, list, type(None)) PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000" SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111" def rax_slugify(value): """Prepend a key with rax_ and normalize the key name""" return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_')) def rax_clb_node_to_dict(obj): """Function to convert a CLB Node object to a dict""" if not obj: return {} node = obj.to_dict() node['id'] = obj.id node['weight'] = obj.weight return node def rax_to_dict(obj, obj_type='standard'): """Generic function to convert a pyrax object to a dict obj_type values: standard clb server """ instance = {} for key in dir(obj): value = getattr(obj, key) if obj_type == 'clb' and key == 'nodes': instance[key] = [] for node in value: instance[key].append(rax_clb_node_to_dict(node)) elif (isinstance(value, list) and len(value) > 0 and not isinstance(value[0], NON_CALLABLES)): instance[key] = [] for item in value: instance[key].append(rax_to_dict(item)) elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')): if obj_type == 'server': if key == 'image': if not value: instance['rax_boot_source'] = 'volume' else: instance['rax_boot_source'] = 'local' key = rax_slugify(key) instance[key] = value if obj_type == 'server': for attr in ['id', 'accessIPv4', 'name', 'status']: instance[attr] = instance.get(rax_slugify(attr)) return instance def rax_find_bootable_volume(module, rax_module, server, exit=True): """Find a servers bootable volume""" cs = 
rax_module.cloudservers cbs = rax_module.cloud_blockstorage server_id = rax_module.utils.get_id(server) volumes = cs.volumes.get_server_volumes(server_id) bootable_volumes = [] for volume in volumes: vol = cbs.get(volume) if module.boolean(vol.bootable): bootable_volumes.append(vol) if not bootable_volumes: if exit: module.fail_json(msg='No bootable volumes could be found for ' 'server %s' % server_id) else: return False elif len(bootable_volumes) > 1: if exit: module.fail_json(msg='Multiple bootable volumes found for server ' '%s' % server_id) else: return False return bootable_volumes[0] def rax_find_image(module, rax_module, image, exit=True): """Find a server image by ID or Name""" cs = rax_module.cloudservers try: UUID(image) except ValueError: try: image = cs.images.find(human_id=image) except(cs.exceptions.NotFound, cs.exceptions.NoUniqueMatch): try: image = cs.images.find(name=image) except (cs.exceptions.NotFound, cs.exceptions.NoUniqueMatch): if exit: module.fail_json(msg='No matching image found (%s)' % image) else: return False return rax_module.utils.get_id(image) def rax_find_volume(module, rax_module, name): """Find a Block storage volume by ID or name""" cbs = rax_module.cloud_blockstorage try: UUID(name) volume = cbs.get(name) except ValueError: try: volume = cbs.find(name=name) except rax_module.exc.NotFound: volume = None except Exception, e: module.fail_json(msg='%s' % e) return volume def rax_find_network(module, rax_module, network): """Find a cloud network by ID or name""" cnw = rax_module.cloud_networks try: UUID(network) except ValueError: if network.lower() == 'public': return cnw.get_server_networks(PUBLIC_NET_ID) elif network.lower() == 'private': return cnw.get_server_networks(SERVICE_NET_ID) else: try: network_obj = cnw.find_network_by_label(network) except (rax_module.exceptions.NetworkNotFound, rax_module.exceptions.NetworkLabelNotUnique): module.fail_json(msg='No matching network found (%s)' % network) else: return 
cnw.get_server_networks(network_obj) else: return cnw.get_server_networks(network) def rax_find_server(module, rax_module, server): """Find a Cloud Server by ID or name""" cs = rax_module.cloudservers try: UUID(server) server = cs.servers.get(server) except ValueError: servers = cs.servers.list(search_opts=dict(name='^%s$' % server)) if not servers: module.fail_json(msg='No Server was matched by name, ' 'try using the Server ID instead') if len(servers) > 1: module.fail_json(msg='Multiple servers matched by name, ' 'try using the Server ID instead') # We made it this far, grab the first and hopefully only server # in the list server = servers[0] return server def rax_find_loadbalancer(module, rax_module, loadbalancer): """Find a Cloud Load Balancer by ID or name""" clb = rax_module.cloud_loadbalancers try: found = clb.get(loadbalancer) except: found = [] for lb in clb.list(): if loadbalancer == lb.name: found.append(lb) if not found: module.fail_json(msg='No loadbalancer was matched') if len(found) > 1: module.fail_json(msg='Multiple loadbalancers matched') # We made it this far, grab the first and hopefully only item # in the list found = found[0] return found def rax_argument_spec(): """Return standard base dictionary used for the argument_spec argument in AnsibleModule """ return dict( api_key=dict(type='str', aliases=['password'], no_log=True), auth_endpoint=dict(type='str'), credentials=dict(type='str', aliases=['creds_file']), env=dict(type='str'), identity_type=dict(type='str', default='rackspace'), region=dict(type='str'), tenant_id=dict(type='str'), tenant_name=dict(type='str'), username=dict(type='str'), verify_ssl=dict(choices=BOOLEANS, type='bool'), ) def rax_required_together(): """Return the default list used for the required_together argument to AnsibleModule""" return [['api_key', 'username']] def setup_rax_module(module, rax_module, region_required=True): """Set up pyrax in a standard way for all modules""" rax_module.USER_AGENT = 'ansible/%s %s' % 
(ANSIBLE_VERSION, rax_module.USER_AGENT) api_key = module.params.get('api_key') auth_endpoint = module.params.get('auth_endpoint') credentials = module.params.get('credentials') env = module.params.get('env') identity_type = module.params.get('identity_type') region = module.params.get('region') tenant_id = module.params.get('tenant_id') tenant_name = module.params.get('tenant_name') username = module.params.get('username') verify_ssl = module.params.get('verify_ssl') if env is not None: rax_module.set_environment(env) rax_module.set_setting('identity_type', identity_type) if verify_ssl is not None: rax_module.set_setting('verify_ssl', verify_ssl) if auth_endpoint is not None: rax_module.set_setting('auth_endpoint', auth_endpoint) if tenant_id is not None: rax_module.set_setting('tenant_id', tenant_id) if tenant_name is not None: rax_module.set_setting('tenant_name', tenant_name) try: username = username or os.environ.get('RAX_USERNAME') if not username: username = rax_module.get_setting('keyring_username') if username: api_key = 'USE_KEYRING' if not api_key: api_key = os.environ.get('RAX_API_KEY') credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or os.environ.get('RAX_CREDS_FILE')) region = (region or os.environ.get('RAX_REGION') or rax_module.get_setting('region')) except KeyError, e: module.fail_json(msg='Unable to load %s' % e.message) try: if api_key and username: if api_key == 'USE_KEYRING': rax_module.keyring_auth(username, region=region) else: rax_module.set_credentials(username, api_key=api_key, region=region) elif credentials: credentials = os.path.expanduser(credentials) rax_module.set_credential_file(credentials, region=region) else: raise Exception('No credentials supplied!') except Exception, e: if e.message: msg = str(e.message) else: msg = repr(e) module.fail_json(msg=msg) if region_required and region not in rax_module.regions: module.fail_json(msg='%s is not a valid region, must be one of: %s' % (region, 
','.join(rax_module.regions))) return rax_module
gpl-3.0
mcrowson/django
tests/template_tests/filter_tests/test_urlizetrunc.py
130
3347
from django.template.defaultfilters import urlizetrunc from django.test import SimpleTestCase from django.utils.safestring import mark_safe from ..utils import setup class UrlizetruncTests(SimpleTestCase): @setup({'urlizetrunc01': '{% autoescape off %}{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}{% endautoescape %}'}) def test_urlizetrunc01(self): output = self.engine.render_to_string( 'urlizetrunc01', { 'a': '"Unsafe" http://example.com/x=&y=', 'b': mark_safe('&quot;Safe&quot; http://example.com?x=&amp;y='), }, ) self.assertEqual( output, '"Unsafe" <a href="http://example.com/x=&amp;y=" rel="nofollow">http:...</a> ' '&quot;Safe&quot; <a href="http://example.com?x=&amp;y=" rel="nofollow">http:...</a>' ) @setup({'urlizetrunc02': '{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}'}) def test_urlizetrunc02(self): output = self.engine.render_to_string( 'urlizetrunc02', { 'a': '"Unsafe" http://example.com/x=&y=', 'b': mark_safe('&quot;Safe&quot; http://example.com?x=&amp;y='), }, ) self.assertEqual( output, '&quot;Unsafe&quot; <a href="http://example.com/x=&amp;y=" rel="nofollow">http:...</a> ' '&quot;Safe&quot; <a href="http://example.com?x=&amp;y=" rel="nofollow">http:...</a>' ) class FunctionTests(SimpleTestCase): def test_truncate(self): uri = 'http://31characteruri.com/test/' self.assertEqual(len(uri), 31) self.assertEqual( urlizetrunc(uri, 31), '<a href="http://31characteruri.com/test/" rel="nofollow">' 'http://31characteruri.com/test/</a>', ) self.assertEqual( urlizetrunc(uri, 30), '<a href="http://31characteruri.com/test/" rel="nofollow">' 'http://31characteruri.com/t...</a>', ) self.assertEqual( urlizetrunc(uri, 2), '<a href="http://31characteruri.com/test/"' ' rel="nofollow">...</a>', ) def test_overtruncate(self): self.assertEqual( urlizetrunc('http://short.com/', 20), '<a href=' '"http://short.com/" rel="nofollow">http://short.com/</a>', ) def test_query_string(self): self.assertEqual( 
urlizetrunc('http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&meta=', 20), '<a href="http://www.google.co.uk/search?hl=en&amp;q=some+long+url&amp;btnG=Search&amp;' 'meta=" rel="nofollow">http://www.google...</a>', ) def test_non_string_input(self): self.assertEqual(urlizetrunc(123, 1), '123') def test_autoescape(self): self.assertEqual( urlizetrunc('foo<a href=" google.com ">bar</a>buz', 10), 'foo&lt;a href=&quot; <a href="http://google.com" rel="nofollow">google.com</a> &quot;&gt;bar&lt;/a&gt;buz' ) def test_autoescape_off(self): self.assertEqual( urlizetrunc('foo<a href=" google.com ">bar</a>buz', 9, autoescape=False), 'foo<a href=" <a href="http://google.com" rel="nofollow">google...</a> ">bar</a>buz', )
bsd-3-clause
dvliman/jaikuengine
.google_appengine/lib/django-1.2/tests/modeltests/delete/tests.py
42
4819
from django.db.models import sql from django.db.models.loading import cache from django.db.models.query import CollectedObjects from django.db.models.query_utils import CyclicDependency from django.test import TestCase from models import A, B, C, D, E, F class DeleteTests(TestCase): def clear_rel_obj_caches(self, *models): for m in models: if hasattr(m._meta, '_related_objects_cache'): del m._meta._related_objects_cache def order_models(self, *models): cache.app_models["delete"].keyOrder = models def setUp(self): self.order_models("a", "b", "c", "d", "e", "f") self.clear_rel_obj_caches(A, B, C, D, E, F) def tearDown(self): self.order_models("a", "b", "c", "d", "e", "f") self.clear_rel_obj_caches(A, B, C, D, E, F) def test_collected_objects(self): g = CollectedObjects() self.assertFalse(g.add("key1", 1, "item1", None)) self.assertEqual(g["key1"], {1: "item1"}) self.assertFalse(g.add("key2", 1, "item1", "key1")) self.assertFalse(g.add("key2", 2, "item2", "key1")) self.assertEqual(g["key2"], {1: "item1", 2: "item2"}) self.assertFalse(g.add("key3", 1, "item1", "key1")) self.assertTrue(g.add("key3", 1, "item1", "key2")) self.assertEqual(g.ordered_keys(), ["key3", "key2", "key1"]) self.assertTrue(g.add("key2", 1, "item1", "key3")) self.assertRaises(CyclicDependency, g.ordered_keys) def test_delete(self): ## Second, test the usage of CollectedObjects by Model.delete() # Due to the way that transactions work in the test harness, doing # m.delete() here can work but fail in a real situation, since it may # delete all objects, but not in the right order. So we manually check # that the order of deletion is correct. # Also, it is possible that the order is correct 'accidentally', due # solely to order of imports etc. To check this, we set the order that # 'get_models()' will retrieve to a known 'nice' order, and then try # again with a known 'tricky' order. 
Slightly naughty access to # internals here :-) # If implementation changes, then the tests may need to be simplified: # - remove the lines that set the .keyOrder and clear the related # object caches # - remove the second set of tests (with a2, b2 etc) a1 = A.objects.create() b1 = B.objects.create(a=a1) c1 = C.objects.create(b=b1) d1 = D.objects.create(c=c1, a=a1) o = CollectedObjects() a1._collect_sub_objects(o) self.assertEqual(o.keys(), [D, C, B, A]) a1.delete() # Same again with a known bad order self.order_models("d", "c", "b", "a") self.clear_rel_obj_caches(A, B, C, D) a2 = A.objects.create() b2 = B.objects.create(a=a2) c2 = C.objects.create(b=b2) d2 = D.objects.create(c=c2, a=a2) o = CollectedObjects() a2._collect_sub_objects(o) self.assertEqual(o.keys(), [D, C, B, A]) a2.delete() def test_collected_objects_null(self): g = CollectedObjects() self.assertFalse(g.add("key1", 1, "item1", None)) self.assertFalse(g.add("key2", 1, "item1", "key1", nullable=True)) self.assertTrue(g.add("key1", 1, "item1", "key2")) self.assertEqual(g.ordered_keys(), ["key1", "key2"]) def test_delete_nullable(self): e1 = E.objects.create() f1 = F.objects.create(e=e1) e1.f = f1 e1.save() # Since E.f is nullable, we should delete F first (after nulling out # the E.f field), then E. o = CollectedObjects() e1._collect_sub_objects(o) self.assertEqual(o.keys(), [F, E]) # temporarily replace the UpdateQuery class to verify that E.f is # actually nulled out first logged = [] class LoggingUpdateQuery(sql.UpdateQuery): def clear_related(self, related_field, pk_list, using): logged.append(related_field.name) return super(LoggingUpdateQuery, self).clear_related(related_field, pk_list, using) original = sql.UpdateQuery sql.UpdateQuery = LoggingUpdateQuery e1.delete() self.assertEqual(logged, ["f"]) logged = [] e2 = E.objects.create() f2 = F.objects.create(e=e2) e2.f = f2 e2.save() # Same deal as before, though we are starting from the other object. 
o = CollectedObjects() f2._collect_sub_objects(o) self.assertEqual(o.keys(), [F, E]) f2.delete() self.assertEqual(logged, ["f"]) logged = [] sql.UpdateQuery = original
apache-2.0
GunoH/intellij-community
plugins/hg4idea/testData/bin/mercurial/bookmarks.py
91
9671
# Mercurial bookmark support code # # Copyright 2008 David Soria Parra <dsp@php.net> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from mercurial.i18n import _ from mercurial.node import hex from mercurial import encoding, error, util, obsolete import errno, os class bmstore(dict): """Storage for bookmarks. This object should do all bookmark reads and writes, so that it's fairly simple to replace the storage underlying bookmarks without having to clone the logic surrounding bookmarks. This particular bmstore implementation stores bookmarks as {hash}\s{name}\n (the same format as localtags) in .hg/bookmarks. The mapping is stored as {name: nodeid}. This class does NOT handle the "current" bookmark state at this time. """ def __init__(self, repo): dict.__init__(self) self._repo = repo try: for line in repo.vfs('bookmarks'): line = line.strip() if not line: continue if ' ' not in line: repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line) continue sha, refspec = line.split(' ', 1) refspec = encoding.tolocal(refspec) try: self[refspec] = repo.changelog.lookup(sha) except LookupError: pass except IOError, inst: if inst.errno != errno.ENOENT: raise def write(self): '''Write bookmarks Write the given bookmark => hash dictionary to the .hg/bookmarks file in a format equal to those of localtags. We also store a backup of the previous state in undo.bookmarks that can be copied back on rollback. 
''' repo = self._repo if repo._bookmarkcurrent not in self: setcurrent(repo, None) wlock = repo.wlock() try: file = repo.vfs('bookmarks', 'w', atomictemp=True) for name, node in self.iteritems(): file.write("%s %s\n" % (hex(node), encoding.fromlocal(name))) file.close() # touch 00changelog.i so hgweb reloads bookmarks (no lock needed) try: os.utime(repo.sjoin('00changelog.i'), None) except OSError: pass finally: wlock.release() def readcurrent(repo): '''Get the current bookmark If we use gittish branches we have a current bookmark that we are on. This function returns the name of the bookmark. It is stored in .hg/bookmarks.current ''' mark = None try: file = repo.opener('bookmarks.current') except IOError, inst: if inst.errno != errno.ENOENT: raise return None try: # No readline() in osutil.posixfile, reading everything is cheap mark = encoding.tolocal((file.readlines() or [''])[0]) if mark == '' or mark not in repo._bookmarks: mark = None finally: file.close() return mark def setcurrent(repo, mark): '''Set the name of the bookmark that we are currently on Set the name of the bookmark that we are on (hg update <bookmark>). The name is recorded in .hg/bookmarks.current ''' current = repo._bookmarkcurrent if current == mark: return if mark not in repo._bookmarks: mark = '' wlock = repo.wlock() try: file = repo.opener('bookmarks.current', 'w', atomictemp=True) file.write(encoding.fromlocal(mark)) file.close() finally: wlock.release() repo._bookmarkcurrent = mark def unsetcurrent(repo): wlock = repo.wlock() try: try: util.unlink(repo.join('bookmarks.current')) repo._bookmarkcurrent = None except OSError, inst: if inst.errno != errno.ENOENT: raise finally: wlock.release() def iscurrent(repo, mark=None, parents=None): '''Tell whether the current bookmark is also active I.e., the bookmark listed in .hg/bookmarks.current also points to a parent of the working directory. 
''' if not mark: mark = repo._bookmarkcurrent if not parents: parents = [p.node() for p in repo[None].parents()] marks = repo._bookmarks return (mark in marks and marks[mark] in parents) def updatecurrentbookmark(repo, oldnode, curbranch): try: return update(repo, oldnode, repo.branchtip(curbranch)) except error.RepoLookupError: if curbranch == "default": # no default branch! return update(repo, oldnode, repo.lookup("tip")) else: raise util.Abort(_("branch %s not found") % curbranch) def deletedivergent(repo, deletefrom, bm): '''Delete divergent versions of bm on nodes in deletefrom. Return True if at least one bookmark was deleted, False otherwise.''' deleted = False marks = repo._bookmarks divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]] for mark in divergent: if mark and marks[mark] in deletefrom: if mark != bm: del marks[mark] deleted = True return deleted def update(repo, parents, node): deletefrom = parents marks = repo._bookmarks update = False cur = repo._bookmarkcurrent if not cur: return False if marks[cur] in parents: old = repo[marks[cur]] new = repo[node] divs = [repo[b] for b in marks if b.split('@', 1)[0] == cur.split('@', 1)[0]] anc = repo.changelog.ancestors([new.rev()]) deletefrom = [b.node() for b in divs if b.rev() in anc or b == new] if old.descendant(new): marks[cur] = new.node() update = True if deletedivergent(repo, deletefrom, cur): update = True if update: marks.write() return update def listbookmarks(repo): # We may try to list bookmarks on a repo type that does not # support it (e.g., statichttprepository). 
marks = getattr(repo, '_bookmarks', {}) d = {} hasnode = repo.changelog.hasnode for k, v in marks.iteritems(): # don't expose local divergent bookmarks if hasnode(v) and ('@' not in k or k.endswith('@')): d[k] = hex(v) return d def pushbookmark(repo, key, old, new): w = repo.wlock() try: marks = repo._bookmarks if hex(marks.get(key, '')) != old: return False if new == '': del marks[key] else: if new not in repo: return False marks[key] = repo[new].node() marks.write() return True finally: w.release() def updatefromremote(ui, repo, remotemarks, path): ui.debug("checking for updated bookmarks\n") changed = False localmarks = repo._bookmarks for k in sorted(remotemarks): if k in localmarks: nr, nl = remotemarks[k], localmarks[k] if nr in repo: cr = repo[nr] cl = repo[nl] if cl.rev() >= cr.rev(): continue if validdest(repo, cl, cr): localmarks[k] = cr.node() changed = True ui.status(_("updating bookmark %s\n") % k) else: if k == '@': kd = '' else: kd = k # find a unique @ suffix for x in range(1, 100): n = '%s@%d' % (kd, x) if n not in localmarks: break # try to use an @pathalias suffix # if an @pathalias already exists, we overwrite (update) it for p, u in ui.configitems("paths"): if path == u: n = '%s@%s' % (kd, p) localmarks[n] = cr.node() changed = True ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n)) elif remotemarks[k] in repo: # add remote bookmarks for changes we already have localmarks[k] = repo[remotemarks[k]].node() changed = True ui.status(_("adding remote bookmark %s\n") % k) if changed: localmarks.write() def diff(ui, dst, src): ui.status(_("searching for changed bookmarks\n")) smarks = src.listkeys('bookmarks') dmarks = dst.listkeys('bookmarks') diff = sorted(set(smarks) - set(dmarks)) for k in diff: mark = ui.debugflag and smarks[k] or smarks[k][:12] ui.write(" %-25s %s\n" % (k, mark)) if len(diff) <= 0: ui.status(_("no changed bookmarks found\n")) return 1 return 0 def validdest(repo, old, new): """Is the new bookmark destination a valid 
update from the old one""" repo = repo.unfiltered() if old == new: # Old == new -> nothing to update. return False elif not old: # old is nullrev, anything is valid. # (new != nullrev has been excluded by the previous check) return True elif repo.obsstore: return new.node() in obsolete.foreground(repo, [old.node()]) else: # still an independant clause as it is lazyer (and therefore faster) return old.descendant(new)
apache-2.0
andrewfu0325/gem5-aladdin
tests/quick/se/60.rubytest/test.py
90
1564
# Copyright (c) 2010 Advanced Micro Devices, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Ron Dreslinski
bsd-3-clause
jblupus/PyLoyaltyProject
loyalty/loyalty.py
1
15648
#!/usr/bin/python # coding=utf-8 import json import os from threading import Thread import numpy as np import pandas as pd from utils import HOME PATH = HOME + '/Dropbox/Twitter/' DATASETS_PATH = PATH + 'Data/datasets/' PROFILE_PATH = PATH + 'Data/profile/users_profile_data.csv' class Entropy(Thread): def __init__(self, inputfile, outputfile): super(Entropy, self).__init__() self.inputfile = inputfile self.outputfile = outputfile def run(self): with open(self.inputfile, 'r') as infile, open(self.outputfile, 'wb') as outfile: entropies = {} for line in infile.readlines(): line_json = json.loads(line) key = line_json.keys()[0] if len(line_json[key]) > 10: entropies.update({key: calc_entropy(line_json[key])}) json.dump({'entropies': entropies}, outfile, indent=4) def calc_entropy(freq): rel_freq = np.array(freq) / float(sum(freq)) return -1 * np.sum(rel_freq * np.log10(rel_freq)) / np.log10(np.size(freq)) def calc_interval_size(values, size): return round(np.size(values) / size, 4) def clean_dict(intervals): keys = filter(lambda k: intervals[k] == 0, intervals) for key in keys: del intervals[key] return intervals def save_json(filename, data): with open(filename, 'wb') as outfile: json.dump({'intervals': data}, outfile, indent=4) def calc_alters(values): size = float(np.size(values)) if size == 0: return {} values_mean = np.mean(values) values_sd = np.std(values) intervals = {0: calc_interval_size(filter(lambda v: v <= values_mean, values), size)} if values_sd > 0: max_value = np.max(values) next_values = map(lambda v: v, values) for i in xrange(0, int(np.ceil((max_value - values_mean) / values_sd))): inf = values_mean + (i * values_sd) next_values = filter(lambda v: v > inf, next_values) sup = values_mean + ((i + 1) * values_sd) rel_size = calc_interval_size(filter(lambda v: v <= sup, next_values), size) intervals.update({i + 1: rel_size}) return clean_dict(intervals) class Loyalty(Thread): def __init__(self, in_filename, out_filename): super(Loyalty, 
self).__init__() self.in_filename = in_filename self.out_filename = out_filename def run(self): with open(self.in_filename, 'r') as infile: loyalties = {} for line in infile.readlines(): line_json = json.loads(line) key = line_json.keys()[0] loyalties.update({key: calc_alters(line_json[key])}) save_json(self.out_filename, data=loyalties) class TopAlters(Thread): def __init__(self, in_filename, out_filename, ids=[]): super(TopAlters, self).__init__() self.in_filename = in_filename self.out_filename = out_filename self.ids = ids def run(self): with open(self.in_filename, 'r') as infile: top_alters = {} json_values = json.load(infile)['intervals'] has_ids = np.size(self.ids) == 0 for json_value in json_values: if has_ids or int(json_value) in self.ids: if len(json_values[json_value]) > 0: try: max_interval = np.max(np.array(json_values[json_value].keys()).astype(int)) except ValueError as v: print json_value, len(json_values[json_value]), json_values[json_value] raise v top_alters.update({json_value: max_interval}) df = pd.DataFrame() df['ego_id'] = top_alters.keys() df['max_interval'] = top_alters.values() df.to_csv(self.out_filename) def text_loyalty(): for filename in ['like.jsons', 'mention.jsons', 'retweet.jsons']: in_filename = PATH + 'Text.Distributions/' + filename out_filename = PATH + 'Text.Loyalty/' + format() Loyalty(in_filename=in_filename, out_filename=out_filename).run() def interactions_loyalty(): for filename in ['like.jsons', 'mention.jsons', 'retweet.jsons', 'union.jsons']: in_filename = PATH + 'Filtered.Distributions/' + filename out_filename = PATH + 'Interactions.Loyalty/' + filename Loyalty(in_filename=in_filename, out_filename=out_filename).run() def check(filename): with open(filename, 'r') as infile: json_values = json.load(infile)['intervals'] for json_value in json_values: print json_value, np.sum(json_values[json_value].values()) def top_alters(): for filename in [('like.jsons', 'like.csv'), ('mention.jsons', 'mention.csv'), 
('retweet.jsons', 'retweet.csv'), ('union.jsons', 'union.csv')]: in_filename = PATH + 'Interactions.Loyalty/' + filename[0] out_filename = PATH + 'Interactions.TopAlters/' + filename[1] TopAlters(in_filename=in_filename, out_filename=out_filename).run() def friend_top_alters(): # for filename in [('like.jsons', 'd_friend_like.csv'), # ('mention.jsons', 'd_friend_mention.csv'), # ('retweet.jsons', 'd_friend_retweet.csv')]: # df_friends = pd.read_csv(FRIENDS_PATH + filename[1]) # friends = df_friends['seed_id'].values # in_filename = PATH + 'Interactions.Loyalty/' + filename[0] # out_filename = PATH + 'Interactions.TopAlters/' + filename[1] # TopAlters(in_filename=in_filename, out_filename=out_filename, friends=friends).run() union_friends = set() for filename in ['d_friend_like.csv', 'd_friend_mention.csv', 'd_friend_retweet.csv']: df_friends = pd.read_csv(DATASETS_PATH + filename) friends = df_friends['seed_id'].values union_friends.update(friends) union_friends = list(union_friends) in_filename = PATH + 'Interactions.Loyalty/union.jsons' out_filename = PATH + 'Interactions.TopAlters/d_friend_union.csv' TopAlters(in_filename=in_filename, out_filename=out_filename, ids=union_friends).run() # friend_top_alters() def language_top_atlers(): df = pd.read_csv(PROFILE_PATH) langs = ['en', 'es', 'pt', 'fr', 'it', 'de', 'ja', 'others'] df['language'] = map(lambda lang: lang if 'en-' not in lang else 'en', df['language']) df['language'] = map(lambda lang: lang if 'es-' not in lang else 'es', df['language']) df['language'] = map(lambda lang: lang if 'pt-' not in lang else 'pt', df['language']) df['language'] = map(lambda lang: lang if 'fr-' not in lang else 'fr', df['language']) df['language'] = map(lambda lang: lang if 'it-' not in lang else 'it', df['language']) df['language'] = map(lambda lang: lang if 'de-' not in lang else 'de', df['language']) df['language'] = map(lambda lang: lang if 'ja-' not in lang else 'ja', df['language']) df['language'] = map(lambda lang: lang if 
lang in langs else 'others', df['language']) for language in langs: lang_ids = df.loc[df['language'] == language]['user_id'].values for filename in [('like.jsons', 'd_like_' + language + '.csv'), ('mention.jsons', 'd_mention_' + language + '.csv'), ('retweet.jsons', 'd_retweet_' + language + '.csv'), ('union.jsons', 'd_union_' + language + '.csv')]: in_filename = PATH + 'Interactions.Loyalty/' + filename[0] out_filename = PATH + 'Language.TopAlters/' + filename[1] TopAlters(in_filename=in_filename, out_filename=out_filename, ids=lang_ids).run() # friend_top_alters() def datasets_size(): d_size = 188511.0 print '\n\nDatasets' for dataset in np.sort(filter(lambda x: 'union' not in x, os.listdir(DATASETS_PATH))): df = pd.read_csv(DATASETS_PATH + dataset) size = np.size(df['seed_id'].values) print dataset.split('.')[0], ' & '.join([str(size), str(round(100 * size / d_size, 2))]) print '\n\nLanguages Datasets' for dataset in np.sort(filter(lambda x: 'union' not in x, os.listdir(PATH + 'Language.TopAlters/'))): df = pd.read_csv(PATH + 'Language.TopAlters/' + dataset) size = np.size(df['ego_id'].values) print dataset.split('.')[0], ' & '.join([str(size), str(round(100 * size / d_size, 2))]) # datasets_size() def get_top_alters(filename=None, values=[], ids=[]): if filename is not None: df = pd.read_csv(filename) if len(ids) == 0: values = df['max_interval'].values else: d = dict(zip(df['ego_id'], df['max_interval'])) values = [d[key] for key in ids] values = filter(lambda x: x > 0, values) total = float(np.size(values)) i0 = np.size(filter(lambda x: x == 0, values)) i1 = np.size(filter(lambda x: 1 <= x <= 3, values)) i2 = np.size(filter(lambda x: 4 <= x <= 6, values)) i3 = np.size(filter(lambda x: 7 <= x <= 9, values)) i4 = np.size(filter(lambda x: 10 <= x <= 12, values)) i5 = np.size(filter(lambda x: 13 <= x <= 15, values)) i6 = np.size(filter(lambda x: 16 <= x, values)) counts = {0: i0, 1: i1, 2: i2, 3: i3, 4: i4, 5: i5, 6: i6} # counts = dict([(key, round(100 * 
counts[key] / total, 4)) for key in np.sort(counts.keys())]) return total, np.array(counts.values()).astype(float).tolist() def check_top(filename): with open(filename, 'r') as infile: i = [] for line in infile.readlines(): values = np.array(line.split(' '))[1:] i.append(float(values[0].split(':')[0])) total, counts = get_top_alters(values=i) print total, 100.0 * np.array(counts) / total, counts def top_category(): langs = ['en', 'es', 'pt', 'fr', 'it', 'de', 'ja', 'others'] datapath = PATH + 'Interactions.TopAlters/' langdatapath = PATH + 'Language.TopAlters/' total, values = get_top_alters(datapath + 'like.csv') print 'like.csv', total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' for lang in langs: total, values = get_top_alters(langdatapath + 'd_like_' + lang + '.csv') print 'd_like_' + lang + '.csv', total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)) .astype(str)) + '\\\\' print '\n' total, values = get_top_alters(datapath + 'mention.csv') print 'mention.csv', total, ' & '.join( np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' for lang in langs: total, values = get_top_alters(langdatapath + 'd_mention_' + lang + '.csv') print 'd_mention_' + lang + '.csv', total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)) .astype(str)) + '\\\\' print '\n' total, values = get_top_alters(datapath + 'retweet.csv') print 'retweet.csv', total, ' & '.join( np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' for lang in langs: total, values = get_top_alters(langdatapath + 'd_retweet_' + lang + '.csv') print 'd_retweet_' + lang + '.csv', total, ' & '.join( np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' print '\n' total, values = get_top_alters(datapath + 'union.csv') print 'union.csv', total, ' & '.join( np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' for lang in langs: total, 
values = get_top_alters(langdatapath + 'd_union_' + lang + '.csv') print 'd_union_' + lang + '.csv', total, ' & '.join( np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' # top_category() def co_top_category(): ids = set() for _file in ['d_like.csv', 'd_mention.csv', 'd_retweet.csv']: _ids = np.array(pd.read_csv(DATASETS_PATH + _file)['seed_id']).astype(int) if len(ids) > 0: ids = ids.intersection(_ids.tolist()) else: ids.update(_ids.tolist()) datapath = PATH + 'Interactions.TopAlters/' total, values = get_top_alters(filename=datapath + 'like.csv', ids=list(ids)) print 'like.csv', total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' total, values = get_top_alters(filename=datapath + 'mention.csv', ids=list(ids)) print 'mention.csv', total, ' & '.join( np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' total, values = get_top_alters(filename=datapath + 'retweet.csv', ids=list(ids)) print 'retweet.csv', total, ' & '.join( np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' total, values = get_top_alters(filename=datapath + 'union.csv', ids=list(ids)) print 'union.csv', total, ' & '.join( np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' # co_top_category() def friends_top_category(): for _file in [('d_like.csv', 'd_friend_like.csv'), ('d_mention.csv', 'd_friend_mention.csv'), ('d_retweet.csv', 'd_friend_retweet.csv'), ('d_union.csv', 'd_friend_union.csv')]: # _ids = np.array(pd.read_csv(DATASETS_PATH + _file[1])['seed_id']).astype(int) datapath = PATH + 'Interactions.TopAlters/' total, values = get_top_alters(filename=datapath + _file[1]) print _file[0], total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' # friends_top_category() def co_friends_top_category(): ids = set() for _file in ['d_friend_like.csv', 'd_friend_mention.csv', 'd_friend_retweet.csv']: _ids = 
np.array(pd.read_csv(DATASETS_PATH + _file)['seed_id']).astype(int) if len(ids) > 0: ids = ids.intersection(_ids.tolist()) else: ids.update(_ids.tolist()) for _file in [('d_like.csv', 'd_friend_like.csv'), ('d_mention.csv', 'd_friend_mention.csv'), ('d_retweet.csv', 'd_friend_retweet.csv'), ('d_union.csv', 'd_friend_union.csv')]: datapath = PATH + 'Interactions.TopAlters/' total, values = get_top_alters(filename=datapath + _file[1], ids=list(ids)) print _file[0], total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\' # co_friends_top_category() def text_top_alters(): for filename in [('like.jsons', 'like.csv'), ('mention.jsons', 'mention.csv'), ('retweet.jsons', 'retweet.csv')]: in_filename = PATH + 'Text.Loyalty/' + filename[0] out_filename = PATH + 'Text.TopAlters/' + filename[1] TopAlters(in_filename=in_filename, out_filename=out_filename).run() # text_top_alters() def text_top_category(): for _file in ['like.csv', 'mention.csv', 'retweet.csv']: datapath = PATH + 'Text.TopAlters/' total, values = get_top_alters(filename=datapath + _file) print _file, total, ' & '.join(np.array(np.round(100.0 * np.array(values) / total, 4)).astype(str)) + '\\\\'
bsd-2-clause
dudepare/bedrock
bedrock/firefox/views.py
1
23915
# -*- coding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import json import re from cgi import escape from django.conf import settings from django.http import HttpResponsePermanentRedirect, HttpResponseRedirect from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_exempt, csrf_protect from django.views.decorators.http import require_POST from django.views.decorators.vary import vary_on_headers from django.views.generic.base import TemplateView import basket from bedrock.base.helpers import static from bedrock.base.urlresolvers import reverse from lib import l10n_utils from lib.l10n_utils.dotlang import _ from product_details.version_compare import Version import waffle from bedrock.base.geo import get_country_from_request from bedrock.firefox.firefox_details import firefox_desktop from bedrock.firefox.forms import SMSSendForm, SendToDeviceWidgetForm from bedrock.mozorg.views import process_partnership_form from bedrock.mozorg.util import HttpResponseJSON from bedrock.releasenotes import version_re UA_REGEXP = re.compile(r"Firefox/(%s)" % version_re) LANG_FILES = ['firefox/partners/index'] LOCALE_FXOS_HEADLINES = { 'de': { 'title': u"Firefox OS ist richtungsweisend für die Zukunft des " u"mobilen Marktes", 'url': 'http://blog.mozilla.org/press-de/2014/02/23/' 'firefox-os-ist-richtungsweisend-fur-die-zukunft-des-mobilen-' 'marktes', }, 'en-GB': { 'title': u'Firefox OS Unleashes the Future of Mobile', 'url': 'http://blog.mozilla.org/press-uk/2014/02/23/' 'firefox-os-unleashes-the-future-of-mobile' }, 'en-US': { 'title': _('Firefox OS Unleashes the Future of Mobile'), 'url': 'https://blog.mozilla.org/press/2014/02/firefox-os-future-2/', }, 'es-AR': { 'title': u'Firefox OS te desvela el futuro de lo móvil', 'url': 
'http://blog.mozilla.org/press-latam/2014/02/23/' 'firefox-os-te-desvela-el-futuro-de-lo-movil/', }, 'es-CL': { 'title': u'Firefox OS te desvela el futuro de lo móvil', 'url': 'http://blog.mozilla.org/press-latam/2014/02/23/' 'firefox-os-te-desvela-el-futuro-de-lo-movil/', }, 'es-ES': { 'title': u'Firefox OS te desvela el futuro de lo móvil', 'url': 'https://blog.mozilla.org/press/2014/02/firefox-os-future-2/', }, 'es-MX': { 'title': u'Firefox OS te desvela el futuro de lo móvil', 'url': 'http://blog.mozilla.org/press-latam/2014/02/23/' 'firefox-os-te-desvela-el-futuro-de-lo-movil/', }, 'fr': { 'title': u'Firefox OS chamboule le futur du mobile', 'url': 'http://blog.mozilla.org/press-fr/2014/02/23/' 'firefox-os-chamboule-le-futur-du-mobile', }, 'it': { 'title': u'Firefox OS svela il futuro del mobile', 'url': 'http://blog.mozilla.org/press-it/2014/02/23/' 'firefox-os-svela-il-futuro-del-mobile', }, 'pl': { 'title': u'Firefox OS uwalnia przyszłość technologii mobilnej', 'url': 'http://blog.mozilla.org/press-pl/2014/02/23/' 'firefox-os-uwalnia-przyszlosc-technologii-mobilnej', }, 'pt-BR': { 'title': u'Firefox OS apresenta o futuro dos dispositivos móveis', 'url': 'https://blog.mozilla.org/press-br/2014/02/23/' 'firefox-os-apresenta-o-futuro-dos-dispositivos-moveis/', }, } INSTALLER_CHANNElS = [ 'release', 'beta', 'alpha', # 'nightly', # soon ] SMS_MESSAGES = { 'android': 'SMS_Android', } EMAIL_MESSAGES = { 'android': 'download-firefox-android', 'ios': 'download-firefox-ios', 'all': 'download-firefox-mobile', } LOCALE_SPRING_CAMPAIGN_VIDEOS = { 'en-US': 'https://videos.cdn.mozilla.net/uploads/marketing/SpringCampaign2015/Firefox_Welcome_english', 'en-GB': 'https://videos.cdn.mozilla.net/uploads/marketing/SpringCampaign2015/Firefox_Welcome_englishUK', 'de': 'https://videos.cdn.mozilla.net/uploads/marketing/SpringCampaign2015/Firefox_Welcome_german', 'es-ES': 'https://videos.cdn.mozilla.net/uploads/marketing/SpringCampaign2015/Firefox_Welcome_spanish', 'es-MX': 
'https://videos.cdn.mozilla.net/uploads/marketing/SpringCampaign2015/Firefox_Welcome_spanishMX', 'fr': 'https://videos.cdn.mozilla.net/uploads/marketing/SpringCampaign2015/Firefox_Welcome_french', 'pt-BR': 'https://videos.cdn.mozilla.net/uploads/marketing/SpringCampaign2015/Firefox_Welcome_portugeseBrazil', } def get_js_bundle_files(bundle): """ Return a JSON string of the list of file names for lazy loaded javascript. """ bundle = settings.PIPELINE_JS[bundle] if settings.DEBUG: items = bundle['source_filenames'] else: items = (bundle['output_filename'],) return json.dumps([static(i) for i in items]) JS_COMMON = get_js_bundle_files('partners_common') JS_MOBILE = get_js_bundle_files('partners_mobile') JS_DESKTOP = get_js_bundle_files('partners_desktop') def installer_help(request): installer_lang = request.GET.get('installer_lang', None) installer_channel = request.GET.get('channel', None) context = { 'installer_lang': None, 'installer_channel': None, } if installer_lang and installer_lang in firefox_desktop.languages: context['installer_lang'] = installer_lang if installer_channel and installer_channel in INSTALLER_CHANNElS: context['installer_channel'] = installer_channel return l10n_utils.render(request, 'firefox/installer-help.html', context) @csrf_exempt def sms_send(request): form = SMSSendForm(request.POST or None) if request.method == 'POST': error_msg = _('An error occurred in our system. 
Please try again later.') error = None if form.is_valid(): try: basket.send_sms(form.cleaned_data['number'], SMS_MESSAGES['android'], form.cleaned_data['optin']) except basket.BasketException: error = error_msg else: number_errors = form.errors.get('number') if number_errors: # form error messages may contain unsanitized user input error = escape(number_errors[0]) else: error = error_msg if request.is_ajax(): # return JSON if error: resp = { 'success': False, 'error': error, } else: resp = {'success': True} return HttpResponseJSON(resp) else: if error: form.errors['__all__'] = form.error_class([error]) else: return HttpResponseRedirect(reverse('firefox.android.sms-thankyou')) return l10n_utils.render(request, 'firefox/android/sms-send.html', {'sms_form': form}) @require_POST @csrf_exempt def send_to_device_ajax(request): locale = l10n_utils.get_locale(request) phone_or_email = request.POST.get('phone-or-email') if not phone_or_email: return HttpResponseJSON({'success': False, 'errors': ['phone-or-email']}) data = { 'platform': request.POST.get('platform'), } data_type = 'email' if '@' in phone_or_email else 'number' data[data_type] = phone_or_email form = SendToDeviceWidgetForm(data) if form.is_valid(): phone_or_email = form.cleaned_data.get(data_type) platform = form.cleaned_data.get('platform') if data_type == 'number': if platform in SMS_MESSAGES: try: basket.send_sms(phone_or_email, SMS_MESSAGES[platform]) except basket.BasketException: return HttpResponseJSON({'success': False, 'errors': ['system']}, status=400) else: # TODO define all platforms in SMS_MESSAGES return HttpResponseJSON({'success': False, 'errors': ['platform']}) else: # email if platform in EMAIL_MESSAGES: try: basket.subscribe(phone_or_email, EMAIL_MESSAGES[platform], source_url=request.POST.get('source-url'), lang=locale) except basket.BasketException: return HttpResponseJSON({'success': False, 'errors': ['system']}, status=400) else: # TODO define all platforms in EMAIL_MESSAGES return 
HttpResponseJSON({'success': False, 'errors': ['platform']}) resp_data = {'success': True} else: resp_data = { 'success': False, 'errors': form.errors.keys(), } return HttpResponseJSON(resp_data) def windows_billboards(req): major_version = req.GET.get('majorVersion') minor_version = req.GET.get('minorVersion') if major_version and minor_version: major_version = float(major_version) minor_version = float(minor_version) if major_version == 5 and minor_version == 1: return l10n_utils.render(req, 'firefox/unsupported/winxp.html') return l10n_utils.render(req, 'firefox/unsupported/win2k.html') def fx_home_redirect(request): return HttpResponseRedirect(reverse('firefox.new')) def dnt(request): response = l10n_utils.render(request, 'firefox/dnt.html') response['Vary'] = 'DNT' return response def all_downloads(request, channel): if channel is None: channel = 'release' if channel == 'developer': channel = 'alpha' if channel == 'organizations': channel = 'esr' version = firefox_desktop.latest_version(channel) query = request.GET.get('q') channel_names = { 'release': _('Firefox'), 'beta': _('Firefox Beta'), 'alpha': _('Developer Edition'), 'esr': _('Firefox Extended Support Release'), } context = { 'full_builds_version': version.split('.', 1)[0], 'full_builds': firefox_desktop.get_filtered_full_builds(channel, version, query), 'test_builds': firefox_desktop.get_filtered_test_builds(channel, version, query), 'query': query, 'channel': channel, 'channel_name': channel_names[channel] } if channel == 'esr': next_version = firefox_desktop.latest_version('esr_next') if next_version: context['full_builds_next_version'] = next_version.split('.', 1)[0] context['full_builds_next'] = firefox_desktop.get_filtered_full_builds('esr_next', next_version, query) context['test_builds_next'] = firefox_desktop.get_filtered_test_builds('esr_next', next_version, query) return l10n_utils.render(request, 'firefox/all.html', context) @never_cache def firefox_os_geo_redirect(request): country = 
get_country_from_request(request) version = settings.FIREFOX_OS_COUNTRY_VERSIONS.get( country, settings.FIREFOX_OS_COUNTRY_VERSIONS['default'] ) return HttpResponseRedirect(reverse('firefox.os.ver.{0}'.format(version))) @csrf_protect def firefox_partners(request): # If the current locale isn't in our list, return the en-US value press_locale = request.locale if ( request.locale in LOCALE_FXOS_HEADLINES) else 'en-US' template_vars = { 'locale_headline_url': LOCALE_FXOS_HEADLINES[press_locale]['url'], 'locale_headline_title': LOCALE_FXOS_HEADLINES[press_locale]['title'], 'js_common': JS_COMMON, 'js_mobile': JS_MOBILE, 'js_desktop': JS_DESKTOP, } form_kwargs = { 'interest_set': 'fx', 'lead_source': 'www.mozilla.org/firefox/partners/'} return process_partnership_form( request, 'firefox/partners/index.html', 'firefox.partners.index', template_vars, form_kwargs) def show_devbrowser_firstrun_or_whatsnew(version): match = re.match(r'\d{1,2}', version) if match: num_version = int(match.group(0)) return num_version >= 35 and version.endswith('a2') return False def show_search_firstrun(version): try: version = Version(version) except ValueError: return False return version >= Version('34.0') def show_36_firstrun(version): try: version = Version(version) except ValueError: return False return version >= Version('36.0') def show_36_whatsnew_tour(oldversion): try: oldversion = Version(oldversion) except ValueError: return False return oldversion < Version('36.0') def show_38_0_5_firstrun_or_whatsnew(version): try: version = Version(version) except ValueError: return False return version >= Version('38.0.5') def show_40_firstrun(version): if (waffle.switch_is_active('fx40-firstrun')): try: version = Version(version) except ValueError: return False return version >= Version('40.0') else: return False class LatestFxView(TemplateView): """ Base class to be extended by views that require visitor to be using latest version of Firefox. 
Classes extending this class must implement either `get_template_names` function or provide `template_name` class attribute. Control where to redirect non Firefox users by setting the `non_fx_redirect` attribute to a url name. """ non_fx_redirect = 'firefox.new' @vary_on_headers('User-Agent') def dispatch(self, *args, **kwargs): return super(LatestFxView, self).dispatch(*args, **kwargs) def post(self, request, *args, **kwargs): # required for newsletter form post that is handled in # newsletter/helpers.py return self.get(request, *args, **kwargs) def redirect_to(self): """ Redirect visitors based on their user-agent. - Up-to-date Firefox users pass through. - Other Firefox users go to the new page. - Non Firefox users go to the configured page. """ query = self.request.META.get('QUERY_STRING') query = '?' + query if query else '' user_agent = self.request.META.get('HTTP_USER_AGENT', '') if 'Firefox' not in user_agent: return reverse(self.non_fx_redirect) + query # TODO : Where to redirect bug 757206 return None def render_to_response(self, context, **response_kwargs): redirect_url = self.redirect_to() if redirect_url is not None: return HttpResponsePermanentRedirect(redirect_url) else: return l10n_utils.render(self.request, self.get_template_names(), context, **response_kwargs) class FirstrunView(LatestFxView): def get(self, request, *args, **kwargs): if not settings.DEV and not request.is_secure(): uri = 'https://{host}{path}'.format( host=request.get_host(), path=request.get_full_path(), ) return HttpResponsePermanentRedirect(uri) return super(FirstrunView, self).get(request, *args, **kwargs) def get_context_data(self, **kwargs): ctx = super(FirstrunView, self).get_context_data(**kwargs) # add spring campaign video for 38.0.5 version = self.kwargs.get('version') or '' if show_38_0_5_firstrun_or_whatsnew(version): locale = l10n_utils.get_locale(self.request) ctx['video_url'] = LOCALE_SPRING_CAMPAIGN_VIDEOS.get(locale, False) return ctx def 
get_template_names(self): version = self.kwargs.get('version') or '' locale = l10n_utils.get_locale(self.request) if show_devbrowser_firstrun_or_whatsnew(version): if (waffle.switch_is_active('dev-edition-spring-campaign')): template = 'firefox/dev-firstrun-spring-campaign.html' else: template = 'firefox/dev-firstrun.html' elif show_40_firstrun(version): template = 'firefox/australis/fx40/firstrun.html' elif show_38_0_5_firstrun_or_whatsnew(version): template = 'firefox/australis/fx38_0_5/firstrun.html' elif show_36_firstrun(version): template = 'firefox/australis/fx36/firstrun-tour.html' elif show_search_firstrun(version) and locale == 'en-US': template = 'firefox/australis/firstrun-34-tour.html' else: template = 'firefox/australis/firstrun-tour.html' # return a list to conform with original intention return [template] class WhatsnewView(LatestFxView): pocket_locales = ['en-US', 'es-ES', 'ru', 'ja', 'de'] def get(self, request, *args, **kwargs): if not settings.DEV and not request.is_secure(): uri = 'https://{host}{path}'.format( host=request.get_host(), path=request.get_full_path(), ) return HttpResponsePermanentRedirect(uri) return super(WhatsnewView, self).get(request, *args, **kwargs) def get_context_data(self, **kwargs): ctx = super(WhatsnewView, self).get_context_data(**kwargs) locale = l10n_utils.get_locale(self.request) video_url = LOCALE_SPRING_CAMPAIGN_VIDEOS.get(locale, False) if video_url: ctx['video_url'] = video_url return ctx def get_template_names(self): locale = l10n_utils.get_locale(self.request) version = self.kwargs.get('version') or '' oldversion = self.request.GET.get('oldversion', '') # old versions of Firefox sent a prefixed version if oldversion.startswith('rv:'): oldversion = oldversion[3:] if show_devbrowser_firstrun_or_whatsnew(version): template = 'firefox/dev-whatsnew.html' elif show_38_0_5_firstrun_or_whatsnew(version): has_video = LOCALE_SPRING_CAMPAIGN_VIDEOS.get(locale, False) has_pocket = locale in self.pocket_locales if 
has_pocket and has_video: template = 'firefox/whatsnew_38/whatsnew-pocket-video.html' elif has_video: template = 'firefox/whatsnew_38/whatsnew-video.html' elif has_pocket: template = 'firefox/whatsnew_38/whatsnew-pocket.html' else: template = 'firefox/australis/fx36/whatsnew-no-tour.html' elif version.startswith('37.'): template = 'firefox/whatsnew-fx37.html' elif version.startswith('36.'): if show_36_whatsnew_tour(oldversion): template = 'firefox/australis/fx36/whatsnew-tour.html' else: template = 'firefox/australis/fx36/whatsnew-no-tour.html' else: template = 'firefox/australis/whatsnew-no-tour.html' # return a list to conform with original intention return [template] class TourView(LatestFxView): def get(self, request, *args, **kwargs): if not settings.DEV and not request.is_secure(): uri = 'https://{host}{path}'.format( host=request.get_host(), path=request.get_full_path(), ) return HttpResponsePermanentRedirect(uri) return super(TourView, self).get(request, *args, **kwargs) def get_template_names(self): version = self.kwargs.get('version') or '' locale = l10n_utils.get_locale(self.request) if show_devbrowser_firstrun_or_whatsnew(version): if (waffle.switch_is_active('dev-edition-spring-campaign')): template = 'firefox/dev-firstrun-spring-campaign.html' else: template = 'firefox/dev-firstrun.html' elif show_36_firstrun(version): template = 'firefox/australis/fx36/help-menu-36-tour.html' elif show_search_firstrun(version) and locale == 'en-US': template = 'firefox/australis/help-menu-34-tour.html' else: template = 'firefox/australis/help-menu-tour.html' # return a list to conform with original intention return [template] def hello(request): videos = { 'ar': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_arabic', 'de': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_german', 'en-US': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_english', 'es-AR': 
'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_spanish', 'es-CL': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_spanish', 'es-ES': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_spanish', 'es-MX': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_spanish', 'fr': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_french', 'id': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_indonesian', 'it': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_italian', 'ja': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_japanese', 'pl': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_polish', 'pt-BR': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_portugese', 'ru': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_russian', 'tr': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_turkish', 'zh-TW': 'https://videos.cdn.mozilla.net/uploads/FirefoxHello/firefoxhello_intro_chinese' } return l10n_utils.render(request, 'firefox/hello/index.html', {'video_url': videos.get(request.locale, videos.get('en-US'))}) def hello_screen_sharing(version): try: if re.search('a\d$', version): version = version[:-2] version = Version(version) except ValueError: return False return version >= Version('38.0.5') def hello_minimal_ftu(version): try: if re.search('a\d$', version): version = version[:-2] version = Version(version) except ValueError: return False return version >= Version('40.0') class HelloStartView(LatestFxView): non_fx_redirect = 'firefox.hello' def get_template_names(self): version = self.kwargs.get('version') or '' if hello_minimal_ftu(version): template = 'firefox/hello/start-40.0.html' elif hello_screen_sharing(version): template = 'firefox/hello/start-38.0.5.html' else: template = 'firefox/hello/start.html' 
# return a list to conform with original intention return [template] class FeedbackView(TemplateView): def get_template_names(self): score = self.request.GET.get('score', 0) if score > '3': template = 'firefox/feedback/happy.html' else: template = 'firefox/feedback/unhappy.html' return [template]
mpl-2.0
NeuralEnsemble/neuroConstruct
lib/jython/Lib/socket.py
5
2462
# dispatches to _socket for actual implementation from _socket import ( socket, error, herror, gaierror, timeout, has_ipv6, create_connection, getdefaulttimeout, setdefaulttimeout, getfqdn, gethostbyaddr, gethostbyname, gethostbyname_ex, gethostname, getprotobyname, getservbyname, getservbyport, AF_UNSPEC, AF_INET, AF_INET6, AI_PASSIVE, AI_CANONNAME, AI_NUMERICHOST, AI_V4MAPPED, AI_ALL, AI_ADDRCONFIG, AI_NUMERICSERV, EAI_NONAME, EAI_SERVICE, EAI_ADDRFAMILY, NI_NUMERICHOST, NI_NUMERICSERV, NI_NOFQDN, NI_NAMEREQD, NI_DGRAM, NI_MAXSERV, NI_IDN, NI_IDN_ALLOW_UNASSIGNED, NI_IDN_USE_STD3_ASCII_RULES, NI_MAXHOST, SHUT_RD, SHUT_WR, SHUT_RDWR, SOCK_DGRAM, SOCK_STREAM, SOCK_RAW, SOCK_RDM, SOCK_SEQPACKET, SOL_SOCKET, # not supported, but here for apparent completeness IPPROTO_AH, IPPROTO_DSTOPTS, IPPROTO_ESP, IPPROTO_FRAGMENT, IPPROTO_GGP, IPPROTO_HOPOPTS, IPPROTO_ICMP, IPPROTO_ICMPV6, IPPROTO_IDP, IPPROTO_IGMP, IPPROTO_IP, # supported # not supported IPPROTO_IPV4, IPPROTO_IPV6, IPPROTO_MAX, IPPROTO_ND, IPPROTO_NONE, IPPROTO_PUP, IPPROTO_RAW, IPPROTO_ROUTING, IPPROTO_TCP, # supported IPPROTO_UDP, # supported # supported SO_BROADCAST, SO_KEEPALIVE, SO_LINGER, SO_RCVBUF, SO_REUSEADDR, SO_SNDBUF, SO_TIMEOUT, TCP_NODELAY, # pseudo options SO_ACCEPTCONN, SO_ERROR, SO_TYPE, # unsupported, will return errno.ENOPROTOOPT if actually used SO_OOBINLINE, SO_DEBUG, SO_DONTROUTE, SO_EXCLUSIVEADDRUSE, SO_RCVLOWAT, SO_RCVTIMEO, SO_REUSEPORT, SO_SNDLOWAT, SO_SNDTIMEO, SO_USELOOPBACK, INADDR_ANY, INADDR_BROADCAST, IN6ADDR_ANY_INIT, _GLOBAL_DEFAULT_TIMEOUT, is_ipv4_address, is_ipv6_address, is_ip_address, getaddrinfo, getnameinfo, htons, htonl, ntohs, ntohl, inet_aton, inet_ntoa, inet_pton, inet_ntop, _fileobject, _get_jsockaddr ) def supports(feature): # FIXME this seems to be Jython internals specific, and for # testing only; consider removing since it really no longer # matters if feature == "idna": return True raise KeyError("Unknown feature", feature)
gpl-2.0
cwalker87/stashboard-app
tests/test_services.py
13
7739
try: import json except ImportError: import simplejson as json from mock import patch from models import Service, List from test_api import StashboardTest class PublicServicesTest(StashboardTest): def test_get(self): response = self.get("/api/v1/services") self.assertEquals(response.status_code, 200) def test_post(self): response = self.post("/api/v1/services") self.assertEquals(response.status_code, 405) def test_delete(self): response = self.delete("/api/v1/services") self.assertEquals(response.status_code, 405) def test_put(self): response = self.put("/api/v1/services") self.assertEquals(response.status_code, 405) class ServiceInstanceTest(StashboardTest): def setUp(self): super(ServiceInstanceTest, self).setUp() self.service_list = List(slug="foo", name="Foo", description="Bar") self.service_list.put() self.service = Service(name="Foo", slug="foo", description="foo") self.service.put() @patch("handlers.api.invalidate_cache") def test_delete_service(self, mock): response = self.delete("/admin/api/v1/services/foo") self.assertEquals(response.status_code, 200) def test_delete_wrong_service(self): response = self.delete("/admin/api/v1/services/bar") self.assertEquals(response.status_code, 404) def test_delete_wrong_version(self): response = self.delete("/admin/api/foo/services/foo") self.assertEquals(response.status_code, 404) def test_post_wrong_service(self): response = self.post("/admin/api/v1/services/bar") self.assertEquals(response.status_code, 404) def test_post_wrong_version(self): response = self.post("/admin/api/foo/services/foo") self.assertEquals(response.status_code, 404) def test_post_update_list(self): response = self.post("/admin/api/v1/services/foo", data={"list": "foo"}) print response.content self.assertEquals(response.headers["Content-Type"], "application/json") self.assertEquals(response.status_code, 200) service = Service.get(self.service.key()) self.assertEquals(service.list.name, "Foo") def test_post_update_desc(self): response = 
self.post("/admin/api/v1/services/foo", data={"description": "hello"}) self.assertEquals(response.headers["Content-Type"], "application/json") self.assertEquals(response.status_code, 200) service = Service.get(self.service.key()) self.assertEquals(service.description, "hello") def test_post_update(self): response = self.post("/admin/api/v1/services/foo", data={"name": "bar"}) self.assertEquals(response.headers["Content-Type"], "application/json") self.assertEquals(response.status_code, 200) service = Service.get(self.service.key()) self.assertEquals(service.name, "bar") def test_get_wrong_service(self): response = self.get("/admin/api/v1/services/bar") self.assertEquals(response.status_code, 404) def test_get_wrong_version(self): response = self.get("/admin/api/foo/services/foo") self.assertEquals(response.status_code, 404) def test_get_service(self): response = self.get("/admin/api/v1/services/foo") self.assertEquals(response.status_code, 200) self.assertEquals(response.headers["Content-Type"], "application/json") class ServicesTest(StashboardTest): def setUp(self): super(ServicesTest, self).setUp() self.service_list = List(slug="foo", name="Foo", description="Bar") self.service_list.put() def test_post_wrong_version(self): response = self.post("/admin/api/foo/services") self.assertEquals(response.status_code, 404) self.assertEquals(response.headers["Content-Type"], "application/json") def test_get_wrong_version(self): response = self.get("/api/foo/services") self.assertEquals(response.status_code, 404) self.assertEquals(response.headers["Content-Type"], "application/json") def test_get_services_list(self): """Services should return a 200 with the proper content type""" response = self.get("/api/v1/services") self.assertEquals(response.status_code, 200) self.assertEquals(response.headers["Content-Type"], "application/json") def test_create_service_bad_list(self): """Services should 201 """ response = self.post("/admin/api/v1/services", data={"description": "An 
example service API", "name": "Some Random Name", "list": "bar"}) service = json.loads(response.content) self.assertEquals(response.status_code, 400) def test_create_service_list(self): """Services should 201 """ response = self.post("/admin/api/v1/services", data={"description": "An example service API", "name": "Some Random Name", "list": "foo"}) service = json.loads(response.content) self.assertEquals(response.status_code, 201) def test_create_service_name(self): """Services should 201 """ response = self.post("/admin/api/v1/services", data={"description": "An example service API", "name": "Some Random Name"}) service = json.loads(response.content) self.assertEquals(response.status_code, 201) self.assertEquals(service["name"], "Some Random Name") self.assertEquals(service["description"], "An example service API") def test_missing_service_name(self): """Services should 400 without a name""" response = self.post("/admin/api/v1/services", data={"description": "An example service API"}) self.assertEquals(response.status_code, 400) self.assertEquals(response.headers["Content-Type"], "application/json") def test_missing_service_description(self): """Services should 400 without a description""" response = self.post("/admin/api/v1/services", data={"name": "Some Random Name"}) self.assertEquals(response.status_code, 400) self.assertEquals(response.headers["Content-Type"], "application/json") def test_missing_service_data(self): """Creating a service should 400 without data""" response = self.post("/admin/api/v1/services") self.assertEquals(response.status_code, 400) self.assertEquals(response.headers["Content-Type"], "application/json") def test_delete(self): "should return 405 Method Not Allowed" response = self.delete("/admin/api/v1/services") self.assertEquals(response.status_code, 405) self.assertEquals(response.headers["Content-Type"], "application/json") def test_put(self): """Should return 405 Content Length Required""" response = 
self.put("/admin/api/v1/services") self.assertEquals(response.status_code, 405) self.assertEquals(response.headers["Content-Type"], "application/json") def test_put_with_data(self): """should return 405 Method Not Allowed""" response = self.put("/admin/api/v1/services", data={"description": "An example service API"}) self.assertEquals(response.status_code, 405) self.assertEquals(response.headers["Content-Type"], "application/json")
mit
bukzor/sympy
examples/advanced/autowrap_ufuncify.py
48
2388
#!/usr/bin/env python """ Setup ufuncs for the legendre polynomials ----------------------------------------- This example demonstrates how you can use the ufuncify utility in SymPy to create fast, customized universal functions for use with numpy arrays. An autowrapped sympy expression can be significantly faster than what you would get by applying a sequence of the ufuncs shipped with numpy. [0] You need to have numpy installed to run this example, as well as a working fortran compiler. [0]: http://ojensen.wordpress.com/2010/08/10/fast-ufunc-ish-hydrogen-solutions/ """ import sys from sympy.external import import_module np = import_module('numpy') if not np: sys.exit("Cannot import numpy. Exiting.") import mpmath from sympy.utilities.autowrap import ufuncify from sympy.utilities.lambdify import implemented_function from sympy import symbols, legendre, Plot, pprint def main(): print(__doc__) x = symbols('x') # a numpy array we can apply the ufuncs to grid = np.linspace(-1, 1, 1000) # set mpmath precision to 20 significant numbers for verification mpmath.mp.dps = 20 print("Compiling legendre ufuncs and checking results:") # Let's also plot the ufunc's we generate plot1 = Plot(visible=False) for n in range(6): # Setup the SymPy expression to ufuncify expr = legendre(n, x) print("The polynomial of degree %i is" % n) pprint(expr) # This is where the magic happens: binary_poly = ufuncify(x, expr) # It's now ready for use with numpy arrays polyvector = binary_poly(grid) # let's check the values against mpmath's legendre function maxdiff = 0 for j in range(len(grid)): precise_val = mpmath.legendre(n, grid[j]) diff = abs(polyvector[j] - precise_val) if diff > maxdiff: maxdiff = diff print("The largest error in applied ufunc was %e" % maxdiff) assert maxdiff < 1e-14 # We can also attach the autowrapped legendre polynomial to a sympy # function and plot values as they are calculated by the binary function g = implemented_function('g', binary_poly) plot1[n] = g(x), [200] 
print("Here's a plot with values calculated by the wrapped binary functions") plot1.show() if __name__ == '__main__': main()
bsd-3-clause
Distrotech/mozjs
js/src/python/mozbuild/mozbuild/mozconfig.py
3
13081
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import unicode_literals import os import re import subprocess from collections import defaultdict from mach.mixin.process import ProcessExecutionMixin MOZ_MYCONFIG_ERROR = ''' The MOZ_MYCONFIG environment variable to define the location of mozconfigs is deprecated. If you wish to define the mozconfig path via an environment variable, use MOZCONFIG instead. '''.strip() MOZCONFIG_LEGACY_PATH = ''' You currently have a mozconfig at %s. This implicit location is no longer supported. Please move it to %s/.mozconfig or set an explicit path via the $MOZCONFIG environment variable. '''.strip() MOZCONFIG_BAD_EXIT_CODE = ''' Evaluation of your mozconfig exited with an error. This could be triggered by a command inside your mozconfig failing. Please change your mozconfig to not error and/or to catch errors in executed commands. '''.strip() class MozconfigFindException(Exception): """Raised when a mozconfig location is not defined properly.""" class MozconfigLoadException(Exception): """Raised when a mozconfig could not be loaded properly. This typically indicates a malformed or misbehaving mozconfig file. """ def __init__(self, path, message, output=None): self.path = path self.output = output Exception.__init__(self, message) class MozconfigLoader(ProcessExecutionMixin): """Handles loading and parsing of mozconfig files.""" RE_MAKE_VARIABLE = re.compile(''' ^\s* # Leading whitespace (?P<var>[a-zA-Z_0-9]+) # Variable name \s* [?:]?= \s* # Assignment operator surrounded by optional # spaces (?P<value>.*$)''', # Everything else (likely the value) re.VERBOSE) # Default mozconfig files in the topsrcdir. 
DEFAULT_TOPSRCDIR_PATHS = ('.mozconfig', 'mozconfig') DEPRECATED_TOPSRCDIR_PATHS = ('mozconfig.sh', 'myconfig.sh') DEPRECATED_HOME_PATHS = ('.mozconfig', '.mozconfig.sh', '.mozmyconfig.sh') IGNORE_SHELL_VARIABLES = ('_') def __init__(self, topsrcdir): self.topsrcdir = topsrcdir @property def _loader_script(self): our_dir = os.path.abspath(os.path.dirname(__file__)) return os.path.join(our_dir, 'mozconfig_loader') def find_mozconfig(self): """Find the active mozconfig file for the current environment. This emulates the logic in mozconfig-find. 1) If ENV[MOZCONFIG] is set, use that 2) If $TOPSRCDIR/mozconfig or $TOPSRCDIR/.mozconfig exists, use it. 3) If both exist or if there are legacy locations detected, error out. The absolute path to the found mozconfig will be returned on success. None will be returned if no mozconfig could be found. A MozconfigFindException will be raised if there is a bad state, including conditions from #3 above. """ # Check for legacy methods first. if 'MOZ_MYCONFIG' in os.environ: raise MozconfigFindException(MOZ_MYCONFIG_ERROR) env_path = os.environ.get('MOZCONFIG', None) if env_path is not None: if not os.path.exists(env_path): raise MozconfigFindException( 'MOZCONFIG environment variable refers to a path that ' 'does not exist: ' + env_path) if not os.path.isfile(env_path): raise MozconfigFindException( 'MOZCONFIG environment variable refers to a ' 'non-file: ' + env_path) srcdir_paths = [os.path.join(self.topsrcdir, p) for p in self.DEFAULT_TOPSRCDIR_PATHS] existing = [p for p in srcdir_paths if os.path.isfile(p)] if env_path is None and len(existing) > 1: raise MozconfigFindException('Multiple default mozconfig files ' 'present. Remove all but one. 
' + ', '.join(existing)) path = None if env_path is not None: path = env_path elif len(existing): assert len(existing) == 1 path = existing[0] if path is not None: return os.path.abspath(path) deprecated_paths = [os.path.join(self.topsrcdir, s) for s in self.DEPRECATED_TOPSRCDIR_PATHS] home = os.environ.get('HOME', None) if home is not None: deprecated_paths.extend([os.path.join(home, s) for s in self.DEPRECATED_HOME_PATHS]) for path in deprecated_paths: if os.path.exists(path): raise MozconfigFindException( MOZCONFIG_LEGACY_PATH % (path, self.topsrcdir)) return None def read_mozconfig(self, path=None, moz_build_app=None): """Read the contents of a mozconfig into a data structure. This takes the path to a mozconfig to load. If it is not defined, we will try to find a mozconfig from the environment using find_mozconfig(). mozconfig files are shell scripts. So, we can't just parse them. Instead, we run the shell script in a wrapper which allows us to record state from execution. Thus, the output from a mozconfig is a friendly static data structure. """ if path is None: path = self.find_mozconfig() result = { 'path': path, 'topobjdir': None, 'configure_args': None, 'make_flags': None, 'make_extra': None, 'env': None, } if path is None: return result path = path.replace(os.sep, '/') result['configure_args'] = [] result['make_extra'] = [] result['make_flags'] = [] env = dict(os.environ) args = self._normalize_command([self._loader_script, self.topsrcdir.replace(os.sep, '/'), path], True) try: # We need to capture stderr because that's where the shell sends # errors if execution fails. output = subprocess.check_output(args, stderr=subprocess.STDOUT, cwd=self.topsrcdir, env=env) except subprocess.CalledProcessError as e: lines = e.output.splitlines() # Output before actual execution shouldn't be relevant. 
try: index = lines.index('------END_BEFORE_SOURCE') lines = lines[index + 1:] except ValueError: pass raise MozconfigLoadException(path, MOZCONFIG_BAD_EXIT_CODE, lines) parsed = self._parse_loader_output(output) all_variables = set(parsed['vars_before'].keys()) all_variables |= set(parsed['vars_after'].keys()) changed = { 'added': {}, 'removed': {}, 'modified': {}, 'unmodified': {}, } for key in all_variables: if key in self.IGNORE_SHELL_VARIABLES: continue if key not in parsed['vars_before']: changed['added'][key] = parsed['vars_after'][key] continue if key not in parsed['vars_after']: changed['removed'][key] = parsed['vars_before'][key] continue if parsed['vars_before'][key] != parsed['vars_after'][key]: changed['modified'][key] = ( parsed['vars_before'][key], parsed['vars_after'][key]) continue changed['unmodified'][key] = parsed['vars_after'][key] result['env'] = changed result['configure_args'] = [self._expand(o) for o in parsed['ac']] if moz_build_app is not None: result['configure_args'].extend(self._expand(o) for o in parsed['ac_app'][moz_build_app]) mk = [self._expand(o) for o in parsed['mk']] for o in mk: match = self.RE_MAKE_VARIABLE.match(o) if match is None: result['make_extra'].append(o) continue name, value = match.group('var'), match.group('value') if name == 'MOZ_MAKE_FLAGS': result['make_flags'] = value continue if name == 'MOZ_OBJDIR': result['topobjdir'] = value continue result['make_extra'].append(o) return result def _parse_loader_output(self, output): mk_options = [] ac_options = [] ac_app_options = defaultdict(list) before_source = {} after_source = {} current = None current_type = None in_variable = None for line in output.splitlines(): # XXX This is an ugly hack. Data may be lost from things # like environment variable values. 
# See https://bugzilla.mozilla.org/show_bug.cgi?id=831381 line = line.decode('utf-8', 'ignore') if not line: continue if line.startswith('------BEGIN_'): assert current_type is None assert current is None assert not in_variable current_type = line[len('------BEGIN_'):] current = [] continue if line.startswith('------END_'): assert not in_variable section = line[len('------END_'):] assert current_type == section if current_type == 'AC_OPTION': ac_options.append('\n'.join(current)) elif current_type == 'MK_OPTION': mk_options.append('\n'.join(current)) elif current_type == 'AC_APP_OPTION': app = current.pop(0) ac_app_options[app].append('\n'.join(current)) current = None current_type = None continue assert current_type is not None if current_type in ('BEFORE_SOURCE', 'AFTER_SOURCE'): # mozconfigs are sourced using the Bourne shell (or at least # in Bourne shell mode). This means |set| simply lists # variables from the current shell (not functions). (Note that # if Bash is installed in /bin/sh it acts like regular Bourne # and doesn't print functions.) So, lines should have the # form: # # key='value' # key=value # # The only complication is multi-line variables. Those have the # form: # # key='first # second' # TODO Bug 818377 Properly handle multi-line variables of form: # $ foo="a='b' # c='d'" # $ set # foo='a='"'"'b'"'"' # c='"'"'d'"'" name = in_variable value = None if in_variable: # Reached the end of a multi-line variable. if line.endswith("'") and not line.endswith("\\'"): current.append(line[:-1]) value = '\n'.join(current) in_variable = None else: current.append(line) continue else: equal_pos = line.find('=') if equal_pos < 1: # TODO log warning? continue name = line[0:equal_pos] value = line[equal_pos + 1:] if len(value): has_quote = value[0] == "'" if has_quote: value = value[1:] # Lines with a quote not ending in a quote are multi-line. 
if has_quote and not value.endswith("'"): in_variable = name current.append(value) continue else: value = value[:-1] if has_quote else value assert name is not None if current_type == 'BEFORE_SOURCE': before_source[name] = value else: after_source[name] = value current = [] continue current.append(line) return { 'mk': mk_options, 'ac': ac_options, 'ac_app': ac_app_options, 'vars_before': before_source, 'vars_after': after_source, } def _expand(self, s): return s.replace('@TOPSRCDIR@', self.topsrcdir)
mpl-2.0
venthur/pyff
src/FeedbackBase/VisualP300.py
3
19392
# VisualP300.py -
# Copyright (C) 2009 Matthias Treder
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""
VisualP300 provides a framework for running P300 experiments within pyff.
"""

import sys, os, random

import pygame

from MainloopFeedback import MainloopFeedback
from lib.P300VisualElement.Textbox import Textbox
from lib.P300Aux.P300Functions import wait_for_key, show_message


class VisualP300(MainloopFeedback):
    """
    This class is derived from MainloopFeedback, which provides basic
    functionality. The class is derived from the Feedback base class and it
    implements a generic visual P300 speller based on the pygame extension
    (http://www.pygame.org). You need to have pygame installed in order to
    use this class or one of its subclasses. Pygame is a simple and
    easy-to-use 2D game engine. For documentation and tutorials, see
    http://www.pygame.org/docs/

    VisualP300 is built in a modular fashion. It is basically a controller
    class which connects a number of components. You can use any combination
    of these predefined components to build your own P300 speller. The
    components are as follows

    layout
        defines the spatial layout of your P300 speller (eg, matrix,
        circle), in other words, it provides the screen coordinates of the
        elements. layout is an instance of one of the P300Layout classes.
        The README file in the P300Layout/ subfolder provides detail
        information.
    elements
        a list of elements (eg, letters or images). Each element is an
        instance of (a subclass of) P300VisualElement. The README file in
        the P300VisualElement/ subfolder provides detail information.
    deco
        other graphical objects that should be displayed but which are not
        integral parts of the speller (such as a fixation dot)
    groups
        a list of tuples containing the indices of elements that should be
        flashed together (such as rows and columns in the classical P300
        speller)

    **Other important properties**

    group_trigger
        a list with the triggers corresponding to each group. For a given
        index, the value at that index the triggers are sent via the
        parallel port when a group of elements is flashed. Note that the
        stimulus method automatically sends a trigger with the same index
        as the current flash group. As an example, if groups[2] is
        currently flashed, the trigger in group_trigger[2] will be sent.
        If you want trigger to be different depending on whether they
        correspond to a target or nontarget, you have to change
        group_trigger manually upon each trial.
    flash_sequence
        For the current trial, flash_sequence contains a list of the
        indices of the groups that are flashed subsequently.

    **Timing**

    For convenience, most timing variables (such as flash duration) are
    given in NUMBER OF FRAMES, not in milliseconds. Number of frames is in
    relation to FPS, the actual frames-per-second that is set. You might
    want change the default value to better match your screen refresh rate.

    **Vertical screen refresh**

    It can make a big difference whether or not you use fullscreen mode.
    If you don't use fullscreen mode, hardware backends may not enable
    hardware surface and double buffering (for more information, see
    http://kevinlocke.name/inquiry/sdlblitspeed/sdlblitspeed.php). If
    stimulus presentation is not time-locked with the vertical screen
    refresh rate, flickering artefacts may result.

    Time-locking should be automatically enabled in fullscreen mode. If it
    is not, check
    * if you got the latest graphics driver installed
    * if you got the latest DirectX version (on windows machines)
    You might also need to set your graphics driver to sync vertical
    refresh

    The standard driver is DirectX. If you work on a non-Windows machine,
    you need to change the video_driver variable
    (ftp://ptah.lnf.kth.se/pub/misc/sdl-env-vars gives a list of drivers).

    Double buffering is enabled by default to prevent tearing. Note that
    with doublebuf, hwsurface (hardware surface) is used instead of
    swsurface (software surface). If you write your own drawing routines,
    be sure to use pygame.diplay.flip() command instead of
    pygame.display.update(), to have your stimuli rendered properly in
    double buffer mode.

    To prepare your own experiment, you could first have a look at the
    examples such as P300Matrix.py and P300Hex.py
    """

    # --- Default visual settings ---
    DEFAULT_SCREEN_WIDTH, DEFAULT_SCREEN_HEIGHT = 800, 800
    DEFAULT_FULLSCREEN = False
    DEFAULT_BGCOLOR = 0, 0, 0       # Default background color (RGB tuple)
    # Give durations as number of frames (relative to DEFAULT_FPS)
    DEFAULT_FLASH_DURATION = 5
    DEFAULT_SOA = 10                # Time between the onset of two flashes (stimlus onset asynchrony)
    DEFAULT_FPS = 60                # Default frames per second
    # """ If state_change is set to true, state changes (ie, OFF-ON or ON-OFF)
    # will be presented instead of flashes (ie, OFF-ON-OFF) """
    # DEFAULT_STATE_CHANGE = False  # Set here whether you want state changes (OFF-ON or ON-OFF) or flashes (OFF-ON-OFF)
    DEFAULT_NR_SEQUENCES = 1        # nr of times each group is flashed in a trial
    # Settings for textmessages via the function show_message()
    DEFAULT_TEXTSIZE = 40
    DEFAULT_TEXTCOLOR = 255, 255, 255
    # Settings for pygame
    DEFAULT_PYGAME_INFO = True      # If true, gives a screen with information about pygame settings
    # DEFAULT_VIDEO_DRIVER = 'directx'

    # Speller states (the top-level state machine driven by tick()/play_tick())
    PRE_TRIAL = 0
    STIMULUS = 1
    FEEDBACK = 2
    POST_TRIAL = 3

    # Stimulus states (the inner state machine driven by _stimulus())
    STIM_START_FLASH = 1
    STIM_IN_FLASH = 2
    STIM_END_FLASH = 3
    STIM_BETWEEN_FLASHES = 4

    # *** Overwritten MainloopFeedback methods ***
    def init(self):
        """Define your variables here."""
        # Visual settings
        self.window_title = "Visual P300"
        self.screenWidth, self.screenHeight = self.DEFAULT_SCREEN_WIDTH, self.DEFAULT_SCREEN_HEIGHT
        """ Canvas: The part of the screen which is used for painting! That's
        more efficient than repainting the whole of the screen """
        self.canvasWidth, self.canvasHeight = 600, 600
        self.fullscreen = self.DEFAULT_FULLSCREEN
        self.bgcolor = self.DEFAULT_BGCOLOR
        self.textsize = self.DEFAULT_TEXTSIZE
        self.textcolor = self.DEFAULT_TEXTCOLOR
        # Trigger
        self.group_trigger = None           # Triggers are specified in the subclass
        # Data logging
        self.datafile = None                # If a string is provided, logging is enabled
        # Timing (all in frames, not milliseconds)
        self.flash_duration = self.DEFAULT_FLASH_DURATION
        self.soa = self.DEFAULT_SOA
        self.nr_sequences = self.DEFAULT_NR_SEQUENCES
        # self.state_change = self.DEFAULT_STATE_CHANGE
        self.fps = self.DEFAULT_FPS
        # Random number generator
        self.random = random.Random()       # Get random generator
        # pygame specific variables
        self.pygame_info = self.DEFAULT_PYGAME_INFO
        # self.video_driver = self.DEFAULT_VIDEO_DRIVER

    def pre_mainloop(self):
        """
        - define a layout
        - define your visual elements and add them to the elements list
          using the add_element method
        - define your deco and add them to the deco list
        - make a deco_group containing all deco
        - define your groups and add them to the groups list using the
          add_group method
        - define your triggers
        Always create a layout before you add elements, and add elements
        before you add groups.
        """
        self.current_flash = 0      # Index of the flash presented last
        # Core members
        self.layout = None          # layout is specified in the subclass
        self.elements = []          # elements are specified in the subclass
        self.deco = []              # decoration (elements such as frames, text fields, etc, that are not an integral part of the P300 speller)
        self.groups = []            # specifies which elements are flashed together
        self.deco_group = None
        # Speller state
        self.state = self.PRE_TRIAL
        self.state_finished = False
        self.flash_sequence = []
        self.nr_flashes = None      # len of flash sequence
        # Stimulus states
        self.stim_state = None
        # Init pygame and start before_mainloop implemented by children
        self._init_pygame()
        self.before_mainloop()

    def before_mainloop(self):
        """ Prepare your elements, groups, triggers etc in this method """
        pass

    def _init_pygame(self):
        # Initialize pygame, open screen and fill screen with background color
        # os.environ['SDL_VIDEODRIVER'] = self.video_driver   # Set video driver
        pygame.init()
        if self.fullscreen:
            # use opts = pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.FULLSCREEN
            # to use doublebuffer and vertical sync
            opts = pygame.FULLSCREEN
            self.screen = pygame.display.set_mode((self.screenWidth, self.screenHeight), opts)
        else:
            self.screen = pygame.display.set_mode((self.screenWidth, self.screenHeight))
        # Canvas-sized background: cheaper to repaint than the whole screen.
        self.background = pygame.Surface((self.canvasWidth, self.canvasHeight))
        self.background.fill(self.bgcolor)
        self.background_rect = self.background.get_rect(center=(self.screenWidth / 2, self.screenHeight / 2))
        # Background for whole screen (needs lots of time to paint, use
        # self.background in most cases)
        self.all_background = pygame.Surface((self.screenWidth, self.screenHeight))
        self.all_background.fill(self.bgcolor)
        self.all_background_rect = self.all_background.get_rect(center=(self.screenWidth / 2, self.screenHeight / 2))
        # Blit/flip/blit so both buffers of the double buffer start cleared.
        self.screen.blit(self.all_background, self.all_background_rect)
        pygame.display.flip()
        self.screen.blit(self.all_background, self.all_background_rect)
        self.clock = pygame.time.Clock()
        pygame.mouse.set_visible(False)
        # init sound engine
        pygame.mixer.init()
        if self.pygame_info:
            # If true, give some information about the display capabilities
            inf = pygame.display.Info()
            driver = pygame.display.get_driver()
            text = "PYGAME SYSTEM INFO\n\n"
            text += "Display driver: " + str(driver) + "\nFullscreen: " + str(self.fullscreen)
            text += "\nhw: " + str(inf.hw) + "\nwm: " + str(inf.wm)
            text += "\nvideo_mem: " + str(inf.video_mem) + "\nbytesize: " + str(inf.bytesize)
            text += "\nblit_hw: " + str(inf.blit_hw) + "\nblit_hw_CC: " + str(inf.blit_hw_CC) + "\nblit_hw_A: " + str(inf.blit_hw_A)
            text += "\nblit_sw: " + str(inf.blit_sw) + "\nblit_sw_CC: " + str(inf.blit_sw_CC) + "\nblit_sw_A: " + str(inf.blit_sw_A)
            show_message(self, text, box=True)
            wait_for_key()

    def post_mainloop(self):
        """ save your log file etc """
        self.after_mainloop()
        # Get rid of pygame objects so nothing keeps the display alive
        self.clock = None
        self.background = None
        self.background_rect = None
        self.all_background = None
        self.all_background_rect = None
        self.groups = None
        self.all_elements_group = None
        self.deco_group = None
        self.deco = None
        self.elements = None
        self.screen = None
        pygame.quit()
        # Close datafile
        if self.datafile is not None:
            try:
                self.datafile.close()
            except IOError:
                self.logger.warn("Could not close datafile")

    def after_mainloop(self):
        """
        Put here any 'cleaning-up' you want to do after the experiment
        You should also clean up all references to pygame objects here, e.g.
        by deleting the reference or setting it to None (or any other non-
        pygame object)
        """
        pass

    def tick(self):
        # Check event cue (window close / ESC key stop the feedback)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.on_stop()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    self.on_stop()
        # If last state is finished, proceed to next state
        # (PRE_TRIAL -> STIMULUS -> FEEDBACK -> POST_TRIAL -> PRE_TRIAL ...)
        if self.state_finished:
            if self.state == self.PRE_TRIAL:
                self.state = self.STIMULUS
            elif self.state == self.STIMULUS:
                self.state = self.FEEDBACK
            elif self.state == self.FEEDBACK:
                self.state = self.POST_TRIAL
            elif self.state == self.POST_TRIAL:
                self.state = self.PRE_TRIAL
            self.state_finished = False

    def pause_tick(self):
        pygame.time.wait(100)

    def play_tick(self):
        # Dispatch to the handler of the current speller state.
        state = self.state
        if state == self.PRE_TRIAL:
            self._pre_trial()
        elif state == self.STIMULUS:
            self.pre_stimulus()
            if not self.state_finished:
                self._stimulus()
                self.post_stimulus()
        elif state == self.FEEDBACK:
            self.feedback()
        elif state == self.POST_TRIAL:
            self._post_trial()

    def _pre_trial(self):
        self.pre_trial()
        # Make a group containing all current elements
        self.all_elements_group = pygame.sprite.RenderUpdates(self.elements)
        self.all_elements_group.update(0)
        self.all_elements_group.draw(self.screen)
        """ In order for the group.clear method (called in the flashing loop)
        to work, the group.draw methods have to have been called before, """
        for i in range(len(self.groups)):
            self.groups[i].draw(self.screen)
        # Initialize first group
        self.current_flash = 0
        self.nr_flashes = len(self.flash_sequence)
        self.stim_state = self.STIM_START_FLASH

    def pre_trial(self):
        """
        choose stimulus, define the triggers, target, and perform any other
        computations necessary to perform the stimulus. You can also display
        text messages before the start of the trial and present a countdown.
        You should overwrite this method to prepare your flash_sequence and
        graphics here, if necessary. You have to set
        self.state_finished = True to proceed to the next state.
        """
        self.state_finished = True

    def pre_stimulus(self):
        # logging, send trigger, check eye tracker
        pass

    def _stimulus(self):
        """
        Here, the stimulus is presented. Usually, you will not need to
        override this method
        """
        # Inner state machine: START_FLASH -> IN_FLASH (flash_duration
        # frames) -> END_FLASH -> BETWEEN_FLASHES (until soa frames have
        # passed) -> START_FLASH of the next group, until the whole
        # flash_sequence is exhausted.
        state = self.stim_state
        if state == self.STIM_IN_FLASH:
            # Wait flash time
            self.current_tick += 1
            if self.current_tick == self.flash_duration:
                self.stim_state = self.STIM_END_FLASH
        elif state == self.STIM_START_FLASH:
            # Flash ON
            self.groups[self.flash_sequence[self.current_flash]].update()   # change state
            self.current_tick = 0
            if self.flash_duration > 1:
                self.stim_state = self.STIM_IN_FLASH
            else:
                self.stim_state = self.STIM_END_FLASH
            # We only need to paint if smth changes
            self.screen.blit(self.background, self.background_rect)
            self.all_elements_group.draw(self.screen)
            if len(self.deco) > 0:
                self.deco_group.draw(self.screen)
            pygame.display.flip()
        elif state == self.STIM_END_FLASH:
            # Flash OFF
            self.groups[self.flash_sequence[self.current_flash]].update(-1)     # change state back
            self.current_tick += 1
            if self.current_tick + 1 < self.soa:
                self.stim_state = self.STIM_BETWEEN_FLASHES
            else:
                self.stim_state = self.STIM_START_FLASH
            self.current_flash += 1
            if self.current_flash == self.nr_flashes:
                self.state_finished = True      # All flashes have been shown
                self.current_tick = 0           # Reset current tick
            self.screen.blit(self.background, self.background_rect)
            self.all_elements_group.draw(self.screen)
            if len(self.deco) > 0:
                self.deco_group.draw(self.screen)
            pygame.display.flip()
        elif state == self.STIM_BETWEEN_FLASHES:
            self.current_tick += 1
            if self.current_tick == self.soa:
                self.current_tick = 0
                self.stim_state = self.STIM_START_FLASH
        # Lock the loop to the configured frame rate.
        self.clock.tick(self.fps)

    def post_stimulus(self):
        # logging, send trigger, check eye tracker
        pass

    def feedback(self):
        """ Present feedback (for instance the chosen letter) """
        # Give your feedback (eg the chosen symbol) here
        self.state_finished = True

    def _post_trial(self):
        # Do not overwrite this method
        self.post_trial()

    def post_trial(self):
        """
        Any clean up / stuff you need to do after presenting a trial.
        You have to set self.state_finished = True to proceed to the next
        state.
        """
        self.state_finished = True

    def add_element(self, element):
        """
        Adds a visual element to the list of elements and set it on the
        position specified by the layout
        """
        nr_elements = len(self.elements)    # Number of elements already in list
        if self.layout is None:
            self.logger.warn("Cannot add element: no layout specified")
        else:
            # Position element so that it's centered on the screen
            (x, y) = self.layout.positions[nr_elements]
            element.pos = (x + self.screenWidth / 2, y + self.screenHeight / 2)
            self.elements.append(element)

    def add_group(self, group):
        """
        Takes the indices of the elements in the elements list and adds them
        as one group
        """
        new_group = pygame.sprite.RenderUpdates()
        if type(group) == int:
            # A single index is allowed as a convenience
            new_group.add(self.elements[group])
        else:
            for g in group:
                new_group.add(self.elements[g])
        self.groups.append(new_group)

    def log_data(self):
        """
        Overwrite this method to log your specific data. The datafile object
        is referenced by self.datafile. You should open the file yourself in
        the pre_mainloop method of your derived class. The file is closed
        automatically when the feedback is stopped.
        """
        pass
gpl-2.0
rghe/ansible
lib/ansible/modules/packaging/os/flatpak_remote.py
15
7194
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com> # Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net> # Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: flatpak_remote version_added: '2.6' short_description: Manage flatpak repository remotes description: - Allows users to add or remove flatpak remotes. - The flatpak remotes concept is comparable to what is called repositories in other packaging formats. - Currently, remote addition is only supported via I(flatpakrepo) file URLs. - Existing remotes will not be updated. - See the M(flatpak) module for managing flatpaks. author: - John Kwiatkoski (@jaykayy) - Alexander Bethke (@oolongbrothers) requirements: - flatpak options: executable: description: - The path to the C(flatpak) executable to use. - By default, this module looks for the C(flatpak) executable on the path. default: flatpak flatpakrepo_url: description: - The URL to the I(flatpakrepo) file representing the repository remote to add. - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url) is added using the specified installation C(method). - When used with I(state=absent), this is not required. - Required when I(state=present). method: description: - The installation method to use. - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) or only for the current C(user). choices: [ system, user ] default: system name: description: - The desired name for the flatpak remote to be registered under on the managed host. 
- When used with I(state=present), the remote will be added to the managed host under the specified I(name). - When used with I(state=absent) the remote with that name will be removed. required: true state: description: - Indicates the desired package state. choices: [ absent, present ] default: present ''' EXAMPLES = r''' - name: Add the Gnome flatpak remote to the system installation flatpak_remote: name: gnome state: present flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo - name: Add the flathub flatpak repository remote to the user installation flatpak_remote: name: flathub state: present flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo method: user - name: Remove the Gnome flatpak remote from the user installation flatpak_remote: name: gnome state: absent method: user - name: Remove the flathub remote from the system installation flatpak_remote: name: flathub state: absent ''' RETURN = r''' command: description: The exact flatpak command that was executed returned: When a flatpak command has been executed type: string sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" msg: description: Module error message returned: failure type: string sample: "Executable '/usr/local/bin/flatpak' was not found on the system." 
rc: description: Return code from flatpak binary returned: When a flatpak command has been executed type: int sample: 0 stderr: description: Error output from flatpak binary returned: When a flatpak command has been executed type: string sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n" stdout: description: Output from flatpak binary returned: When a flatpak command has been executed type: string sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n" ''' import subprocess from ansible.module_utils.basic import AnsibleModule def add_remote(module, binary, name, flatpakrepo_url, method): """Add a new remote.""" global result command = "{0} remote-add --{1} {2} {3}".format( binary, method, name, flatpakrepo_url) _flatpak_command(module, module.check_mode, command) result['changed'] = True def remove_remote(module, binary, name, method): """Remove an existing remote.""" global result command = "{0} remote-delete --{1} --force {2} ".format( binary, method, name) _flatpak_command(module, module.check_mode, command) result['changed'] = True def remote_exists(module, binary, name, method): """Check if the remote exists.""" command = "{0} remote-list -d --{1}".format(binary, method) # The query operation for the remote needs to be run even in check mode output = _flatpak_command(module, False, command) for line in output.splitlines(): listed_remote = line.split() if listed_remote[0] == name: return True return False def _flatpak_command(module, noop, command): global result if noop: result['rc'] = 0 result['command'] = command return "" process = subprocess.Popen( command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout_data, stderr_data = process.communicate() result['rc'] = process.returncode result['command'] = command result['stdout'] = stdout_data result['stderr'] = stderr_data if result['rc'] != 0: module.fail_json(msg="Failed to execute flatpak command", **result) 
return stdout_data def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True), flatpakrepo_url=dict(type='str'), method=dict(type='str', default='system', choices=['user', 'system']), state=dict(type='str', default="present", choices=['absent', 'present']), executable=dict(type='str', default="flatpak") ), # This module supports check mode supports_check_mode=True, ) name = module.params['name'] flatpakrepo_url = module.params['flatpakrepo_url'] method = module.params['method'] state = module.params['state'] executable = module.params['executable'] binary = module.get_bin_path(executable, None) if flatpakrepo_url is None: flatpakrepo_url = '' global result result = dict( changed=False ) # If the binary was not found, fail the operation if not binary: module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) remote_already_exists = remote_exists(module, binary, bytes(name, 'utf-8'), method) if state == 'present' and not remote_already_exists: add_remote(module, binary, name, flatpakrepo_url, method) elif state == 'absent' and remote_already_exists: remove_remote(module, binary, name, method) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
devil1437/GalaxyNexusKernel
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python

"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2

This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the call time are provided.

Usage:
	Be sure that you have CONFIG_FUNCTION_TRACER
	# mount -t debugfs nodev /sys/kernel/debug
	# echo function > /sys/kernel/debug/tracing/current_tracer
	$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
	Wait some times but not too much, the script is a bit slow.
	Break the pipe (Ctrl + Z)
	$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
	Then you have your drawn trace in draw_functrace
"""


import sys, re

# Matches one function-tracer line, e.g.
#   "bash-4251  [005]  6306.005548: update_curr <-dequeue_task_fair"
# group(1) = timestamp, group(2) = callee, group(3) = caller.
# Compiled once at module level instead of re-parsed for every trace line.
TRACE_LINE = re.compile(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)")


class CallTree:
	""" This class provides a tree representation of the functions
		call stack. If a function has no parent in the kernel (interrupt,
		syscall, kernel thread...) then it is attached to a virtual parent
		called ROOT.
	"""
	ROOT = None

	def __init__(self, func, time=None, parent=None):
		self._func = func
		self._time = time
		if parent is None:
			self._parent = CallTree.ROOT
		else:
			self._parent = parent
		self._children = []

	def calls(self, func, calltime):
		""" If a function calls another one, call this method to insert it
			into the tree at the appropriate place.
			@return: A reference to the newly created child node.
		"""
		child = CallTree(func, calltime, self)
		self._children.append(child)
		return child

	def getParent(self, func):
		""" Retrieve the last parent of the current node that has the name
			given by func. If this function is not on a parent, then create
			it as new child of root
			@return: A reference to the parent.
		"""
		tree = self
		while tree != CallTree.ROOT and tree._func != func:
			tree = tree._parent
		if tree == CallTree.ROOT:
			# Caller not found in the ancestry: attach it directly to ROOT.
			child = CallTree.ROOT.calls(func, None)
			return child
		return tree

	def __repr__(self):
		return self.__toString("", True)

	def __toString(self, branch, lastChild):
		# Render this node, then recurse into the children with an extended
		# branch prefix; the last child blanks out its column of the prefix.
		if self._time is not None:
			s = "%s----%s (%s)\n" % (branch, self._func, self._time)
		else:
			s = "%s----%s\n" % (branch, self._func)
		if lastChild:
			branch = branch[:-1] + " "
		for i, child in enumerate(self._children):
			s += "%s" % child.__toString(branch + " |",
						     i == len(self._children) - 1)
		return s


class BrokenLineException(Exception):
	"""If the last line is not complete because of the pipe breakage,
	   we want to stop the processing and ignore this line.
	"""
	pass


class CommentLineException(Exception):
	""" If the line is a comment (as in the beginning of the trace file),
	    just ignore it.
	"""
	pass


def parseLine(line):
	""" Parse one trace line into a (calltime, callee, caller) tuple.

	    Raises CommentLineException for comment lines and
	    BrokenLineException for truncated/unparsable lines.
	"""
	line = line.strip()
	if line.startswith("#"):
		raise CommentLineException
	m = TRACE_LINE.match(line)
	if m is None:
		raise BrokenLineException
	return (m.group(1), m.group(2), m.group(3))


def main():
	CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
	tree = CallTree.ROOT

	for line in sys.stdin:
		try:
			calltime, callee, caller = parseLine(line)
		except BrokenLineException:
			break
		except CommentLineException:
			continue
		tree = tree.getParent(caller)
		tree = tree.calls(callee, calltime)

	# print() call works on both Python 2 and 3 (the original used the
	# Python-2-only print statement).
	print(CallTree.ROOT)


if __name__ == "__main__":
	main()
gpl-2.0
drpngx/tensorflow
tensorflow/python/kernel_tests/pooling_ops_test.py
19
66381
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


def GetTestConfigs(include_nchw_vect_c=False):
  """Get all the valid tests configs to run.

  Args:
    include_nchw_vect_c: Whether to include NCHW_VECT_C in the test configs.

  Returns:
    all the valid test configs as tuples of data_format and use_gpu.
  """
  # NHWC runs everywhere; GPU-only formats are appended below when available.
  test_configs = [("NHWC", False), ("NHWC", True)]
  if not test.is_gpu_available(cuda_only=True):
    tf_logging.info("NCHW and NCHW_VECT_C tests skipped because not run with "
                    "--config=cuda or no GPUs available.")
    return test_configs
  # "NCHW" format is currently supported exclusively on CUDA GPUs.
  test_configs += [("NCHW", True)]
  if include_nchw_vect_c:
    # NCHW_VECT_C requires compute capability >= 6.1 (int8 path).
    if test.is_gpu_available(
        cuda_only=True, min_cuda_compute_capability=(6, 1)):
      test_configs += [("NCHW_VECT_C", True)]
    else:
      tf_logging.info("NCHW_VECT_C test skipped because no GPUs with "
                      "compute capability >= 6.1 are available.")

  return test_configs


def GetShrunkInceptionMaxPoolShapes(shrink=30):
  """Iterator for some of the max pool ops in the Inception 2015 model.

  Args:
    shrink: Factor to shrink depth relative to Inception.

  Yields:
    Tuple (name, input_size, filter_size, out_size, strides, padding)
  """
  names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"]
  input_sizes = [[32, 71, 71, 192], [32, 35, 35, 288],
                 [32, 17, 17, 1248], [32, 8, 8, 2048]]
  filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1]]
  output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288],
                  [32, 8, 8, 1248], [32, 8, 8, 2048]]
  strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1], [1, 1, 1, 1]]
  # Shrink each depth value
  for i in input_sizes:
    i[3] //= shrink
  for o in output_sizes:
    o[3] //= shrink
  paddings = ["VALID", "VALID", "VALID", "SAME"]
  for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes,
                              strides, paddings):
    yield n, i, f, o, s, p


class PoolingTest(test.TestCase):
  """Forward-value tests for avg/max pooling across data formats.

  Covers NHWC on CPU/GPU, NCHW on GPU, and NCHW_VECT_C (qint8) on GPU,
  plus depthwise pooling and invalid-configuration error cases.
  """

  def _VerifyOneType(self, pool_func, input_sizes, ksize, strides, padding,
                     data_format, data_type, expected, use_gpu, v2):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua
        version.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      data_type: The data type to use to run the pooling operation.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether we are running on GPU.
      v2: Whether to use the V2 kernel, which takes ksize/strides as
        tensors fed through placeholders rather than attrs.
    """
    total_size = 1
    for s in input_sizes:
      total_size *= s
    if v2 and data_format != "NHWC":
      tf_logging.info("v2 not supported for %s", data_format)
      return
    if data_format == "NCHW_VECT_C":
      # The VECT_C path quantizes to qint8, so only the float32 source type
      # and depths divisible by 4 are exercised.
      if data_type != dtypes.float32:
        tf_logging.info("quantization to qint8 not implemented for %r",
                        data_type)
        return
      if input_sizes[-1] % 4 != 0:
        tf_logging.info("Skipping test for depth %d", input_sizes[-1])
        return
    tf_logging.info("Running %s test. %r %r %d %r %r %r %s", data_format, v2,
                    input_sizes, total_size, pool_func, ksize, strides,
                    data_type)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1, wrapping round to -127 after 127 to support int8.
    x = [((f + 128) % 255) - 127 for f in range(total_size)]
    with self.test_session(use_gpu=use_gpu):
      t = constant_op.constant(x, shape=input_sizes, dtype=data_type)
      if data_format in ("NCHW", "NCHW_VECT_C"):
        # Inputs are generated in NHWC; transpose (and quantize for VECT_C)
        # before pooling, and transpose back below before comparing.
        if data_format == "NCHW_VECT_C":
          t = test_util.NHWCToNCHW_VECT_C(t)
          t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
        else:
          t = test_util.NHWCToNCHW(t)
        ksize = test_util.NHWCToNCHW(ksize)
        strides = test_util.NHWCToNCHW(strides)
      ksize_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
      strides_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
      if v2:
        t = pool_func(
            t,
            ksize=ksize_placeholder,
            strides=strides_placeholder,
            padding=padding,
            data_format=data_format)
      else:
        t = pool_func(
            t,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format=data_format)
      if data_format == "NCHW_VECT_C":
        t = gen_array_ops.dequantize(t, -128, 127)
        t = test_util.NCHW_VECT_CToNHWC(t)
      elif data_format == "NCHW":
        t = test_util.NCHWToNHWC(t)
      if v2:
        actual = t.eval(feed_dict={
            ksize_placeholder: ksize,
            strides_placeholder: strides
        })
      else:
        actual = t.eval()
      self.assertShapeEqual(actual, t)
      self.assertAllCloseAccordingToType(expected, actual.flatten())

  def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
                     data_format, expected, use_gpu, v2):
    """Verifies the output values of the pooling function.

    Runs _VerifyOneType for float32, float64, and (where supported) float16.

    Args:
      pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua
        version.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether we are running on GPU.
      v2: Whether to use the V2 pooling kernel.
    """
    if data_format == "NCHW_VECT_C":
      # avg_pool has no NCHW_VECT_C implementation yet; skip it.
      avg_pool_func = nn_ops.avg_pool
      tf_logging.info("pool_func=%s", pool_func)
      if pool_func == avg_pool_func:
        tf_logging.info("NCHW_VECT_C not yet implemented for avg_pool")
        return
    self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
                        data_format, dtypes.float32, expected, use_gpu, v2)
    self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
                        data_format, dtypes.float64, expected, use_gpu, v2)

    if not use_gpu or test_util.CudaSupportsHalfMatMulAndConv():
      self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
                          data_format, dtypes.float16, expected, use_gpu, v2)

  def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
                    expected, use_gpu, v2=False):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua
        version.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether we are running on GPU.
      v2: Whether to use the V2 pooling kernel (defaults to False).
    """
    # Run the case for every data format whose use_gpu flag matches.
    for (data_format, use_gpu_2) in GetTestConfigs(True):
      if use_gpu_2 == use_gpu:
        self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
                            data_format, expected, use_gpu, v2)

  def _testAvgPoolValidPadding(self, use_gpu):
    expected_output = [7.0, 8.0, 9.0]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="VALID",
        expected=expected_output,
        use_gpu=use_gpu)

  def _testAvgPoolSamePadding(self, use_gpu):
    expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 2, 4, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output,
        use_gpu=use_gpu)

  def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu):
    # input is:
    # [1.0, 2.0
    #  3.0  4.0]
    #
    # Window of [x, x] should do:
    #  [avg(1.0, 2.0), avg(2.0, padded0),
    #   avg(3.0, 4.0), avg(4.0, padded0)]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 2, 2, 1],
        ksize=[1, 1, 2, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        expected=[1.5, 2.0, 3.5, 4.0],
        use_gpu=use_gpu)

    # Window of [x,
    #            x] should do:
    #  [avg(1.0, 3.0), avg(2.0, 4.0)
    #   avg(3.0, padded0), avg(4.0, padded0)]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 2, 2, 1],
        ksize=[1, 2, 1, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        expected=[2.0, 3.0, 3.0, 4.0],
        use_gpu=use_gpu)

  def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu):
    # Same non-square windows as above, over batch=2 / depth=2 input.
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[2, 2, 2, 2],
        ksize=[1, 1, 2, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        expected=[
            2.0, 3.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 10.0, 11.0, 11.0, 12.0,
            14.0, 15.0, 15.0, 16.0
        ],
        use_gpu=use_gpu)
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[2, 2, 2, 2],
        ksize=[1, 2, 1, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        expected=[
            3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0,
            13.0, 14.0, 15.0, 16.0
        ],
        use_gpu=use_gpu)

  def _testAvgPoolValidPaddingUnevenStride(self, use_gpu):
    # Strides differ between rows and cols (1x2 then 2x1).
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 1, 2, 1],
        padding="VALID",
        expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
        use_gpu=use_gpu)
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 1, 1],
        padding="VALID",
        expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
        use_gpu=use_gpu)

  def _testAvgPoolSamePadding4(self, use_gpu):
    expected_output = [
        11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0, 44.0, 45.0, 46.0,
        51.0, 52.0, 53.0, 54.0
    ]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 4, 4, 4],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output,
        use_gpu=use_gpu)

  def _testAvgPoolSamePaddingPacket4(self, use_gpu):
    expected_output = [
        21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0, 45.0, 46.0, 47.0, 48.0,
        51.0, 52.0, 53.0, 54.0
    ]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 4, 4, 4],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output,
        use_gpu=use_gpu)

  def _testAvgPoolSamePaddingPacket8(self, use_gpu):
    # Depth 8 exercises the vectorized (packet) code paths; the int8-style
    # wrap-around input generated by _VerifyOneType produces negative values.
    expected_output = [
        -12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, 4.0, 5.0, 6.0, 7.0,
        8.0, 9.0, 10.0, 11.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,
        32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, -3.5, -54.0, -53.0, -52.0,
        -51.0, -50.0, -49.0, -48.0, -47.0, -38.0, -37.0, -36.0, -35.0, -34.0,
        -33.0, -32.0, -31.0, -22.0, -21.0, -20.0, -19.0, -18.0, -17.0, -16.0,
        -15.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -11.0, -10.0,
        -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
        11.0, 12.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 33.0, 34.0,
        35.0, 36.0, 37.0, 38.0, -3.5, -2.5, -85.0, -84.0, -83.0, -82.0, -81.0,
        -80.0, -79.0, -78.0, -69.0, -68.0, -67.0, -66.0, -65.0, -64.0, -63.0,
        -62.0, -53.0, -52.0, -51.0, -50.0, -49.0, -48.0, -47.0, -46.0, -41.0,
        -40.0, -39.0, -38.0, -37.0, -36.0, -35.0, -34.0
    ]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output,
        use_gpu=use_gpu)

  def _testAvgPoolEmptyInput(self, use_gpu):
    # batch == 0: output must also be empty, not an error.
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[0, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=[],
        use_gpu=use_gpu)

  def testAvgPooling(self):
    for use_gpu in True, False:
      self._testAvgPoolValidPadding(use_gpu)
      self._testAvgPoolSamePadding(use_gpu)
      self._testAvgPoolSamePaddingNonSquareWindow(use_gpu)
      self._testAvgPoolSamePaddingNonSquareWindowMultiBatch(use_gpu)
      self._testAvgPoolValidPaddingUnevenStride(use_gpu)
      self._testAvgPoolSamePadding4(use_gpu)
      self._testAvgPoolSamePaddingPacket4(use_gpu)
      self._testAvgPoolSamePaddingPacket8(use_gpu)
      self._testAvgPoolEmptyInput(use_gpu)

  def _testMaxPoolValidPadding(self, use_gpu):
    expected_output = [13.0, 14.0, 15.0]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="VALID",
        expected=expected_output,
        use_gpu=use_gpu)

    for v2 in [True, False]:
      self._VerifyValues(
          gen_nn_ops.max_pool_v2,
          input_sizes=[1, 3, 3, 3],
          ksize=[1, 2, 2, 1],
          strides=[1, 2, 2, 1],
          padding="VALID",
          expected=expected_output,
          use_gpu=use_gpu,
          v2=v2)

  def _testMaxPoolSamePadding(self, use_gpu):
    expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 2, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output,
        use_gpu=use_gpu)

    for v2 in [True, False]:
      self._VerifyValues(
          gen_nn_ops.max_pool_v2,
          input_sizes=[1, 2, 3, 3],
          ksize=[1, 2, 2, 1],
          strides=[1, 2, 2, 1],
          padding="SAME",
          expected=expected_output,
          use_gpu=use_gpu,
          v2=v2)

  def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu):
    # input is:
    # [1.0, 2.0
    #  3.0  4.0]
    #
    # Window of [x, x] should do:
    #
    #  [max(1.0, 2.0), max(2.0, padded0),
    #   max(3.0, 4.0), max(4.0, padded0)]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 2, 2, 1],
        ksize=[1, 1, 2, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        expected=[2.0, 2.0, 4.0, 4.0],
        use_gpu=use_gpu)

    for v2 in [True, False]:
      self._VerifyValues(
          gen_nn_ops.max_pool_v2,
          input_sizes=[1, 2, 2, 1],
          ksize=[1, 1, 2, 1],
          strides=[1, 1, 1, 1],
          padding="SAME",
          expected=[2.0, 2.0, 4.0, 4.0],
          use_gpu=use_gpu,
          v2=v2)

  def _testMaxPoolValidPaddingUnevenStride(self, use_gpu):
    # Strides differ between rows and cols (1x2 then 2x1).
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 1, 2, 1],
        padding="VALID",
        expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
        use_gpu=use_gpu)
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 1, 1],
        padding="VALID",
        expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
        use_gpu=use_gpu)

    for v2 in [True, False]:
      self._VerifyValues(
          gen_nn_ops.max_pool_v2,
          input_sizes=[1, 4, 4, 1],
          ksize=[1, 2, 2, 1],
          strides=[1, 1, 2, 1],
          padding="VALID",
          expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
          use_gpu=use_gpu,
          v2=v2)
      self._VerifyValues(
          gen_nn_ops.max_pool_v2,
          input_sizes=[1, 4, 4, 1],
          ksize=[1, 2, 2, 1],
          strides=[1, 2, 1, 1],
          padding="VALID",
          expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
          use_gpu=use_gpu,
          v2=v2)

  def _testMaxPoolSamePaddingPacket4(self, use_gpu):
    expected_output = [
        21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
        61.0, 62.0, 63.0, 64.0
    ]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 4],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output,
        use_gpu=use_gpu)

    for v2 in [True, False]:
      self._VerifyValues(
          gen_nn_ops.max_pool_v2,
          input_sizes=[1, 4, 4, 4],
          ksize=[1, 2, 2, 1],
          strides=[1, 2, 2, 1],
          padding="SAME",
          expected=expected_output,
          use_gpu=use_gpu,
          v2=v2)

  def _testMaxPoolSamePaddingPacket8(self, use_gpu):
    # Depth 8 exercises the vectorized (packet) code paths; the int8-style
    # wrap-around input generated by _VerifyOneType produces negative values.
    expected_output = [
        81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 97.0, 98.0, 99.0,
        100.0, 101.0, 102.0, 103.0, 104.0, 113.0, 114.0, 115.0, 116.0, 117.0,
        118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0,
        120.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 34.0, 35.0,
        36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0,
        56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 82.0, 83.0,
        84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 98.0, 99.0, 100.0, 101.0, 102.0,
        103.0, 104.0, 105.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0,
        121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0, 120.0, 121.0, -45.0,
        -44.0, -43.0, -42.0, -41.0, -40.0, -39.0, -38.0, -29.0, -28.0, -27.0,
        -26.0, -25.0, -24.0, -23.0, -22.0, -13.0, -12.0, -11.0, -10.0, -9.0,
        -8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0
    ]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output,
        use_gpu=use_gpu)

    for v2 in [True, False]:
      self._VerifyValues(
          gen_nn_ops.max_pool_v2,
          input_sizes=[1, 8, 8, 8],
          ksize=[1, 3, 3, 1],
          strides=[1, 2, 2, 1],
          padding="SAME",
          expected=expected_output,
          use_gpu=use_gpu,
          v2=v2)

  def _testMaxPoolEmptyInput(self, use_gpu):
    # batch == 0: output must also be empty, not an error.
    self._VerifyValues(
        gen_nn_ops.max_pool_v2,
        input_sizes=[0, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=[],
        use_gpu=use_gpu)

  def testMaxPooling(self):
    for use_gpu in True, False:
      self._testMaxPoolValidPadding(use_gpu)
      self._testMaxPoolSamePadding(use_gpu)
      self._testMaxPoolSamePaddingNonSquareWindow(use_gpu)
      self._testMaxPoolValidPaddingUnevenStride(use_gpu)
      self._testMaxPoolSamePaddingPacket4(use_gpu)
      self._testMaxPoolSamePaddingPacket8(use_gpu)
      self._testMaxPoolEmptyInput(use_gpu)

  # Tests for DepthwiseMaxPooling on CPU only.
  def testDepthwiseMaxPool1x1DepthWindow1(self):
    # input is:
    # [1.0, ..., 10.0] along depth,
    #
    # We maxpool by depth in patches of 2.
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 1, 1, 10],
        ksize=[1, 1, 1, 2],
        strides=[1, 1, 1, 2],
        padding="SAME",
        expected=[2.0, 4.0, 6.0, 8.0, 10.0],
        use_gpu=False)

    for v2 in [True, False]:
      self._VerifyValues(
          gen_nn_ops.max_pool_v2,
          input_sizes=[1, 1, 1, 10],
          ksize=[1, 1, 1, 2],
          strides=[1, 1, 1, 2],
          padding="SAME",
          expected=[2.0, 4.0, 6.0, 8.0, 10.0],
          use_gpu=False,
          v2=v2)

  def testDepthwiseMaxPool2x2DepthWindow3(self):
    # input is:
    #
    # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
    # output.  Each node has contiguous values, so the depthwise max
    # should be multiples of 3.0.
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 2, 2, 6],
        ksize=[1, 1, 1, 3],
        strides=[1, 1, 1, 3],
        padding="SAME",
        expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
        use_gpu=False)

    for v2 in [True, False]:
      self._VerifyValues(
          gen_nn_ops.max_pool_v2,
          input_sizes=[1, 2, 2, 6],
          ksize=[1, 1, 1, 3],
          strides=[1, 1, 1, 3],
          padding="SAME",
          expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
          use_gpu=False,
          v2=v2)

  def testKernelSmallerThanStrideValid(self):
    for use_gpu in [True, False]:
      self._VerifyValues(
          nn_ops.max_pool,
          input_sizes=[1, 7, 7, 1],
          ksize=[1, 2, 2, 1],
          strides=[1, 3, 3, 1],
          padding="VALID",
          expected=[9, 12, 30, 33],
          use_gpu=use_gpu)

      for v2 in [True, False]:
        self._VerifyValues(
            gen_nn_ops.max_pool_v2,
            input_sizes=[1, 7, 7, 1],
            ksize=[1, 2, 2, 1],
            strides=[1, 3, 3, 1],
            padding="VALID",
            expected=[9, 12, 30, 33],
            use_gpu=use_gpu,
            v2=v2)

      self._VerifyValues(
          nn_ops.avg_pool,
          input_sizes=[1, 7, 7, 1],
          ksize=[1, 2, 2, 1],
          strides=[1, 3, 3, 1],
          padding="VALID",
          expected=[5, 8, 26, 29],
          use_gpu=use_gpu)

  def testKernelSmallerThanStrideSame(self):
    for use_gpu in [True, False]:
      for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:
        self._VerifyValues(
            pool_func,
            input_sizes=[1, 3, 3, 1],
            ksize=[1, 1, 1, 1],
            strides=[1, 2, 2, 1],
            padding="SAME",
            expected=[1, 3, 7, 9],
            use_gpu=use_gpu)

        self._VerifyValues(
            pool_func,
            input_sizes=[1, 4, 4, 1],
            ksize=[1, 1, 1, 1],
            strides=[1, 2, 2, 1],
            padding="SAME",
            expected=[1, 3, 9, 11],
            use_gpu=use_gpu)

      for v2 in [True, False]:
        self._VerifyValues(
            gen_nn_ops.max_pool_v2,
            input_sizes=[1, 3, 3, 1],
            ksize=[1, 1, 1, 1],
            strides=[1, 2, 2, 1],
            padding="SAME",
            expected=[1, 3, 7, 9],
            use_gpu=use_gpu,
            v2=v2)

        self._VerifyValues(
            gen_nn_ops.max_pool_v2,
            input_sizes=[1, 4, 4, 1],
            ksize=[1, 1, 1, 1],
            strides=[1, 2, 2, 1],
            padding="SAME",
            expected=[1, 3, 9, 11],
            use_gpu=use_gpu,
            v2=v2)

  def _testDepthwiseMaxPoolInvalidConfig(self,
                                         in_size,
                                         ksize,
                                         strides,
                                         error_msg,
                                         use_gpu=False):
    # Runs max_pool with an invalid depthwise configuration and asserts the
    # resulting UnimplementedError message matches error_msg.
    with self.test_session(use_gpu=use_gpu):
      t = constant_op.constant(1.0, shape=in_size)
      with self.assertRaisesRegexp(errors_impl.UnimplementedError, error_msg):
        t = nn_ops.max_pool(
            t, ksize=ksize, strides=strides, padding="SAME").eval()

  def testDepthwiseMaxPoolInvalidConfigs(self):
    self._testDepthwiseMaxPoolInvalidConfig(
        [1, 2, 2, 4], [1, 2, 2, 2], [1, 1, 1, 2],
        "exactly one of pooling across depth")
    self._testDepthwiseMaxPoolInvalidConfig(
        [1, 2, 2, 4], [1, 1, 1, 2], [1, 1, 1, 1],
        "depth window to equal the depth stride")
    self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3],
                                            [1, 1, 1, 3], "evenly divide")
    if test.is_gpu_available():
      # Depthwise max pooling is CPU-only; placing it on GPU must error out.
      with self.test_session(use_gpu=True):
        t = variables.Variable(np.ones([1, 2, 2, 4]))
        variables.global_variables_initializer().run()
        with self.assertRaisesOpError("for CPU devices"):
          nn_ops.max_pool(
              t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
              padding="SAME").eval()

  # The following are tests that verify that the CPU and GPU implementations
  # produce the same results.
def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding): for dtype in np.float64, np.float32, np.float16: tensor_input = np.random.rand(*input_shape).astype(dtype) with self.test_session(use_gpu=True): t = constant_op.constant(tensor_input, shape=input_shape) out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding) gpu_val = out_op.eval() with self.test_session(use_gpu=False): t = constant_op.constant(tensor_input, shape=input_shape) out_op = nn_ops.max_pool(t, ksize, strides, padding) cpu_val = out_op.eval() self.assertAllCloseAccordingToType(cpu_val, gpu_val) def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides, padding): for dtype in np.float64, np.float32, np.float16: # Generate numbers in a narrow range, so that there are many duplicates # in the input. tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype) tensor_output = np.random.rand(*output_shape).astype(dtype) with self.test_session(use_gpu=True): t = constant_op.constant(tensor_input, shape=input_shape) _, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding) argmax = argmax_op.eval() grad_in = constant_op.constant(tensor_output, shape=output_shape) out_op = gen_nn_ops.max_pool_grad_with_argmax(t, grad_in, argmax, ksize, strides, padding) gpu_val = out_op.eval() self.assertShapeEqual(gpu_val, out_op) with self.test_session(use_gpu=False): t = constant_op.constant(tensor_input, shape=input_shape) out_op = nn_ops.max_pool(t, ksize, strides, padding) orig_out = out_op.eval() grad_in = constant_op.constant(tensor_output, shape=output_shape) out_op = gen_nn_ops.max_pool_grad(t, orig_out, grad_in, ksize, strides, padding) cpu_val = out_op.eval() self.assertShapeEqual(cpu_val, out_op) # The CPU version accumulates its gradient on fp16, so it's less # accurate than the GPU version that does the accumulation on fp32 self.assertAllCloseAccordingToType( cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01) def _CompareMaxPoolingGradBk(self, 
input_shape, output_shape, ksize, strides, padding): for dtype in np.float64, np.float32, np.float16: # Generate numbers in a narrow range, so that there are many duplicates # in the input. tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype) with self.test_session(use_gpu=True): t = constant_op.constant(tensor_input, shape=input_shape) _, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding) argmax = argmax_op.eval() grad_in = constant_op.constant(tensor_input, shape=input_shape) out_op = gen_nn_ops.max_pool_grad_grad_with_argmax( t, grad_in, argmax, ksize, strides, padding) gpu_val = out_op.eval() self.assertShapeEqual(gpu_val, out_op) with self.test_session(use_gpu=False): t = constant_op.constant(tensor_input, shape=input_shape) out_op = nn_ops.max_pool(t, ksize, strides, padding) orig_out = out_op.eval() grad_in = constant_op.constant(tensor_input, shape=input_shape) out_op = gen_nn_ops.max_pool_grad_grad(t, orig_out, grad_in, ksize, strides, padding) cpu_val = out_op.eval() self.assertShapeEqual(cpu_val, out_op) # The CPU version accumulates its gradient on fp16, so it's less # accurate than the GPU version that does the accumulation on fp32 self.assertAllCloseAccordingToType( cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01) def testMaxPoolingWithArgmax(self): tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0] with self.test_session(use_gpu=True) as sess: t = constant_op.constant(tensor_input, shape=[1, 3, 3, 1]) out_op, argmax_op = nn_ops.max_pool_with_argmax( t, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], Targmax=dtypes.int64, padding="VALID") out, argmax = sess.run([out_op, argmax_op]) self.assertShapeEqual(out, out_op) self.assertShapeEqual(argmax, argmax_op) self.assertAllClose(out.ravel(), [1.0, 1.0, 1.0, 1.0]) self.assertAllEqual(argmax.ravel(), [0, 1, 3, 5]) def testMaxPoolingGradWithArgmax(self): orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0] tensor_input = [11.0, 12.0, 13.0, 14.0] tensor_argmax 
= list(np.array([0, 1, 3, 5], dtype=np.int64)) with self.test_session(use_gpu=True): orig_in = constant_op.constant(orig_input, shape=[1, 3, 3, 1]) t = constant_op.constant(tensor_input, shape=[1, 2, 2, 1]) argmax = constant_op.constant( tensor_argmax, shape=[1, 2, 2, 1], dtype=dtypes.int64) out_op = gen_nn_ops.max_pool_grad_with_argmax( orig_in, t, argmax, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="VALID") out = out_op.eval().flatten() self.assertAllClose(out, [11.0, 12.0, 0.0, 13.0, 0.0, 14.0, 0.0, 0.0, 0.0]) def testMaxPoolingGradGradWithArgmax(self): # MaxPoolWithArgMax is implemented only on CUDA. if not test.is_gpu_available(cuda_only=True): return orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0] tensor_input = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0] tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64)) with self.test_session(use_gpu=True): orig_in = constant_op.constant(orig_input, shape=[1, 3, 3, 1]) t = constant_op.constant(tensor_input, shape=[1, 3, 3, 1]) argmax = constant_op.constant( tensor_argmax, shape=[1, 2, 2, 1], dtype=dtypes.int64) out_op = gen_nn_ops.max_pool_grad_grad_with_argmax( orig_in, t, argmax, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="VALID") out = out_op.eval().flatten() self.assertAllClose(out, [11.0, 12.0, 14.0, 16.0]) def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes, window_rows, window_cols, row_stride, col_stride, padding, data_format, use_gpu, x_init_value=None): """Verifies the gradients of the avg pooling function. Args: pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua version. input_sizes: Input tensor dimensions. output_sizes: Output tensor dimensions. window_rows: kernel size in row dim window_cols: kernel size in col dim row_stride: Row Stride. col_stride: Col Stride. padding: Padding type. data_format: Data format. use_gpu: whether we are running on GPU x_init_value: Values to be passed to the gradient checker. 
""" assert input_sizes[0] == output_sizes[0] assert input_sizes[3] == output_sizes[3] total_size = 1 for s in input_sizes: total_size *= s # Initializes the input tensor with array containing incrementing # numbers from 1. x = [f * 1.0 for f in range(1, total_size + 1)] with self.test_session(use_gpu=use_gpu): input_tensor = constant_op.constant(x, shape=input_sizes, name="input") if pool_func == nn_ops.avg_pool: func_name = "avg_pool" err_tolerance = 1e-4 else: if x_init_value is None: x_init_value = np.asfarray( np.arange(1, total_size + 1), dtype=np.float32).reshape(input_sizes) func_name = "max_pool" err_tolerance = 1e-3 if data_format == "NCHW": ksize = [1, 1, window_rows, window_rows] strides = [1, 1, row_stride, col_stride] t = test_util.NHWCToNCHW(input_tensor) else: ksize = [1, window_rows, window_rows, 1] strides = [1, row_stride, col_stride, 1] t = input_tensor t = pool_func( t, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=func_name) if data_format == "NCHW": t = test_util.NCHWToNHWC(t) err = gradient_checker.compute_gradient_error( input_tensor, input_sizes, t, output_sizes, x_init_value=x_init_value, delta=1e-2) tf_logging.info("%s gradient error = " % func_name, err) self.assertLess(err, err_tolerance) def _ConstructAndTestSecondGradient(self, pool_func, input_sizes, output_sizes, window_rows, window_cols, row_stride, col_stride, padding, data_format, use_gpu, x_init_value=None): """Verifies the second-order gradients of the pooling function. Args: pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua version. input_sizes: Input tensor dimensions. output_sizes: Output tensor dimensions. window_rows: kernel size in row dim window_cols: kernel size in col dim row_stride: Row Stride. col_stride: Col Stride. padding: Padding type. data_format: Data format. use_gpu: whether we are running on GPU x_init_value: Values to be passed to the gradient checker. 
""" assert input_sizes[0] == output_sizes[0] assert input_sizes[3] == output_sizes[3] total_size = 1 for s in input_sizes: total_size *= s # Initializes the input tensor with array containing incrementing # numbers from 1. x = [f * 1.0 for f in range(1, total_size + 1)] with self.test_session(use_gpu=use_gpu): input_tensor = constant_op.constant(x, shape=input_sizes, name="input") if pool_func == nn_ops.avg_pool: func_name = "avg_pool" err_tolerance = 1e-3 else: if x_init_value is None: x_init_value = np.asfarray( np.arange(1, total_size + 1), dtype=np.float32).reshape(input_sizes) func_name = "max_pool" err_tolerance = 1e-2 if data_format == "NCHW": ksize = [1, 1, window_rows, window_rows] strides = [1, 1, row_stride, col_stride] t = test_util.NHWCToNCHW(input_tensor) else: ksize = [1, window_rows, window_rows, 1] strides = [1, row_stride, col_stride, 1] t = input_tensor t = pool_func( t, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=func_name) if data_format == "NCHW": t = test_util.NHWCToNCHW(t) t_g = gradients_impl.gradients(t**2, input_tensor)[0] err = gradient_checker.compute_gradient_error( input_tensor, input_sizes, t_g, input_sizes, x_init_value=x_init_value, delta=1e-2) tf_logging.info("%s second-order gradient error = " % func_name, err) self.assertLess(err, err_tolerance) def _testMaxPoolGradValidPadding1_1(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[1, 3, 3, 1], output_sizes=[1, 3, 3, 1], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradValidPadding2_1_6(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[2, 6, 6, 3], output_sizes=[2, 5, 5, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", data_format=data_format, 
use_gpu=use_gpu) def _testMaxPoolGradValidPadding2_1_7(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[2, 7, 7, 3], output_sizes=[2, 6, 6, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradValidPadding1_2(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[1, 3, 3, 1], output_sizes=[1, 2, 2, 1], window_rows=1, window_cols=1, row_stride=2, col_stride=2, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradValidPadding2_2(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[2, 2, 2, 3], output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradSamePadding1_1(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradSamePadding1_2(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[2, 2, 4, 3], output_sizes=[2, 1, 2, 3], window_rows=1, window_cols=1, row_stride=2, col_stride=2, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradSamePadding2_1(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="SAME", 
data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradSamePadding2_2(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[2, 2, 4, 3], output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradSamePadding3_1(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestGradient( pool_func, input_sizes=[1, 7, 7, 1], output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1, col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu) def testMaxPoolGrad(self): for (data_format, use_gpu) in GetTestConfigs(): self._testMaxPoolGradValidPadding1_1(data_format, use_gpu) self._testMaxPoolGradValidPadding1_2(data_format, use_gpu) self._testMaxPoolGradValidPadding2_1_6(data_format, use_gpu) self._testMaxPoolGradValidPadding2_1_7(data_format, use_gpu) self._testMaxPoolGradValidPadding2_2(data_format, use_gpu) self._testMaxPoolGradSamePadding1_1(data_format, use_gpu) self._testMaxPoolGradSamePadding1_2(data_format, use_gpu) self._testMaxPoolGradSamePadding2_1(data_format, use_gpu) self._testMaxPoolGradSamePadding2_2(data_format, use_gpu) self._testMaxPoolGradSamePadding3_1(data_format, use_gpu) def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows, window_cols, row_stride, col_stride, padding, v2): """Max Pooling Gradient. Args: orig_input: A float Tensor. The original input tensor. orig_output: A float Tensor. The original output tensor. grad: A float Tensor. The 4D (batch x rows x cols x depth) output backprop. window_rows: integer. Kernel size along rows dimension. window_cols: integer. Kernel size along cols dimension. row_stride: integer. Stride along rows dimension col_stride: integer. Stride along cols dimension padding: PoolingOpDef.Padding. Padding type. Returns: A Tensor. 
""" pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops.max_pool_grad return pool_func(orig_input, orig_output, grad, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding) def _testMaxPoolGradDirect(self, input_data, output_backprop, expected_input_backprop, input_sizes, output_sizes, window_rows, window_cols, row_stride, col_stride, padding, use_gpu, v2): pool_func = gen_nn_ops.max_pool_v2 if v2 else nn_ops.max_pool with self.test_session(use_gpu=use_gpu): input_tensor = variables.Variable( np.array(input_data, dtype=np.float32).reshape(input_sizes)) variables.global_variables_initializer().run() output_tensor = pool_func(input_tensor, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding) output_backprop_tensor = constant_op.constant( output_backprop, shape=output_sizes) input_backprop_tensor = self._MaxPoolGrad( input_tensor, output_tensor, output_backprop_tensor, window_rows, window_cols, row_stride, col_stride, padding, v2) actual_input_backprop = input_backprop_tensor.eval() self.assertShapeEqual(actual_input_backprop, input_backprop_tensor) actual_input_backprop = actual_input_backprop.flatten() actual_input_backprop = self._GetNdArray(actual_input_backprop) actual_output = output_tensor.eval().flatten() actual_output = self._GetNdArray(actual_output) self.assertAllClose( expected_input_backprop, actual_input_backprop, rtol=1e-6, atol=1e-6) def _testMaxPoolGradDirect1_1(self): input_data = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ] output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0] expected_input_backprop = [ 11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] for use_gpu in True, False: for v2 in [True, False]: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, 
padding="VALID", use_gpu=use_gpu, v2=v2) def _testMaxPoolGradDirect1_2(self): input_data = [ 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0 ] output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0] expected_input_backprop = [ 11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] for use_gpu in True, False: for v2 in [True, False]: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=use_gpu, v2=v2) def _testMaxPoolGradDirect1_3(self): input_data = [ 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, ] output_backprop = [ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0 ] expected_input_backprop = [ 54, 0.0, 62, 0.0, 0.0, 60, 0.0, 22.0, 47, 0.0, 51, 0.0, 0.0, 0.0, 0.0, 0.0, ] for use_gpu in True, False: for v2 in [True, False]: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop, input_sizes=[1, 4, 4, 1], output_sizes=[1, 4, 4, 1], window_rows=3, window_cols=3, row_stride=1, col_stride=1, padding="SAME", use_gpu=use_gpu, v2=v2) def _testMaxPoolGradDirectWithNans2_1(self): input_data = [float("nan")] * 16 output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0] # Test the CPU implementation, which propagates diffs in case of NaN expected_input_backprop_tf_cpu = [ 11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] for v2 in [True, False]: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_tf_cpu, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=False, v2=v2) if not test.is_gpu_available(): return # Test the GPU implementation that uses cudnn for now. 
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP") # Do not propagate the diff in cases of NaNs os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0" expected_input_backprop_cudnn = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] for v2 in [True, False]: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_cudnn, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=True, v2=v2) # Propagate the diff in cases of NaNs os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1" expected_input_backprop_cudnn = expected_input_backprop_tf_cpu for v2 in [True, False]: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_cudnn, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=True, v2=v2) if saved_nanprop: os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop else: del os.environ["TF_ENABLE_MAXPOOL_NANPROP"] def _testMaxPoolGradDirectWithNans2_2(self): input_data = [float("nan")] * 16 output_backprop = [ float("nan"), 12.0, 13.0, 15.0, float("nan"), 17.0, 19.0, 20.0, float("nan") ] # Test the CPU implementation, which propagates diffs in case of NaN expected_input_backprop_tf_cpu = [ float("nan"), 12.0, 13.0, 0.0, 15.0, float("nan"), 17.0, 0.0, 19.0, 20.0, float("nan"), 0.0, 0.0, 0.0, 0.0, 0.0 ] for v2 in [True, False]: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_tf_cpu, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=False, v2=v2) if not test.is_gpu_available(): return # Test the GPU implementation that uses cudnn for now. 
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP") # Do not propagate the diff in cases of NaNs os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0" expected_input_backprop_cudnn = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] for v2 in [True, False]: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_cudnn, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=True, v2=v2) # Propagate the diff in cases of NaNs os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1" expected_input_backprop_cudnn = expected_input_backprop_tf_cpu for v2 in [True, False]: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_cudnn, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=True, v2=v2) if saved_nanprop: os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop else: del os.environ["TF_ENABLE_MAXPOOL_NANPROP"] def testMaxPoolGradDirect(self): self._testMaxPoolGradDirect1_1() self._testMaxPoolGradDirect1_2() self._testMaxPoolGradDirect1_3() self._testMaxPoolGradDirectWithNans2_1() self._testMaxPoolGradDirectWithNans2_2() def _testMaxPoolGradGradValidPadding1_1(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestSecondGradient( pool_func, input_sizes=[1, 3, 3, 1], output_sizes=[1, 3, 3, 1], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradGradValidPadding2_1_6(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestSecondGradient( pool_func, input_sizes=[2, 6, 6, 3], output_sizes=[2, 5, 5, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu) def 
_testMaxPoolGradGradValidPadding2_1_7(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestSecondGradient( pool_func, input_sizes=[2, 7, 7, 3], output_sizes=[2, 6, 6, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradGradValidPadding2_2(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestSecondGradient( pool_func, input_sizes=[2, 2, 2, 3], output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradGradSamePadding1_1(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestSecondGradient( pool_func, input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradGradSamePadding2_1(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestSecondGradient( pool_func, input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradGradSamePadding2_2(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestSecondGradient( pool_func, input_sizes=[2, 2, 4, 3], output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testMaxPoolGradGradSamePadding3_1(self, data_format, use_gpu): for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]: self._ConstructAndTestSecondGradient( pool_func, input_sizes=[1, 7, 7, 1], output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, 
row_stride=1, col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu) def testMaxPoolGradGrad(self): for (data_format, use_gpu) in GetTestConfigs(): self._testMaxPoolGradGradValidPadding1_1(data_format, use_gpu) self._testMaxPoolGradGradValidPadding2_1_6(data_format, use_gpu) self._testMaxPoolGradGradValidPadding2_1_7(data_format, use_gpu) self._testMaxPoolGradGradValidPadding2_2(data_format, use_gpu) self._testMaxPoolGradGradSamePadding1_1(data_format, use_gpu) self._testMaxPoolGradGradSamePadding2_1(data_format, use_gpu) self._testMaxPoolGradGradSamePadding2_2(data_format, use_gpu) self._testMaxPoolGradGradSamePadding3_1(data_format, use_gpu) def _MaxPoolGradGrad(self, orig_input, orig_output, grad, window_rows, window_cols, row_stride, col_stride, padding): """Max Pooling Second-Order Gradient. Args: orig_input: A float Tensor. The original input tensor. orig_output: A float Tensor. The original output tensor. grad: A float Tensor. The 4D (batch x out_rows x out_cols x depth) output backprop. window_rows: integer. Kernel size along rows dimension. window_cols: integer. Kernel size along cols dimension. row_stride: integer. Stride along rows dimension col_stride: integer. Stride along cols dimension padding: PoolingOpDef.Padding. Padding type. Returns: A Tensor. 
""" return gen_nn_ops.max_pool_grad_grad( orig_input, orig_output, grad, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding) def testAvgPoolGrad(self): for (data_format, use_gpu) in GetTestConfigs(): self._testAvgPoolGradValidPadding1_1(data_format, use_gpu) self._testAvgPoolGradValidPadding1_2(data_format, use_gpu) self._testAvgPoolGradValidPadding2_1(data_format, use_gpu) self._testAvgPoolGradValidPadding2_2(data_format, use_gpu) self._testAvgPoolGradSamePadding1_1(data_format, use_gpu) self._testAvgPoolGradSamePadding1_2(data_format, use_gpu) self._testAvgPoolGradSamePadding2_1(data_format, use_gpu) self._testAvgPoolGradSamePadding2_2(data_format, use_gpu) self._testAvgPoolGradSamePadding3_1(data_format, use_gpu) def _testAvgPoolGradValidPadding1_1(self, data_format, use_gpu): self._ConstructAndTestGradient( nn_ops.avg_pool, input_sizes=[2, 3, 3, 3], output_sizes=[2, 3, 3, 3], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testAvgPoolGradValidPadding1_2(self, data_format, use_gpu): self._ConstructAndTestGradient( nn_ops.avg_pool, input_sizes=[2, 3, 3, 3], output_sizes=[2, 2, 2, 3], window_rows=1, window_cols=1, row_stride=2, col_stride=2, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testAvgPoolGradValidPadding2_1(self, data_format, use_gpu): self._ConstructAndTestGradient( nn_ops.avg_pool, input_sizes=[2, 3, 3, 3], output_sizes=[2, 2, 2, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testAvgPoolGradValidPadding2_2(self, data_format, use_gpu): self._ConstructAndTestGradient( nn_ops.avg_pool, input_sizes=[2, 2, 2, 3], output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="VALID", data_format=data_format, use_gpu=use_gpu) def _testAvgPoolGradSamePadding1_1(self, data_format, use_gpu): self._ConstructAndTestGradient( nn_ops.avg_pool, 
input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testAvgPoolGradSamePadding1_2(self, data_format, use_gpu): self._ConstructAndTestGradient( nn_ops.avg_pool, input_sizes=[2, 2, 4, 3], output_sizes=[2, 1, 2, 3], window_rows=1, window_cols=1, row_stride=2, col_stride=2, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testAvgPoolGradSamePadding2_1(self, data_format, use_gpu): self._ConstructAndTestGradient( nn_ops.avg_pool, input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testAvgPoolGradSamePadding2_2(self, data_format, use_gpu): self._ConstructAndTestGradient( nn_ops.avg_pool, input_sizes=[2, 2, 4, 3], output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="SAME", data_format=data_format, use_gpu=use_gpu) def _testAvgPoolGradSamePadding3_1(self, data_format, use_gpu): self._ConstructAndTestGradient( nn_ops.avg_pool, input_sizes=[1, 7, 7, 1], output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1, col_stride=1, padding="SAME", data_format=data_format, use_gpu=use_gpu) def testShapeFunctionEdgeCases(self): # All shapes unknown. for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]: p = pool_func( array_ops.placeholder(dtypes.float32), ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME") self.assertEqual([None, None, None, None], p.get_shape().as_list()) p, am = nn_ops.max_pool_with_argmax( array_ops.placeholder(dtypes.float32), ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME") self.assertEqual([None, None, None, None], p.get_shape().as_list()) self.assertEqual([None, None, None, None], am.get_shape().as_list()) # Incorrect input shape. 
for pool_func in [ nn_ops.max_pool, nn_ops.avg_pool, nn_ops.max_pool_with_argmax ]: with self.assertRaises(ValueError): pool_func( array_ops.placeholder(dtypes.float32, shape=[1, 3]), ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME") def testOpEdgeCases(self): with self.test_session(use_gpu=test.is_gpu_available()) as sess: pool_funcs = [nn_ops.max_pool, nn_ops.avg_pool] if test.is_gpu_available(): pool_funcs.append(nn_ops.max_pool_with_argmax) for pool_func in pool_funcs: if pool_func != nn_ops.max_pool: # Illegal strides. with self.assertRaisesRegexp( errors_impl.UnimplementedError, "Pooling is not yet supported on the batch"): sess.run( pool_func( array_ops.placeholder(dtypes.float32), ksize=[1, 1, 1, 1], strides=[2, 1, 1, 1], padding="SAME")) # Filter too large. with self.assertRaisesRegexp(ValueError, "Negative dimension size"): sess.run( pool_func( array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]), ksize=[1, 20, 21, 1], strides=[1, 1, 1, 1], padding="VALID")) with self.assertRaisesRegexp(ValueError, "Negative dimension size"): pool_func( array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]), ksize=[1, 21, 20, 1], strides=[1, 1, 1, 1], padding="VALID") def GetMaxPoolFwdTest(input_size, filter_size, strides, padding): def Test(self): # MaxPoolWithArgMax is implemented only on CUDA. if not test.is_gpu_available(cuda_only=True): return self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding) return Test def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding): def Test(self): # MaxPoolWithArgMax is implemented only on CUDA. if not test.is_gpu_available(cuda_only=True): return self._CompareMaxPoolingBk(input_size, output_size, filter_size, strides, padding) return Test def GetMaxPoolGradGradTest(input_size, filter_size, output_size, strides, padding): def Test(self): # MaxPoolWithArgMax is implemented only on CUDA. 
if not test.is_gpu_available(cuda_only=True): return self._CompareMaxPoolingGradBk(input_size, output_size, filter_size, strides, padding) return Test if __name__ == "__main__": for (name_, input_size_, filter_size_, output_size_, stride_, padding_) in GetShrunkInceptionMaxPoolShapes(): setattr(PoolingTest, "testMaxPoolFwd_" + name_, GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_)) setattr(PoolingTest, "testMaxPoolGrad_" + name_, GetMaxPoolGradTest(input_size_, filter_size_, output_size_, stride_, padding_)) setattr(PoolingTest, "testMaxPoolGradGrad_" + name_, GetMaxPoolGradGradTest(input_size_, filter_size_, output_size_, stride_, padding_)) test.main()
apache-2.0
beaufour/mtop
mtop/lib/ops.py
1
1081
from pymongo.errors import AutoReconnect

try:
    from bson.son import SON
except ImportError:
    # fall back to old location
    from pymongo.son import SON


class MongoOps(object):
    """
    Helper class for mongo commands we use. Wraps calls in try/except so that
    resizing does not break them.
    """

    def __init__(self, connection):
        """
        @param connection: pymongo Connection to use.
        """
        self._connection = connection

    def get_inprog(self):
        """Return the list of in-progress server operations.

        Returns an empty list when the server is temporarily unreachable
        (AutoReconnect), so callers never have to guard against None.
        """
        ret = None
        try:
            # $cmd.sys.inprog is the virtual collection backing currentOp().
            ret = self._connection.db['$cmd.sys.inprog'].find_one()
        except AutoReconnect:
            pass
        return ret['inprog'] if ret else []

    def get_server_status(self):
        """Return the serverStatus command result document.

        Returns None when the server is temporarily unreachable
        (AutoReconnect).
        """
        ret = None
        try:
            # SON preserves key order, which MongoDB requires for commands:
            # the command name ('serverStatus') must be the first key.
            # Fixed: the 'workingSet' pair was a list, inconsistent with the
            # tuple pairs around it.
            ret = self._connection.db.command(SON([('serverStatus', 1),
                                                  ('repl', 2),
                                                  ('workingSet', 1),
                                                  ]))
        except AutoReconnect:
            pass
        return ret
apache-2.0
TeamExodus/external_chromium_org
tools/prepare-bisect-perf-regression.py
84
2403
#!/usr/bin/env python # Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Prepare Performance Test Bisect Tool This script is used by a try bot to create a working directory and sync an initial copy of the depot for use in bisecting performance regressions. An example usage: ./tools/prepare-bisect-perf-regressions.py --working_directory "~/builds" --output_buildbot_annotations Would result in creating ~/builds/bisect and then populating it with a copy of the depot. """ import optparse import sys from auto_bisect import bisect_utils def main(): """Does an initial checkout of Chromium then exits.""" usage = ('%prog [options] [-- chromium-options]\n' 'Prepares a temporary depot for use on a try bot.') parser = optparse.OptionParser(usage=usage) parser.add_option('-w', '--working_directory', type='str', help='Path to the working directory where the script will ' 'do an initial checkout of the chromium depot. The ' 'files will be placed in a subdirectory "bisect" under ' 'working_directory and that will be used to perform the ' 'bisection.') parser.add_option('--output_buildbot_annotations', action='store_true', help='Add extra annotation output for buildbot.') parser.add_option('--target_platform', type='choice', choices=['chromium', 'cros', 'android'], default='chromium', help='The target platform. Choices are "chromium" (current ' 'platform), "cros", or "android". 
If you specify something ' 'other than "chromium", you must be properly set up to ' 'build that platform.') opts, _ = parser.parse_args() if not opts.working_directory: print 'Error: missing required parameter: --working_directory' print parser.print_help() return 1 if not bisect_utils.CheckIfBisectDepotExists(opts): try: bisect_utils.CreateBisectDirectoryAndSetupDepot( opts, bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS) except RuntimeError: return 1 return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
jeppeter/drizzlebr
plugin/mysql_protocol/prototest/prototest/mysql/bitfield.py
5
4028
#!/usr/bin/env python # # Drizzle Client & Protocol Library # # Copyright (C) 2008 Eric Day (eday@oddments.org) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # # * The names of its contributors may not be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#

import unittest


class BitField(object):
    '''Base class for managing bitfields.

    Subclasses define ``_fields``, an ordered list of upper-case flag names;
    bit ``i`` corresponds to ``_fields[i]``.

    Access conventions:
      * ``obj.NAME``  (upper-case)  -> the bit mask for that flag (1 << i)
      * ``obj.name``  (lower-case)  -> bool, whether that bit is set
      * ``obj.name = value``        -> sets or clears that bit
    Any other attribute spelling raises AttributeError.
    '''

    _fields = []

    def __init__(self, value=0):
        self._value = value

    def __getattr__(self, name):
        # list.index raises ValueError for names not in _fields; translate
        # only that into AttributeError instead of the original blanket
        # "raise Exception / except Exception" trampoline, which also
        # masked genuine programming errors inside this method.
        try:
            if name.isupper():
                return 1 << self._fields.index(name)
            if name.islower():
                return self._value & (1 << self._fields.index(name.upper())) != 0
        except ValueError:
            pass
        raise AttributeError("'%s' object has no attribute '%s'"
                             % (self.__class__.__name__, name))

    def __setattr__(self, name, value):
        # Underscore-prefixed attributes (e.g. _value) bypass the bitfield
        # logic and are stored directly on the instance.
        if name[:1] == '_':
            self.__dict__[name] = value
            return
        try:
            if name.islower():
                mask = 1 << self._fields.index(name.upper())
                if value:
                    self._value |= mask
                else:
                    self._value &= ~mask
                return
        except ValueError:
            pass
        raise AttributeError("'%s' object has no attribute '%s'"
                             % (self.__class__.__name__, name))

    def __str__(self):
        # List of (name, mask) pairs for every bit currently set.
        return str([(self._fields[x], 1 << x)
                    for x in range(0, len(self._fields))
                    if (1 << x) & self._value])

    def value(self):
        '''Return the raw integer value of the bitfield.'''
        return self._value


class ExampleField(BitField):
    _fields = ['READ', 'WRITE', 'CREATE', 'DIRECT']


class TestField(unittest.TestCase):

    def testDefaultInit(self):
        f = ExampleField()
        self.assertEqual(f.value(), 0)

    def testDataInit(self):
        f = ExampleField(15)
        self.assertEqual(f.value(), 15)

    def testGetAttr(self):
        f = ExampleField(1)
        self.assertEqual(f.read, True)
        self.assertEqual(f.READ, 1)
        self.assertEqual(f.write, False)
        self.assertEqual(f.WRITE, 2)

    def testBadGetAttr(self):
        f = ExampleField()
        self.assertRaises(AttributeError, getattr, f, 'BAD')
        self.assertRaises(AttributeError, getattr, f, 'bad')
        self.assertRaises(AttributeError, getattr, f, 'Read')

    def testSetAttr(self):
        f = ExampleField()
        self.assertEqual(f.read, False)
        self.assertEqual(f.write, False)
        f.read = True
        self.assertEqual(f.read, True)
        self.assertEqual(f.write, False)

    def testBadSetAttr(self):
        f = ExampleField()
        self.assertRaises(AttributeError, setattr, f, 'BAD', 0)
        self.assertRaises(AttributeError, setattr, f, 'bad', 0)
        self.assertRaises(AttributeError, setattr, f, 'Read', 0)
        self.assertRaises(AttributeError, setattr, f, 'READ', 0)


if __name__ == '__main__':
    unittest.main()
gpl-2.0
teeple/pns_server
work/install/node-v0.10.25/deps/v8/tools/stats-viewer.py
143
15033
#!/usr/bin/env python # # Copyright 2008 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """A cross-platform execution counter viewer. The stats viewer reads counters from a binary file and displays them in a window, re-reading and re-displaying with regular intervals. 
""" import mmap import optparse import os import re import struct import sys import time import Tkinter # The interval, in milliseconds, between ui updates UPDATE_INTERVAL_MS = 100 # Mapping from counter prefix to the formatting to be used for the counter COUNTER_LABELS = {"t": "%i ms.", "c": "%i"} # The magic numbers used to check if a file is not a counters file COUNTERS_FILE_MAGIC_NUMBER = 0xDEADFACE CHROME_COUNTERS_FILE_MAGIC_NUMBER = 0x13131313 class StatsViewer(object): """The main class that keeps the data used by the stats viewer.""" def __init__(self, data_name, name_filter): """Creates a new instance. Args: data_name: the name of the file containing the counters. name_filter: The regexp filter to apply to counter names. """ self.data_name = data_name self.name_filter = name_filter # The handle created by mmap.mmap to the counters file. We need # this to clean it up on exit. self.shared_mmap = None # A mapping from counter names to the ui element that displays # them self.ui_counters = {} # The counter collection used to access the counters file self.data = None # The Tkinter root window object self.root = None def Run(self): """The main entry-point to running the stats viewer.""" try: self.data = self.MountSharedData() # OpenWindow blocks until the main window is closed self.OpenWindow() finally: self.CleanUp() def MountSharedData(self): """Mount the binary counters file as a memory-mapped file. If something goes wrong print an informative message and exit the program.""" if not os.path.exists(self.data_name): maps_name = "/proc/%s/maps" % self.data_name if not os.path.exists(maps_name): print "\"%s\" is neither a counter file nor a PID." % self.data_name sys.exit(1) maps_file = open(maps_name, "r") try: self.data_name = None for m in re.finditer(r"/dev/shm/\S*", maps_file.read()): if os.path.exists(m.group(0)): self.data_name = m.group(0) break if self.data_name is None: print "Can't find counter file in maps for PID %s." 
% self.data_name sys.exit(1) finally: maps_file.close() data_file = open(self.data_name, "r") size = os.fstat(data_file.fileno()).st_size fileno = data_file.fileno() self.shared_mmap = mmap.mmap(fileno, size, access=mmap.ACCESS_READ) data_access = SharedDataAccess(self.shared_mmap) if data_access.IntAt(0) == COUNTERS_FILE_MAGIC_NUMBER: return CounterCollection(data_access) elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER: return ChromeCounterCollection(data_access) print "File %s is not stats data." % self.data_name sys.exit(1) def CleanUp(self): """Cleans up the memory mapped file if necessary.""" if self.shared_mmap: self.shared_mmap.close() def UpdateCounters(self): """Read the contents of the memory-mapped file and update the ui if necessary. If the same counters are present in the file as before we just update the existing labels. If any counters have been added or removed we scrap the existing ui and draw a new one. """ changed = False counters_in_use = self.data.CountersInUse() if counters_in_use != len(self.ui_counters): self.RefreshCounters() changed = True else: for i in xrange(self.data.CountersInUse()): counter = self.data.Counter(i) name = counter.Name() if name in self.ui_counters: value = counter.Value() ui_counter = self.ui_counters[name] counter_changed = ui_counter.Set(value) changed = (changed or counter_changed) else: self.RefreshCounters() changed = True break if changed: # The title of the window shows the last time the file was # changed. 
self.UpdateTime() self.ScheduleUpdate() def UpdateTime(self): """Update the title of the window with the current time.""" self.root.title("Stats Viewer [updated %s]" % time.strftime("%H:%M:%S")) def ScheduleUpdate(self): """Schedules the next ui update.""" self.root.after(UPDATE_INTERVAL_MS, lambda: self.UpdateCounters()) def RefreshCounters(self): """Tear down and rebuild the controls in the main window.""" counters = self.ComputeCounters() self.RebuildMainWindow(counters) def ComputeCounters(self): """Group the counters by the suffix of their name. Since the same code-level counter (for instance "X") can result in several variables in the binary counters file that differ only by a two-character prefix (for instance "c:X" and "t:X") counters are grouped by suffix and then displayed with custom formatting depending on their prefix. Returns: A mapping from suffixes to a list of counters with that suffix, sorted by prefix. """ names = {} for i in xrange(self.data.CountersInUse()): counter = self.data.Counter(i) name = counter.Name() names[name] = counter # By sorting the keys we ensure that the prefixes always come in the # same order ("c:" before "t:") which looks more consistent in the # ui. sorted_keys = names.keys() sorted_keys.sort() # Group together the names whose suffix after a ':' are the same. groups = {} for name in sorted_keys: counter = names[name] if ":" in name: name = name[name.find(":")+1:] if not name in groups: groups[name] = [] groups[name].append(counter) return groups def RebuildMainWindow(self, groups): """Tear down and rebuild the main window. 
Args: groups: the groups of counters to display """ # Remove elements in the current ui self.ui_counters.clear() for child in self.root.children.values(): child.destroy() # Build new ui index = 0 sorted_groups = groups.keys() sorted_groups.sort() for counter_name in sorted_groups: counter_objs = groups[counter_name] if self.name_filter.match(counter_name): name = Tkinter.Label(self.root, width=50, anchor=Tkinter.W, text=counter_name) name.grid(row=index, column=0, padx=1, pady=1) count = len(counter_objs) for i in xrange(count): counter = counter_objs[i] name = counter.Name() var = Tkinter.StringVar() if self.name_filter.match(name): value = Tkinter.Label(self.root, width=15, anchor=Tkinter.W, textvariable=var) value.grid(row=index, column=(1 + i), padx=1, pady=1) # If we know how to interpret the prefix of this counter then # add an appropriate formatting to the variable if (":" in name) and (name[0] in COUNTER_LABELS): format = COUNTER_LABELS[name[0]] else: format = "%i" ui_counter = UiCounter(var, format) self.ui_counters[name] = ui_counter ui_counter.Set(counter.Value()) index += 1 self.root.update() def OpenWindow(self): """Create and display the root window.""" self.root = Tkinter.Tk() # Tkinter is no good at resizing so we disable it self.root.resizable(width=False, height=False) self.RefreshCounters() self.ScheduleUpdate() self.root.mainloop() class UiCounter(object): """A counter in the ui.""" def __init__(self, var, format): """Creates a new ui counter. Args: var: the Tkinter string variable for updating the ui format: the format string used to format this counter """ self.var = var self.format = format self.last_value = None def Set(self, value): """Updates the ui for this counter. Args: value: The value to display Returns: True if the value had changed, otherwise False. The first call always returns True. 
""" if value == self.last_value: return False else: self.last_value = value self.var.set(self.format % value) return True class SharedDataAccess(object): """A utility class for reading data from the memory-mapped binary counters file.""" def __init__(self, data): """Create a new instance. Args: data: A handle to the memory-mapped file, as returned by mmap.mmap. """ self.data = data def ByteAt(self, index): """Return the (unsigned) byte at the specified byte index.""" return ord(self.CharAt(index)) def IntAt(self, index): """Return the little-endian 32-byte int at the specified byte index.""" word_str = self.data[index:index+4] result, = struct.unpack("I", word_str) return result def CharAt(self, index): """Return the ascii character at the specified byte index.""" return self.data[index] class Counter(object): """A pointer to a single counter withing a binary counters file.""" def __init__(self, data, offset): """Create a new instance. Args: data: the shared data access object containing the counter offset: the byte offset of the start of this counter """ self.data = data self.offset = offset def Value(self): """Return the integer value of this counter.""" return self.data.IntAt(self.offset) def Name(self): """Return the ascii name of this counter.""" result = "" index = self.offset + 4 current = self.data.ByteAt(index) while current: result += chr(current) index += 1 current = self.data.ByteAt(index) return result class CounterCollection(object): """An overlay over a counters file that provides access to the individual counters contained in the file.""" def __init__(self, data): """Create a new instance. 
Args: data: the shared data access object """ self.data = data self.max_counters = data.IntAt(4) self.max_name_size = data.IntAt(8) def CountersInUse(self): """Return the number of counters in active use.""" return self.data.IntAt(12) def Counter(self, index): """Return the index'th counter.""" return Counter(self.data, 16 + index * self.CounterSize()) def CounterSize(self): """Return the size of a single counter.""" return 4 + self.max_name_size class ChromeCounter(object): """A pointer to a single counter withing a binary counters file.""" def __init__(self, data, name_offset, value_offset): """Create a new instance. Args: data: the shared data access object containing the counter name_offset: the byte offset of the start of this counter's name value_offset: the byte offset of the start of this counter's value """ self.data = data self.name_offset = name_offset self.value_offset = value_offset def Value(self): """Return the integer value of this counter.""" return self.data.IntAt(self.value_offset) def Name(self): """Return the ascii name of this counter.""" result = "" index = self.name_offset current = self.data.ByteAt(index) while current: result += chr(current) index += 1 current = self.data.ByteAt(index) return result class ChromeCounterCollection(object): """An overlay over a counters file that provides access to the individual counters contained in the file.""" _HEADER_SIZE = 4 * 4 _COUNTER_NAME_SIZE = 64 _THREAD_NAME_SIZE = 32 def __init__(self, data): """Create a new instance. 
Args: data: the shared data access object """ self.data = data self.max_counters = data.IntAt(8) self.max_threads = data.IntAt(12) self.counter_names_offset = \ self._HEADER_SIZE + self.max_threads * (self._THREAD_NAME_SIZE + 2 * 4) self.counter_values_offset = \ self.counter_names_offset + self.max_counters * self._COUNTER_NAME_SIZE def CountersInUse(self): """Return the number of counters in active use.""" for i in xrange(self.max_counters): name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE if self.data.ByteAt(name_offset) == 0: return i return self.max_counters def Counter(self, i): """Return the i'th counter.""" name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE value_offset = self.counter_values_offset + i * self.max_threads * 4 return ChromeCounter(self.data, name_offset, value_offset) def Main(data_file, name_filter): """Run the stats counter. Args: data_file: The counters file to monitor. name_filter: The regexp filter to apply to counter names. """ StatsViewer(data_file, name_filter).Run() if __name__ == "__main__": parser = optparse.OptionParser("usage: %prog [--filter=re] " "<stats data>|<test_shell pid>") parser.add_option("--filter", default=".*", help=("regexp filter for counter names " "[default: %default]")) (options, args) = parser.parse_args() if len(args) != 1: parser.print_help() sys.exit(1) Main(args[0], re.compile(options.filter))
gpl-2.0
Jeremy-WEI/python-mode
pymode/libs2/rope/contrib/autoimport.py
90
8103
import re

from rope.base import (exceptions, pynames, resourceobserver,
                       taskhandle, pyobjects, builtins, resources)
from rope.refactor import importutils


class AutoImport(object):
    """A class for finding the module that provides a name

    This class maintains a cache of global names in python modules.
    Note that this cache is not accurate and might be out of date.

    """

    def __init__(self, project, observe=True, underlined=False):
        """Construct an AutoImport object

        If `observe` is `True`, listen for project changes and update
        the cache.

        If `underlined` is `True`, underlined names are cached, too.
        """
        self.project = project
        self.underlined = underlined
        # Cache persisted via the project's data files; maps module name
        # to the list of global names defined in that module.
        self.names = project.data_files.read_data('globalnames')
        if self.names is None:
            self.names = {}
        # Register so the cache is saved whenever project data is written.
        project.data_files.add_write_hook(self._write)
        # XXX: using a filtered observer
        observer = resourceobserver.ResourceObserver(
            changed=self._changed, moved=self._moved, removed=self._removed)
        if observe:
            project.add_observer(observer)

    def import_assist(self, starting):
        """Return a list of ``(name, module)`` tuples

        This function tries to find modules that have a global name
        that starts with `starting`.
        """
        # XXX: breaking if gave up! use generators
        result = []
        for module in self.names:
            for global_name in self.names[module]:
                if global_name.startswith(starting):
                    result.append((global_name, module))
        return result

    def get_modules(self, name):
        """Return the list of modules that have global `name`"""
        result = []
        for module in self.names:
            if name in self.names[module]:
                result.append(module)
        return result

    def get_all_names(self):
        """Return the list of all cached global names"""
        result = set()
        for module in self.names:
            result.update(set(self.names[module]))
        return result

    def get_name_locations(self, name):
        """Return a list of ``(resource, lineno)`` tuples"""
        result = []
        pycore = self.project.pycore
        for module in self.names:
            if name in self.names[module]:
                try:
                    pymodule = pycore.get_module(module)
                    if name in pymodule:
                        pyname = pymodule[name]
                        # NOTE: `module` is rebound here from the module name
                        # to the defining module object.
                        module, lineno = pyname.get_definition_location()
                        if module is not None:
                            resource = module.get_module().get_resource()
                            if resource is not None and lineno is not None:
                                result.append((resource, lineno))
                except exceptions.ModuleNotFoundError:
                    # Stale cache entries for deleted modules are skipped.
                    pass
        return result

    def generate_cache(self, resources=None, underlined=None,
                       task_handle=taskhandle.NullTaskHandle()):
        """Generate global name cache for project files

        If `resources` is a list of `rope.base.resource.File`\\s, only
        those files are searched; otherwise all python modules in the
        project are cached.

        """
        if resources is None:
            resources = self.project.pycore.get_python_files()
        # NOTE(review): 'Generatig' typo below is in a user-visible runtime
        # string; left untouched here to preserve behavior byte-for-byte.
        job_set = task_handle.create_jobset(
            'Generatig autoimport cache', len(resources))
        for file in resources:
            job_set.started_job('Working on <%s>' % file.path)
            self.update_resource(file, underlined)
            job_set.finished_job()

    def generate_modules_cache(self, modules, underlined=None,
                               task_handle=taskhandle.NullTaskHandle()):
        """Generate global name cache for modules listed in `modules`"""
        job_set = task_handle.create_jobset(
            'Generatig autoimport cache for modules', len(modules))
        for modname in modules:
            job_set.started_job('Working on <%s>' % modname)
            if modname.endswith('.*'):
                # A trailing '.*' means "this package and all submodules".
                mod = self.project.pycore.find_module(modname[:-2])
                if mod:
                    for sub in submodules(mod):
                        self.update_resource(sub, underlined)
            else:
                self.update_module(modname, underlined)
            job_set.finished_job()

    def clear_cache(self):
        """Clear all entries in global-name cache

        It might be a good idea to use this function before
        regenerating global names.

        """
        self.names.clear()

    def find_insertion_line(self, code):
        """Guess at what line the new import should be inserted"""
        # Only consider the header of the module, before the first
        # top-level def/class.
        match = re.search(r'^(def|class)\s+', code)
        if match is not None:
            code = code[:match.start()]
        try:
            pymodule = self.project.pycore.get_string_module(code)
        except exceptions.ModuleSyntaxError:
            return 1
        # Insert a probe import and see where rope places it.
        testmodname = '__rope_testmodule_rope'
        importinfo = importutils.NormalImport(((testmodname, None),))
        module_imports = importutils.get_module_imports(
            self.project.pycore, pymodule)
        module_imports.add_import(importinfo)
        code = module_imports.get_changed_source()
        offset = code.index(testmodname)
        lineno = code.count('\n', 0, offset) + 1
        return lineno

    def update_resource(self, resource, underlined=None):
        """Update the cache for global names in `resource`"""
        try:
            pymodule = self.project.pycore.resource_to_pyobject(resource)
            modname = self._module_name(resource)
            self._add_names(pymodule, modname, underlined)
        except exceptions.ModuleSyntaxError:
            # Unparsable files simply keep their previous cache entry.
            pass

    def update_module(self, modname, underlined=None):
        """Update the cache for global names in `modname` module

        `modname` is the name of a module.

        """
        try:
            pymodule = self.project.pycore.get_module(modname)
            self._add_names(pymodule, modname, underlined)
        except exceptions.ModuleNotFoundError:
            pass

    def _module_name(self, resource):
        # Map a file resource to its dotted module name.
        return self.project.pycore.modname(resource)

    def _add_names(self, pymodule, modname, underlined):
        # Record the global names of `pymodule` under key `modname`.
        if underlined is None:
            underlined = self.underlined
        globals = []
        if isinstance(pymodule, pyobjects.PyDefinedObject):
            attributes = pymodule._get_structural_attributes()
        else:
            attributes = pymodule.get_attributes()
        for name, pyname in attributes.items():
            if not underlined and name.startswith('_'):
                continue
            if isinstance(pyname,
                          (pynames.AssignedName, pynames.DefinedName)):
                globals.append(name)
            # Builtin modules expose every attribute as a global.
            if isinstance(pymodule, builtins.BuiltinModule):
                globals.append(name)
        self.names[modname] = globals

    def _write(self):
        # Persist the cache through the project's data-file mechanism.
        self.project.data_files.write_data('globalnames', self.names)

    def _changed(self, resource):
        if not resource.is_folder():
            self.update_resource(resource)

    def _moved(self, resource, newresource):
        # Drop the entry under the old name, re-cache under the new one.
        if not resource.is_folder():
            modname = self._module_name(resource)
            if modname in self.names:
                del self.names[modname]
            self.update_resource(newresource)

    def _removed(self, resource):
        if not resource.is_folder():
            modname = self._module_name(resource)
            if modname in self.names:
                del self.names[modname]


def submodules(mod):
    """Return the set of python modules under `mod` (inclusive).

    `mod` may be a file or a package folder; folders without an
    ``__init__.py`` are not considered packages and yield nothing.
    """
    if isinstance(mod, resources.File):
        if mod.name.endswith('.py') and mod.name != '__init__.py':
            return set([mod])
        return set()
    if not mod.has_child('__init__.py'):
        return set()
    result = set([mod])
    for child in mod.get_children():
        result |= submodules(child)
    return result
lgpl-3.0
abhishekgahlot/scikit-learn
examples/cluster/plot_segmentation_toy.py
258
3336
""" =========================================== Spectral clustering for image segmentation =========================================== In this example, an image with connected circles is generated and spectral clustering is used to separate the circles. In these settings, the :ref:`spectral_clustering` approach solves the problem know as 'normalized graph cuts': the image is seen as a graph of connected voxels, and the spectral clustering algorithm amounts to choosing graph cuts defining regions while minimizing the ratio of the gradient along the cut, and the volume of the region. As the algorithm tries to balance the volume (ie balance the region sizes), if we take circles with different sizes, the segmentation fails. In addition, as there is no useful information in the intensity of the image, or its gradient, we choose to perform the spectral clustering on a graph that is only weakly informed by the gradient. This is close to performing a Voronoi partition of the graph. In addition, we use the mask of the objects to restrict the graph to the outline of the objects. In this example, we are interested in separating the objects one from the other, and not from the background. 
""" print(__doc__) # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org> # Gael Varoquaux <gael.varoquaux@normalesup.org> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.feature_extraction import image from sklearn.cluster import spectral_clustering ############################################################################### l = 100 x, y = np.indices((l, l)) center1 = (28, 24) center2 = (40, 50) center3 = (67, 58) center4 = (24, 70) radius1, radius2, radius3, radius4 = 16, 14, 15, 14 circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2 circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2 circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2 circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2 ############################################################################### # 4 circles img = circle1 + circle2 + circle3 + circle4 mask = img.astype(bool) img = img.astype(float) img += 1 + 0.2 * np.random.randn(*img.shape) # Convert the image into a graph with the value of the gradient on the # edges. 
graph = image.img_to_graph(img, mask=mask) # Take a decreasing function of the gradient: we take it weakly # dependent from the gradient the segmentation is close to a voronoi graph.data = np.exp(-graph.data / graph.data.std()) # Force the solver to be arpack, since amg is numerically # unstable on this example labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack') label_im = -np.ones(mask.shape) label_im[mask] = labels plt.matshow(img) plt.matshow(label_im) ############################################################################### # 2 circles img = circle1 + circle2 mask = img.astype(bool) img = img.astype(float) img += 1 + 0.2 * np.random.randn(*img.shape) graph = image.img_to_graph(img, mask=mask) graph.data = np.exp(-graph.data / graph.data.std()) labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack') label_im = -np.ones(mask.shape) label_im[mask] = labels plt.matshow(img) plt.matshow(label_im) plt.show()
bsd-3-clause
KamillaKhabibrakhmanova/fish
node_modules/ionic/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py
2698
3270
#!/usr/bin/env python

# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

""" Unit tests for the easy_xml.py file. """

import gyp.easy_xml as easy_xml
import unittest
import StringIO


class TestSequenceFunctions(unittest.TestCase):
  """Tests for easy_xml.XmlToString (list-based XML specifications)."""

  def setUp(self):
    # Captured-stderr buffer; available to tests that need it.
    self.stderr = StringIO.StringIO()

  def test_EasyXml_simple(self):
    """A bare element, with default and explicit encodings."""
    self.assertEqual(
      easy_xml.XmlToString(['test']),
      '<?xml version="1.0" encoding="utf-8"?><test/>')

    self.assertEqual(
      easy_xml.XmlToString(['test'], encoding='Windows-1252'),
      '<?xml version="1.0" encoding="Windows-1252"?><test/>')

  def test_EasyXml_simple_with_attributes(self):
    """A dict following the tag name becomes the element's attributes."""
    self.assertEqual(
      easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
      '<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')

  def test_EasyXml_escaping(self):
    """Special characters are escaped; apostrophes only inside attributes."""
    original = '<test>\'"\r&\nfoo'
    converted = '&lt;test&gt;\'&quot;&#xD;&amp;&#xA;foo'
    converted_apos = converted.replace("'", '&apos;')
    self.assertEqual(
      easy_xml.XmlToString(['test3', {'a': original}, original]),
      '<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
      (converted, converted_apos))

  def test_EasyXml_pretty(self):
    """pretty=True indents nested elements and adds newlines."""
    self.assertEqual(
      easy_xml.XmlToString(
          ['test3',
            ['GrandParent',
              ['Parent1',
                ['Child']
              ],
              ['Parent2']
            ]
          ],
          pretty=True),
      '<?xml version="1.0" encoding="utf-8"?>\n'
      '<test3>\n'
      '  <GrandParent>\n'
      '    <Parent1>\n'
      '      <Child/>\n'
      '    </Parent1>\n'
      '    <Parent2/>\n'
      '  </GrandParent>\n'
      '</test3>\n')

  def test_EasyXml_complex(self):
    """A realistic MSVS project fragment with nesting and attributes."""
    # We want to create:
    target = (
      '<?xml version="1.0" encoding="utf-8"?>'
      '<Project>'
      '<PropertyGroup Label="Globals">'
      '<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
      '<Keyword>Win32Proj</Keyword>'
      '<RootNamespace>automated_ui_tests</RootNamespace>'
      '</PropertyGroup>'
      '<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
      '<PropertyGroup '
      'Condition="\'$(Configuration)|$(Platform)\'=='
      '\'Debug|Win32\'" Label="Configuration">'
      '<ConfigurationType>Application</ConfigurationType>'
      '<CharacterSet>Unicode</CharacterSet>'
      '</PropertyGroup>'
      '</Project>')

    xml = easy_xml.XmlToString(
        ['Project',
          ['PropertyGroup', {'Label': 'Globals'},
            ['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
            ['Keyword', 'Win32Proj'],
            ['RootNamespace', 'automated_ui_tests']
          ],
          ['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
          ['PropertyGroup',
            {'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
             'Label': 'Configuration'},
            ['ConfigurationType', 'Application'],
            ['CharacterSet', 'Unicode']
          ]
        ])
    self.assertEqual(xml, target)


if __name__ == '__main__':
  unittest.main()
mit
Vassy/odoo
addons/lunch/report/report_lunch_order.py
39
2778
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import tools
from openerp.osv import fields,osv


class report_lunch_order(osv.osv):
    """Read-only reporting model backed by a SQL view (_auto = False).

    Aggregates lunch order lines per (date, user, note), summing the
    product prices into ``price_total``.
    """
    _name = "report.lunch.order.line"
    _description = "Lunch Orders Statistics"
    _auto = False          # no table is created; init() builds a view instead
    _rec_name = 'date'
    _columns = {
        'date': fields.date('Date Order', readonly=True, select=True),
        'year': fields.char('Year', size=4, readonly=True),
        'month': fields.selection([('01','January'), ('02','February'), ('03','March'),
            ('04','April'), ('05','May'), ('06','June'), ('07','July'),
            ('08','August'), ('09','September'), ('10','October'),
            ('11','November'), ('12','December')], 'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'user_id': fields.many2one('res.users', 'User Name'),
        'price_total': fields.float('Total Price', readonly=True),
        'note': fields.text('Note', size=256, readonly=True),
    }
    _order = 'date desc'

    def init(self, cr):
        """(Re)create the SQL view that backs this report model."""
        tools.drop_view_if_exists(cr, 'report_lunch_order_line')
        cr.execute("""
            create or replace view report_lunch_order_line as (
               select
                   min(lo.id) as id,
                   lo.user_id as user_id,
                   lo.date as date,
                   to_char(lo.date, 'YYYY') as year,
                   to_char(lo.date, 'MM') as month,
                   to_char(lo.date, 'YYYY-MM-DD') as day,
                   lo.note as note,
                   sum(lp.price) as price_total
               from
                   lunch_order_line as lo
                   left join lunch_product as lp on (lo.product_id = lp.id)
               group by
                   lo.date,lo.user_id,lo.note
            )
            """)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
rmcgibbo/scipy
scipy/optimize/tests/test_linprog.py
100
14613
""" Unit test for Linear Programming via Simplex Algorithm. """ from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (assert_, assert_array_almost_equal, assert_allclose, assert_almost_equal, assert_raises, assert_equal, run_module_suite) from scipy.optimize import linprog, OptimizeWarning from scipy._lib._numpy_compat import _assert_warns def lpgen_2d(m,n): """ -> A b c LP test: m*n vars, m+n constraints row sums == n/m, col sums == 1 https://gist.github.com/denis-bz/8647461 """ np.random.seed(0) c = - np.random.exponential(size=(m,n)) Arow = np.zeros((m,m*n)) brow = np.zeros(m) for j in range(m): j1 = j + 1 Arow[j,j*n:j1*n] = 1 brow[j] = n/m Acol = np.zeros((n,m*n)) bcol = np.zeros(n) for j in range(n): j1 = j + 1 Acol[j,j::n] = 1 bcol[j] = 1 A = np.vstack((Arow,Acol)) b = np.hstack((brow,bcol)) return A, b, c.ravel() def _assert_infeasible(res): # res: linprog result object assert_(not res.success, "incorrectly reported success") assert_equal(res.status, 2, "failed to report infeasible status") def _assert_unbounded(res): # res: linprog result object assert_(not res.success, "incorrectly reported success") assert_equal(res.status, 3, "failed to report unbounded status") def _assert_success(res, desired_fun=None, desired_x=None): # res: linprog result object # desired_fun: desired objective function value or None # desired_x: desired solution or None assert_(res.success) assert_equal(res.status, 0) if desired_fun is not None: assert_allclose(res.fun, desired_fun, err_msg="converged to an unexpected objective value") if desired_x is not None: assert_allclose(res.x, desired_x, err_msg="converged to an unexpected solution") def test_aliasing_b_ub(): c = np.array([1.0]) A_ub = np.array([[1.0]]) b_ub_orig = np.array([3.0]) b_ub = b_ub_orig.copy() bounds = (-4.0, np.inf) res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds) _assert_success(res, desired_fun=-4, desired_x=[-4]) assert_allclose(b_ub_orig, b_ub) def 
test_aliasing_b_eq(): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq_orig = np.array([3.0]) b_eq = b_eq_orig.copy() bounds = (-4.0, np.inf) res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds) _assert_success(res, desired_fun=3, desired_x=[3]) assert_allclose(b_eq_orig, b_eq) def test_bounds_second_form_unbounded_below(): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq = np.array([3.0]) bounds = (None, 10.0) res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds) _assert_success(res, desired_fun=3, desired_x=[3]) def test_bounds_second_form_unbounded_above(): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq = np.array([3.0]) bounds = (1.0, None) res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds) _assert_success(res, desired_fun=3, desired_x=[3]) def test_non_ndarray_args(): c = [1.0] A_ub = [[1.0]] b_ub = [3.0] A_eq = [[1.0]] b_eq = [2.0] bounds = (-1.0, 10.0) res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds) _assert_success(res, desired_fun=2, desired_x=[2]) def test_linprog_upper_bound_constraints(): # Maximize a linear function subject to only linear upper bound constraints. # http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf c = np.array([3,2])*-1 # maximize A_ub = [[2,1], [1,1], [1,0]] b_ub = [10,8,4] res = (linprog(c,A_ub=A_ub,b_ub=b_ub)) _assert_success(res, desired_fun=-18, desired_x=[2, 6]) def test_linprog_mixed_constraints(): # Minimize linear function subject to non-negative variables. 
# http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf c = [6,3] A_ub = [[0, 3], [-1,-1], [-2, 1]] b_ub = [2,-1,-1] res = linprog(c,A_ub=A_ub,b_ub=b_ub) _assert_success(res, desired_fun=5, desired_x=[2/3, 1/3]) def test_linprog_cyclic_recovery(): # Test linprogs recovery from cycling using the Klee-Minty problem # Klee-Minty http://www.math.ubc.ca/~israel/m340/kleemin3.pdf c = np.array([100,10,1])*-1 # maximize A_ub = [[1, 0, 0], [20, 1, 0], [200,20, 1]] b_ub = [1,100,10000] res = linprog(c,A_ub=A_ub,b_ub=b_ub) _assert_success(res, desired_x=[0, 0, 10000]) def test_linprog_cyclic_bland(): # Test the effect of Bland's rule on a cycling problem c = np.array([-10, 57, 9, 24.]) A_ub = np.array([[0.5, -5.5, -2.5, 9], [0.5, -1.5, -0.5, 1], [1, 0, 0, 0]]) b_ub = [0, 0, 1] res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=dict(maxiter=100)) assert_(not res.success) res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=dict(maxiter=100, bland=True,)) _assert_success(res, desired_x=[1, 0, 1, 0]) def test_linprog_unbounded(): # Test linprog response to an unbounded problem c = np.array([1,1])*-1 # maximize A_ub = [[-1,1], [-1,-1]] b_ub = [-1,-2] res = linprog(c,A_ub=A_ub,b_ub=b_ub) _assert_unbounded(res) def test_linprog_infeasible(): # Test linrpog response to an infeasible problem c = [-1,-1] A_ub = [[1,0], [0,1], [-1,-1]] b_ub = [2,2,-5] res = linprog(c,A_ub=A_ub,b_ub=b_ub) _assert_infeasible(res) def test_nontrivial_problem(): # Test linprog for a problem involving all constraint types, # negative resource limits, and rounding issues. c = [-1,8,4,-6] A_ub = [[-7,-7,6,9], [1,-1,-3,0], [10,-10,-7,7], [6,-1,3,4]] b_ub = [-3,6,-6,6] A_eq = [[-10,1,1,-8]] b_eq = [-4] res = linprog(c,A_ub=A_ub,b_ub=b_ub,A_eq=A_eq,b_eq=b_eq) _assert_success(res, desired_fun=7083/1391, desired_x=[101/1391,1462/1391,0,752/1391]) def test_negative_variable(): # Test linprog with a problem with one unbounded variable and # another with a negative lower bound. 
c = np.array([-1,4])*-1 # maximize A_ub = np.array([[-3,1], [1, 2]], dtype=np.float64) A_ub_orig = A_ub.copy() b_ub = [6,4] x0_bounds = (-np.inf,np.inf) x1_bounds = (-3,np.inf) res = linprog(c,A_ub=A_ub,b_ub=b_ub,bounds=(x0_bounds,x1_bounds)) assert_equal(A_ub, A_ub_orig) # user input not overwritten _assert_success(res, desired_fun=-80/7, desired_x=[-8/7, 18/7]) def test_large_problem(): # Test linprog simplex with a rather large problem (400 variables, # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 A,b,c = lpgen_2d(20,20) res = linprog(c,A_ub=A,b_ub=b) _assert_success(res, desired_fun=-64.049494229) def test_network_flow(): # A network flow problem with supply and demand at nodes # and with costs along directed edges. # https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18] n, p = -1, 1 A_eq = [ [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0], [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0], [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0], [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p], [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]] b_eq = [0, 19, -16, 33, 0, 0, -36] res = linprog(c=c, A_eq=A_eq, b_eq=b_eq) _assert_success(res, desired_fun=755) def test_network_flow_limited_capacity(): # A network flow problem with supply and demand at nodes # and with costs and capacities along directed edges. # http://blog.sommer-forst.de/2013/04/10/ cost = [2, 2, 1, 3, 1] bounds = [ [0, 4], [0, 2], [0, 2], [0, 3], [0, 5]] n, p = -1, 1 A_eq = [ [n, n, 0, 0, 0], [p, 0, n, n, 0], [0, p, p, 0, n], [0, 0, 0, p, p]] b_eq = [-4, 0, 0, 4] # Including the callback here ensures the solution can be # calculated correctly, even when phase 1 terminated # with some of the artificial variables as pivots # (i.e. 
basis[:m] contains elements corresponding to # the artificial variables) res = linprog(c=cost, A_eq=A_eq, b_eq=b_eq, bounds=bounds, callback=lambda x, **kwargs: None) _assert_success(res, desired_fun=14) def test_simplex_algorithm_wikipedia_example(): # http://en.wikipedia.org/wiki/Simplex_algorithm#Example Z = [-2, -3, -4] A_ub = [ [3, 2, 1], [2, 5, 3]] b_ub = [10, 15] res = linprog(c=Z, A_ub=A_ub, b_ub=b_ub) _assert_success(res, desired_fun=-20) def test_enzo_example(): # http://projects.scipy.org/scipy/attachment/ticket/1252/lp2.py # # Translated from Octave code at: # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm # and placed under MIT licence by Enzo Michelangeli # with permission explicitly granted by the original author, # Prof. Kazunobu Yoshida c = [4, 8, 3, 0, 0, 0] A_eq = [ [2, 5, 3, -1, 0, 0], [3, 2.5, 8, 0, -1, 0], [8, 10, 4, 0, 0, -1]] b_eq = [185, 155, 600] res = linprog(c=c, A_eq=A_eq, b_eq=b_eq) _assert_success(res, desired_fun=317.5, desired_x=[66.25, 0, 17.5, 0, 183.75, 0]) def test_enzo_example_b(): # rescued from https://github.com/scipy/scipy/pull/218 c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8] A_eq = [[-1, -1, -1, 0, 0, 0], [0, 0, 0, 1, 1, 1], [1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1]] b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3] # Including the callback here ensures the solution can be # calculated correctly. 
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, callback=lambda x, **kwargs: None) _assert_success(res, desired_fun=-1.77, desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3]) def test_enzo_example_c_with_degeneracy(): # rescued from https://github.com/scipy/scipy/pull/218 m = 20 c = -np.ones(m) tmp = 2*np.pi*np.arange(1, m+1)/(m+1) A_eq = np.vstack((np.cos(tmp)-1, np.sin(tmp))) b_eq = [0, 0] res = linprog(c=c, A_eq=A_eq, b_eq=b_eq) _assert_success(res, desired_fun=0, desired_x=np.zeros(m)) def test_enzo_example_c_with_unboundedness(): # rescued from https://github.com/scipy/scipy/pull/218 m = 50 c = -np.ones(m) tmp = 2*np.pi*np.arange(m)/(m+1) A_eq = np.vstack((np.cos(tmp)-1, np.sin(tmp))) b_eq = [0, 0] res = linprog(c=c, A_eq=A_eq, b_eq=b_eq) _assert_unbounded(res) def test_enzo_example_c_with_infeasibility(): # rescued from https://github.com/scipy/scipy/pull/218 m = 50 c = -np.ones(m) tmp = 2*np.pi*np.arange(m)/(m+1) A_eq = np.vstack((np.cos(tmp)-1, np.sin(tmp))) b_eq = [1, 1] res = linprog(c=c, A_eq=A_eq, b_eq=b_eq) _assert_infeasible(res) def test_callback(): # Check that callback is as advertised callback_complete = [False] last_xk = [] def cb(xk, **kwargs): kwargs.pop('tableau') assert_(isinstance(kwargs.pop('phase'), int)) assert_(isinstance(kwargs.pop('nit'), int)) i, j = kwargs.pop('pivot') assert_(np.isscalar(i)) assert_(np.isscalar(j)) basis = kwargs.pop('basis') assert_(isinstance(basis, np.ndarray)) assert_(basis.dtype == np.int_) complete = kwargs.pop('complete') assert_(isinstance(complete, bool)) if complete: last_xk.append(xk) callback_complete[0] = True else: assert_(not callback_complete[0]) # no more kwargs assert_(not kwargs) c = np.array([-3,-2]) A_ub = [[2,1], [1,1], [1,0]] b_ub = [10,8,4] res = linprog(c,A_ub=A_ub,b_ub=b_ub, callback=cb) assert_(callback_complete[0]) assert_allclose(last_xk[0], res.x) def test_unknown_options_or_solver(): c = np.array([-3,-2]) A_ub = [[2,1], [1,1], [1,0]] b_ub = [10,8,4] _assert_warns(OptimizeWarning, linprog, c, A_ub=A_ub, 
b_ub=b_ub, options=dict(spam='42')) assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki') def test_no_constraints(): res = linprog([-1, -2]) assert_equal(res.x, [0, 0]) _assert_unbounded(res) def test_simple_bounds(): res = linprog([1, 2], bounds=(1, 2)) _assert_success(res, desired_x=[1, 1]) res = linprog([1, 2], bounds=[(1, 2), (1, 2)]) _assert_success(res, desired_x=[1, 1]) def test_invalid_inputs(): for bad_bound in [[(5, 0), (1, 2), (3, 4)], [(1, 2), (3, 4)], [(1, 2), (3, 4), (3, 4, 5)], [(1, 2), (np.inf, np.inf), (3, 4)], [(1, 2), (-np.inf, -np.inf), (3, 4)], ]: assert_raises(ValueError, linprog, [1, 2, 3], bounds=bad_bound) assert_raises(ValueError, linprog, [1,2], A_ub=[[1,2]], b_ub=[1,2]) assert_raises(ValueError, linprog, [1,2], A_ub=[[1]], b_ub=[1]) assert_raises(ValueError, linprog, [1,2], A_eq=[[1,2]], b_eq=[1,2]) assert_raises(ValueError, linprog, [1,2], A_eq=[[1]], b_eq=[1]) assert_raises(ValueError, linprog, [1,2], A_eq=[1], b_eq=1) assert_raises(ValueError, linprog, [1,2], A_ub=np.zeros((1,1,3)), b_eq=1) def test_basic_artificial_vars(): # Test if linprog succeeds when at the end of Phase 1 some artificial # variables remain basic, and the row in T corresponding to the # artificial variables is not all zero. c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004]) A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0], [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0], [1.0, 1.0, 0, 0, 0, 0]]) b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0]) A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]]) b_eq = np.array([0, 0]) res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, callback=lambda x, **kwargs: None) _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c)) if __name__ == '__main__': run_module_suite()
bsd-3-clause
augmify/omim
android/UnitTests/runtests.py
53
5780
#!/usr/bin/env python

"""
This script is for running the Android tests on android devices from the
build server. It may take as a parameter the build number (e.g. the build
number from Jenkins). The build number may also be a string. The script
removes all the apps whose ids contain "mapswithme", and cleans up the
device logs before running the tests, so make sure you don't have
important logs on devices before you connect them to the server. After
the test, the device logs are filtered and saved as
<build number>_<device_id>.log in the current directory.
"""

from __future__ import print_function

import re
import subprocess
import time
import sys
import threading
import os
from os import listdir
from os.path import isfile, join

# this list is for removing possible MapsWithMe data folders, not used yet, but will be in the future
# mapswithme_paths=["storage/sdcard0/MapsWithMe", "mnt/sdcard/MapsWithMe",
#                  "/mnt/shell/emulated/0/MapsWithMe", "/mnt/sdcard/MapsWithMe"]

# Fully-qualified activity that runs the whole unit-test suite on device.
ACTIVITY_NAME = "com.mapswithme.maps.unittests.debug/com.mapswithme.maps.unittests.AllTestsActivity"
APP_ID = "com.mapswithme.maps.unittests.debug"
APK_LOCATION = "build/outputs/apk/UnitTests-debug.apk"

MAX_TEST_RUN_TIME = 3600  # seconds
TEST_WAIT_PAUSE = 2  # seconds

# Device serials reported by `adb devices` are hex strings of >= 4 chars.
SERIAL_PATTERN = re.compile("^[\da-f]{4,}")

# Overridden from argv[1] in main(); used when naming the log files.
build_number = "0"


def exec_shell(command):
    # Run `command` and return its output split on CRLF line endings.
    # print "> " + command
    s = subprocess.check_output(command.split())
    return s.split('\r\n')


def adb(command, serial=None, shell=False):
    # Build and run an `adb [-s serial] [shell] command` invocation.
    shell_cmd = list(['adb'])
    if serial is not None:
        shell_cmd += ['-s', serial]
    if shell:
        shell_cmd += ['shell']
    shell_cmd += [command]
    # NOTE(review): replace(" ", " ") is a no-op as written; it was
    # presumably meant to collapse doubled spaces — confirm upstream.
    return exec_shell(' '.join(shell_cmd).replace(" ", " "))


def uninstall(serial, package):
    # Remove `package` from the device, keeping its data (-k).
    adb("pm uninstall -k {}".format(package), serial=serial, shell=True)


def install(serial, path):
    # Install the apk at `path` onto the device `serial`.
    command = " -s {serial} install {path}".format(serial=serial, path=path)
    adb(command)


def connected_devices():
    # Return the serial numbers of all devices visible to adb.
    adb_devices = adb("devices")
    filter_fn = lambda device: SERIAL_PATTERN.match(device)
    map_fn = lambda d: d.split("\t")[0]
    return [id for id in process_output(adb_devices, map_fn, filter_fn)]


def process_output(output, fn, filter_fn):
    """
    adb returns an array of strings, at least one of them contains
    several lines of output. To all probability, only the 0th string in
    the array contains any meaningful output, but there is some chance
    that others might as well, so to be on the safe side, we process
    all of them.
    """
    for full_string in output:
        lines = full_string.split("\n")
        for line in lines:
            if not filter_fn(line):
                continue
            yield fn(line)


def packages(serial):
    # Yield the ids of installed packages whose name contains "mapswithme".
    packs = adb("pm list packages | grep mapswithme", serial=serial, shell=True)
    filter_fn = lambda x: x.startswith("package:")
    ret_fn = lambda x: x.split(":")[1]
    return process_output(packs, ret_fn, filter_fn)


def app_is_running(serial):
    # if the app is not running, we get just an empty line, otherwise we get
    # the app info line + the empty line. This lets us assume that the app
    # is not running.
    command = "ps | grep {}".format(APP_ID)
    result = adb(command, serial=serial, shell=True)
    return len(result) > 1


def run_app(serial):
    # Launch the test-suite activity on the device.
    command = "am start -n {}".format(ACTIVITY_NAME)
    adb(command, serial=serial, shell=True)


def save_log(serial):
    # Dump the device log, keep only test output, and write it to a file.
    command = "logcat -d | grep MapsMeTest"
    device_log = adb(command, serial=serial)
    lines = process_output(device_log, lambda x: x, lambda x: True)
    write_lines_to_file(lines, serial)


def write_lines_to_file(lines, serial):
    # File name combines the build number and the device serial.
    with open("{build_number}_{serial}.log".format(build_number=build_number, serial=serial), "w") as log_file:
        for line in lines:
            log_file.write(line + "\n")


def clear_log(serial):
    # Erase the device's logcat buffer before a test run.
    command = "-s {serial} logcat -c".format(serial=serial)
    adb(command)


def device_run_loop(serial):
    # Run the tests on one device, polling until the app exits or the
    # MAX_TEST_RUN_TIME budget is exhausted, then collect the log.
    start = time.time()
    clear_log(serial)
    run_app(serial)
    elapsed_time = 0
    while elapsed_time < MAX_TEST_RUN_TIME:
        if not app_is_running(serial):
            break
        time.sleep(TEST_WAIT_PAUSE)
        elapsed_time += TEST_WAIT_PAUSE

    if elapsed_time >= MAX_TEST_RUN_TIME:
        print("The tests on {serial} took too long".format(serial=serial))

    save_log(serial)
    end = time.time()
    print("Ran tests on {serial} in {duration} sec.".format(serial=serial, duration=(end - start)))


def clean_device(serial):
    # Uninstall every mapswithme package, then install the test apk.
    start = time.time()
    for pack in packages(serial):
        uninstall(serial, pack)
    install(serial, APK_LOCATION)
    end = time.time()
    print("Cleaned up {serial} in {duration} sec.".format(serial=serial, duration=(end - start)))


def process_devices(device_ids, fn):
    # Apply `fn(serial)` to every device concurrently, one thread each.
    run_loop_threads = []
    for serial in device_ids:
        thread = threading.Thread(target=fn, args=(serial,))
        run_loop_threads.append(thread)
        thread.start()

    for thread in run_loop_threads:
        thread.join()


def main():
    # Remove stale logs from previous runs in the current directory.
    logs = [f for f in listdir(".") if f.endswith(".log") and isfile(join(".", f))]
    for log in logs:
        os.remove(log)

    if len(sys.argv) > 1:
        global build_number
        build_number = sys.argv[1]

    device_ids = connected_devices()
    print("Running on devices:")
    for device_id in device_ids:
        print(device_id)

    print("\nCleaning up devices and installing test apk...")
    process_devices(device_ids, clean_device)
    print("\nRunning the test suites...")
    process_devices(device_ids, device_run_loop)
    print("\nTests finished running on all devices")


if __name__ == "__main__":
    main()
apache-2.0
sanketloke/scikit-learn
sklearn/multioutput.py
17
9084
""" This module implements multioutput regression and classification. The estimators provided in this module are meta-estimators: they require a base estimator to be provided in their constructor. The meta-estimator extends single output estimators to multioutput estimators. """ # Author: Tim Head <betatim@gmail.com> # Author: Hugo Bowne-Anderson <hugobowne@gmail.com> # Author: Chris Rivera <chris.richard.rivera@gmail.com> # Author: Michael Williamson # Author: James Ashton Nichols <james.ashton.nichols@gmail.com> # # License: BSD 3 clause import numpy as np from abc import ABCMeta, abstractmethod from .base import BaseEstimator, clone, MetaEstimatorMixin from .base import RegressorMixin, ClassifierMixin from .utils import check_array, check_random_state, check_X_y from .utils.fixes import parallel_helper from .utils.validation import check_is_fitted, has_fit_parameter from .externals.joblib import Parallel, delayed from .externals import six __all__ = ["MultiOutputRegressor", "MultiOutputClassifier"] def _fit_estimator(estimator, X, y, sample_weight=None): estimator = clone(estimator) if sample_weight is not None: estimator.fit(X, y, sample_weight=sample_weight) else: estimator.fit(X, y) return estimator class MultiOutputEstimator(six.with_metaclass(ABCMeta, BaseEstimator)): def __init__(self, estimator, n_jobs=1): self.estimator = estimator self.n_jobs = n_jobs def fit(self, X, y, sample_weight=None): """ Fit the model to data. Fit a separate model for each output variable. Parameters ---------- X : (sparse) array-like, shape (n_samples, n_features) Data. y : (sparse) array-like, shape (n_samples, n_outputs) Multi-output targets. An indicator matrix turns on multilabel estimation. sample_weight : array-like, shape = (n_samples) or None Sample weights. If None, then samples are equally weighted. Only supported if the underlying regressor supports sample weights. 
Returns ------- self """ if not hasattr(self.estimator, "fit"): raise ValueError("The base estimator should implement a fit method") X, y = check_X_y(X, y, multi_output=True, accept_sparse=True) if y.ndim == 1: raise ValueError("y must have at least two dimensions for " "multi target regression but has only one.") if (sample_weight is not None and not has_fit_parameter(self.estimator, 'sample_weight')): raise ValueError("Underlying regressor does not support" " sample weights.") self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_estimator)( self.estimator, X, y[:, i], sample_weight) for i in range(y.shape[1])) return self def predict(self, X): """Predict multi-output variable using a model trained for each target variable. Parameters ---------- X : (sparse) array-like, shape (n_samples, n_features) Data. Returns ------- y : (sparse) array-like, shape (n_samples, n_outputs) Multi-output targets predicted across multiple predictors. Note: Separate models are generated for each predictor. """ check_is_fitted(self, 'estimators_') if not hasattr(self.estimator, "predict"): raise ValueError("The base estimator should implement a predict method") X = check_array(X, accept_sparse=True) y = Parallel(n_jobs=self.n_jobs)(delayed(parallel_helper)(e, 'predict', X) for e in self.estimators_) return np.asarray(y).T class MultiOutputRegressor(MultiOutputEstimator, RegressorMixin): """Multi target regression This strategy consists of fitting one regressor per target. This is a simple strategy for extending regressors that do not natively support multi-target regression. Parameters ---------- estimator : estimator object An estimator object implementing `fit` and `predict`. n_jobs : int, optional, default=1 The number of jobs to run in parallel for `fit`. If -1, then the number of jobs is set to the number of cores. When individual estimators are fast to train or predict using `n_jobs>1` can result in slower performance due to the overhead of spawning processes. 
""" def __init__(self, estimator, n_jobs=1): super(MultiOutputRegressor, self).__init__(estimator, n_jobs) def score(self, X, y, sample_weight=None): """Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the regression sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual sum of squares ((y_true - y_true.mean()) ** 2).sum(). Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Note ---- R^2 is calculated by weighting all the targets equally using `multioutput='uniform_average'`. Parameters ---------- X : array-like, shape (n_samples, n_features) Test samples. y : array-like, shape (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape [n_samples], optional Sample weights. Returns ------- score : float R^2 of self.predict(X) wrt. y. """ # XXX remove in 0.19 when r2_score default for multioutput changes from .metrics import r2_score return r2_score(y, self.predict(X), sample_weight=sample_weight, multioutput='uniform_average') class MultiOutputClassifier(MultiOutputEstimator, ClassifierMixin): """Multi target classification This strategy consists of fitting one classifier per target. This is a simple strategy for extending classifiers that do not natively support multi-target classification Parameters ---------- estimator : estimator object An estimator object implementing `fit`, `score` and `predict_proba`. n_jobs : int, optional, default=1 The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. The number of jobs to use for the computation. 
It does each target variable in y in parallel. Attributes ---------- estimators_ : list of `n_output` estimators Estimators used for predictions. """ def __init__(self, estimator, n_jobs=1): super(MultiOutputClassifier, self).__init__(estimator, n_jobs) def predict_proba(self, X): """Probability estimates. Returns prediction probabilites for each class of each output. Parameters ---------- X : array-like, shape (n_samples, n_features) Data Returns ------- T : (sparse) array-like, shape = (n_samples, n_classes, n_outputs) The class probabilities of the samples for each of the outputs """ check_is_fitted(self, 'estimators_') if not hasattr(self.estimator, "predict_proba"): raise ValueError("The base estimator should implement" "predict_proba method") results = np.dstack([estimator.predict_proba(X) for estimator in self.estimators_]) return results def score(self, X, y): """"Returns the mean accuracy on the given test data and labels. Parameters ---------- X : array-like, shape [n_samples, n_features] Test samples y : array-like, shape [n_samples, n_outputs] True values for X Returns ------- scores : float accuracy_score of self.predict(X) versus y """ check_is_fitted(self, 'estimators_') n_outputs_ = len(self.estimators_) if y.ndim == 1: raise ValueError("y must have at least two dimensions for " "multi target classification but has only one") if y.shape[1] != n_outputs_: raise ValueError("The number of outputs of Y for fit {0} and" " score {1} should be same". format(n_outputs_, y.shape[1])) y_pred = self.predict(X) return np.mean(np.all(y == y_pred, axis=1))
bsd-3-clause
openmv/micropython
ports/cc3200/tools/smoke.py
7
1886
from machine import Pin
from machine import RTC
import time
import os

"""
Execute it like this:
python3 run-tests.py --target wipy --device 192.168.1.1 ../cc3200/tools/smoke.py
"""

# GPIO numbers exercised by the pin tests below.
pin_map = [23, 24, 11, 12, 13, 14, 15, 16, 17, 22, 28, 10, 9, 8, 7, 6, 30, 31, 3, 0, 4, 5]
# Random payload used for the flash filesystem round-trip test.
test_bytes = os.urandom(1024)


def test_pin_read(pull):
    # enable the pull resistor on all pins, then read the value
    for p in pin_map:
        pin = Pin("GP" + str(p), mode=Pin.IN, pull=pull)
        # read the pin value
        print(pin())


def test_pin_shorts(pull):
    # Pull every pin the opposite way, then flip one pin at a time to the
    # requested pull and read it — a pin that does not follow its own pull
    # suggests a short to a neighbour.
    if pull == Pin.PULL_UP:
        pull_inverted = Pin.PULL_DOWN
    else:
        pull_inverted = Pin.PULL_UP
    # enable all pulls of the specified type
    for p in pin_map:
        pin = Pin("GP" + str(p), mode=Pin.IN, pull=pull_inverted)
    # then change the pull one pin at a time and read its value
    i = 0
    while i < len(pin_map):
        pin = Pin("GP" + str(pin_map[i]), mode=Pin.IN, pull=pull)
        # restore the previous pin to the inverted pull (i - 1 wraps to the
        # last pin on the first iteration)
        Pin("GP" + str(pin_map[i - 1]), mode=Pin.IN, pull=pull_inverted)
        i += 1
        # read the pin value
        print(pin())


test_pin_read(Pin.PULL_UP)
test_pin_read(Pin.PULL_DOWN)
test_pin_shorts(Pin.PULL_UP)
test_pin_shorts(Pin.PULL_DOWN)

# create a test directory
os.mkdir("/flash/test")
os.chdir("/flash/test")
print(os.getcwd())
# create a new file
f = open("test.txt", "w")
n_w = f.write(test_bytes)
print(n_w == len(test_bytes))
f.close()
f = open("test.txt", "r")
r = bytes(f.read(), "ascii")
# check that we can write and read it correctly
print(r == test_bytes)
f.close()
os.remove("test.txt")
os.chdir("..")
os.rmdir("test")
ls = os.listdir()
print("test" not in ls)
print(ls)

# test the real time clock
rtc = RTC()
# presumably now()[6] is the sub-second (microseconds) field — wait for it to
# be small so the 1 s sleep below crosses exactly one second boundary.
# TODO(review): confirm the field's units against the RTC docs.
while rtc.now()[6] > 800:
    pass
time1 = rtc.now()
time.sleep_ms(1000)
time2 = rtc.now()
print(time2[5] - time1[5] == 1)
print(time2[6] - time1[6] < 5000)  # microseconds
mit
zasdfgbnm/tensorflow
tensorflow/python/estimator/canned/dnn_linear_combined.py
2
24384
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow estimators for Linear and DNN joined training models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import six from tensorflow.python.estimator import estimator from tensorflow.python.estimator.canned import dnn from tensorflow.python.estimator.canned import head as head_lib from tensorflow.python.estimator.canned import linear from tensorflow.python.estimator.canned import optimizers from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import nn from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops.losses import losses from tensorflow.python.summary import summary from tensorflow.python.training import sync_replicas_optimizer from tensorflow.python.training import training_util from tensorflow.python.util.tf_export import tf_export # The default learning rates are a historical artifact of the initial # implementation. 
_DNN_LEARNING_RATE = 0.001 _LINEAR_LEARNING_RATE = 0.005 def _check_no_sync_replicas_optimizer(optimizer): if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer): raise ValueError( 'SyncReplicasOptimizer does not support multi optimizers case. ' 'Therefore, it is not supported in DNNLinearCombined model. ' 'If you want to use this optimizer, please use either DNN or Linear ' 'model.') def _linear_learning_rate(num_linear_feature_columns): """Returns the default learning rate of the linear model. The calculation is a historical artifact of this initial implementation, but has proven a reasonable choice. Args: num_linear_feature_columns: The number of feature columns of the linear model. Returns: A float. """ default_learning_rate = 1. / math.sqrt(num_linear_feature_columns) return min(_LINEAR_LEARNING_RATE, default_learning_rate) def _add_layer_summary(value, tag): summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value)) summary.histogram('%s/activation' % tag, value) def _dnn_linear_combined_model_fn(features, labels, mode, head, linear_feature_columns=None, linear_optimizer='Ftrl', dnn_feature_columns=None, dnn_optimizer='Adagrad', dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, input_layer_partitioner=None, config=None): """Deep Neural Net and Linear combined model_fn. Args: features: dict of `Tensor`. labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64` in the range `[0, n_classes)`. mode: Defines whether this is training, evaluation or prediction. See `ModeKeys`. head: A `Head` instance. linear_feature_columns: An iterable containing all the feature columns used by the Linear model. linear_optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training the Linear model. Defaults to the Ftrl optimizer. dnn_feature_columns: An iterable containing all the feature columns used by the DNN model. 
dnn_optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training the DNN model. Defaults to the Adagrad optimizer. dnn_hidden_units: List of hidden units per DNN layer. dnn_activation_fn: Activation function applied to each DNN layer. If `None`, will use `tf.nn.relu`. dnn_dropout: When not `None`, the probability we will drop out a given DNN coordinate. input_layer_partitioner: Partitioner for input layer. config: `RunConfig` object to configure the runtime settings. Returns: An `EstimatorSpec` instance. Raises: ValueError: If both `linear_feature_columns` and `dnn_features_columns` are empty at the same time, or `input_layer_partitioner` is missing, or features has the wrong type. """ if not isinstance(features, dict): raise ValueError('features should be a dictionary of `Tensor`s. ' 'Given type: {}'.format(type(features))) if not linear_feature_columns and not dnn_feature_columns: raise ValueError( 'Either linear_feature_columns or dnn_feature_columns must be defined.') num_ps_replicas = config.num_ps_replicas if config else 0 input_layer_partitioner = input_layer_partitioner or ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas, min_slice_size=64 << 20)) # Build DNN Logits. 
dnn_parent_scope = 'dnn' if not dnn_feature_columns: dnn_logits = None else: dnn_optimizer = optimizers.get_optimizer_instance( dnn_optimizer, learning_rate=_DNN_LEARNING_RATE) _check_no_sync_replicas_optimizer(dnn_optimizer) if not dnn_hidden_units: raise ValueError( 'dnn_hidden_units must be defined when dnn_feature_columns is ' 'specified.') dnn_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas)) with variable_scope.variable_scope( dnn_parent_scope, values=tuple(six.itervalues(features)), partitioner=dnn_partitioner): dnn_logit_fn = dnn._dnn_logit_fn_builder( # pylint: disable=protected-access units=head.logits_dimension, hidden_units=dnn_hidden_units, feature_columns=dnn_feature_columns, activation_fn=dnn_activation_fn, dropout=dnn_dropout, input_layer_partitioner=input_layer_partitioner) dnn_logits = dnn_logit_fn(features=features, mode=mode) linear_parent_scope = 'linear' if not linear_feature_columns: linear_logits = None else: linear_optimizer = optimizers.get_optimizer_instance( linear_optimizer, learning_rate=_linear_learning_rate(len(linear_feature_columns))) _check_no_sync_replicas_optimizer(linear_optimizer) with variable_scope.variable_scope( linear_parent_scope, values=tuple(six.itervalues(features)), partitioner=input_layer_partitioner) as scope: logit_fn = linear._linear_logit_fn_builder( # pylint: disable=protected-access units=head.logits_dimension, feature_columns=linear_feature_columns) linear_logits = logit_fn(features=features) _add_layer_summary(linear_logits, scope.name) # Combine logits and build full model. 
if dnn_logits is not None and linear_logits is not None: logits = dnn_logits + linear_logits elif dnn_logits is not None: logits = dnn_logits else: logits = linear_logits def _train_op_fn(loss): """Returns the op to optimize the loss.""" train_ops = [] global_step = training_util.get_global_step() if dnn_logits is not None: train_ops.append( dnn_optimizer.minimize( loss, var_list=ops.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_parent_scope))) if linear_logits is not None: train_ops.append( linear_optimizer.minimize( loss, var_list=ops.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES, scope=linear_parent_scope))) train_op = control_flow_ops.group(*train_ops) with ops.control_dependencies([train_op]): with ops.colocate_with(global_step): return state_ops.assign_add(global_step, 1) return head.create_estimator_spec( features=features, mode=mode, labels=labels, train_op_fn=_train_op_fn, logits=logits) @tf_export('estimator.DNNLinearCombinedClassifier') class DNNLinearCombinedClassifier(estimator.Estimator): """An estimator for TensorFlow Linear and DNN joined classification models. Note: This estimator is also known as wide-n-deep. Example: ```python numeric_feature = numeric_column(...) categorical_column_a = categorical_column_with_hash_bucket(...) categorical_column_b = categorical_column_with_hash_bucket(...) categorical_feature_a_x_categorical_feature_b = crossed_column(...) categorical_feature_a_emb = embedding_column( categorical_column=categorical_feature_a, ...) categorical_feature_b_emb = embedding_column( categorical_id_column=categorical_feature_b, ...) 
estimator = DNNLinearCombinedClassifier( # wide settings linear_feature_columns=[categorical_feature_a_x_categorical_feature_b], linear_optimizer=tf.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[ categorical_feature_a_emb, categorical_feature_b_emb, numeric_feature], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=tf.train.ProximalAdagradOptimizer(...), # warm-start settings warm_start_from="/path/to/checkpoint/dir") # To apply L1 and L2 regularization, you can set optimizers as follows: tf.train.ProximalAdagradOptimizer( learning_rate=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.001) # It is same for FtrlOptimizer. # Input builders def input_fn_train: # returns x, y pass estimator.train(input_fn=input_fn_train, steps=100) def input_fn_eval: # returns x, y pass metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10) def input_fn_predict: # returns x, None pass predictions = estimator.predict(input_fn=input_fn_predict) ``` Input of `train` and `evaluate` should have following features, otherwise there will be a `KeyError`: * for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `_CategoricalColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `_WeightedCategoricalColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `_DenseColumn`, a feature with `key=column.name` whose `value` is a `Tensor`. Loss is calculated by using softmax cross entropy. @compatibility(eager) Estimators are not compatible with eager execution. 
@end_compatibility """ def __init__(self, model_dir=None, linear_feature_columns=None, linear_optimizer='Ftrl', dnn_feature_columns=None, dnn_optimizer='Adagrad', dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, n_classes=2, weight_column=None, label_vocabulary=None, input_layer_partitioner=None, config=None, warm_start_from=None, loss_reduction=losses.Reduction.SUM): """Initializes a DNNLinearCombinedClassifier instance. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. Defaults to FTRL optimizer. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. Defaults to Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If None, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. n_classes: Number of label classes. Defaults to 2, namely binary classification. Must be > 1. weight_column: A string or a `_NumericColumn` created by `tf.feature_column.numeric_column` defining feature column representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. If it is a string, it is used as a key to fetch weight tensor from the `features`. 
If it is a `_NumericColumn`, raw tensor is fetched by key `weight_column.key`, then weight_column.normalizer_fn is applied on it to get weight tensor. label_vocabulary: A list of strings represents possible label values. If given, labels must be string type and have any value in `label_vocabulary`. If it is not given, that means labels are already encoded as integer or float within [0, 1] for `n_classes=2` and encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 . Also there will be errors if vocabulary is not provided and labels are string. input_layer_partitioner: Partitioner for input layer. Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20. config: RunConfig object to configure the runtime settings. warm_start_from: A string filepath to a checkpoint to warm-start from, or a `WarmStartSettings` object to fully configure warm-starting. If the string filepath is provided instead of a `WarmStartSettings`, then all weights are warm-started, and it is assumed that vocabularies and Tensor names are unchanged. loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to reduce training loss over batch. Defaults to `SUM`. Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. 
""" linear_feature_columns = linear_feature_columns or [] dnn_feature_columns = dnn_feature_columns or [] self._feature_columns = ( list(linear_feature_columns) + list(dnn_feature_columns)) if not self._feature_columns: raise ValueError('Either linear_feature_columns or dnn_feature_columns ' 'must be defined.') if n_classes == 2: head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access weight_column=weight_column, label_vocabulary=label_vocabulary, loss_reduction=loss_reduction) else: head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access n_classes, weight_column=weight_column, label_vocabulary=label_vocabulary, loss_reduction=loss_reduction) def _model_fn(features, labels, mode, config): """Call the _dnn_linear_combined_model_fn.""" return _dnn_linear_combined_model_fn( features=features, labels=labels, mode=mode, head=head, linear_feature_columns=linear_feature_columns, linear_optimizer=linear_optimizer, dnn_feature_columns=dnn_feature_columns, dnn_optimizer=dnn_optimizer, dnn_hidden_units=dnn_hidden_units, dnn_activation_fn=dnn_activation_fn, dnn_dropout=dnn_dropout, input_layer_partitioner=input_layer_partitioner, config=config) super(DNNLinearCombinedClassifier, self).__init__( model_fn=_model_fn, model_dir=model_dir, config=config, warm_start_from=warm_start_from) @tf_export('estimator.DNNLinearCombinedRegressor') class DNNLinearCombinedRegressor(estimator.Estimator): """An estimator for TensorFlow Linear and DNN joined models for regression. Note: This estimator is also known as wide-n-deep. Example: ```python numeric_feature = numeric_column(...) categorical_column_a = categorical_column_with_hash_bucket(...) categorical_column_b = categorical_column_with_hash_bucket(...) categorical_feature_a_x_categorical_feature_b = crossed_column(...) categorical_feature_a_emb = embedding_column( categorical_column=categorical_feature_a, ...) 
categorical_feature_b_emb = embedding_column( categorical_column=categorical_feature_b, ...) estimator = DNNLinearCombinedRegressor( # wide settings linear_feature_columns=[categorical_feature_a_x_categorical_feature_b], linear_optimizer=tf.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[ categorical_feature_a_emb, categorical_feature_b_emb, numeric_feature], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=tf.train.ProximalAdagradOptimizer(...), # warm-start settings warm_start_from="/path/to/checkpoint/dir") # To apply L1 and L2 regularization, you can set optimizers as follows: tf.train.ProximalAdagradOptimizer( learning_rate=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.001) # It is same for FtrlOptimizer. # Input builders def input_fn_train: # returns x, y pass estimator.train(input_fn=input_fn_train, steps=100) def input_fn_eval: # returns x, y pass metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10) def input_fn_predict: # returns x, None pass predictions = estimator.predict(input_fn=input_fn_predict) ``` Input of `train` and `evaluate` should have following features, otherwise there will be a `KeyError`: * for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `_CategoricalColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `_WeightedCategoricalColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `_DenseColumn`, a feature with `key=column.name` whose `value` is a `Tensor`. Loss is calculated by using mean squared error. @compatibility(eager) Estimators are not compatible with eager execution. 
@end_compatibility """ def __init__(self, model_dir=None, linear_feature_columns=None, linear_optimizer='Ftrl', dnn_feature_columns=None, dnn_optimizer='Adagrad', dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, label_dimension=1, weight_column=None, input_layer_partitioner=None, config=None, warm_start_from=None, loss_reduction=losses.Reduction.SUM): """Initializes a DNNLinearCombinedRegressor instance. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. Defaults to FTRL optimizer. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. Defaults to Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If None, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. label_dimension: Number of regression targets per example. This is the size of the last dimension of the labels and logits `Tensor` objects (typically, these have shape `[batch_size, label_dimension]`). weight_column: A string or a `_NumericColumn` created by `tf.feature_column.numeric_column` defining feature column representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. 
If it is a string, it is used as a key to fetch weight tensor from the `features`. If it is a `_NumericColumn`, raw tensor is fetched by key `weight_column.key`, then weight_column.normalizer_fn is applied on it to get weight tensor. input_layer_partitioner: Partitioner for input layer. Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20. config: RunConfig object to configure the runtime settings. warm_start_from: A string filepath to a checkpoint to warm-start from, or a `WarmStartSettings` object to fully configure warm-starting. If the string filepath is provided instead of a `WarmStartSettings`, then all weights are warm-started, and it is assumed that vocabularies and Tensor names are unchanged. loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to reduce training loss over batch. Defaults to `SUM`. Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. """ linear_feature_columns = linear_feature_columns or [] dnn_feature_columns = dnn_feature_columns or [] self._feature_columns = ( list(linear_feature_columns) + list(dnn_feature_columns)) if not self._feature_columns: raise ValueError('Either linear_feature_columns or dnn_feature_columns ' 'must be defined.') def _model_fn(features, labels, mode, config): """Call the _dnn_linear_combined_model_fn.""" return _dnn_linear_combined_model_fn( features=features, labels=labels, mode=mode, head=head_lib. 
# pylint: disable=protected-access _regression_head_with_mean_squared_error_loss( label_dimension=label_dimension, weight_column=weight_column, loss_reduction=loss_reduction), linear_feature_columns=linear_feature_columns, linear_optimizer=linear_optimizer, dnn_feature_columns=dnn_feature_columns, dnn_optimizer=dnn_optimizer, dnn_hidden_units=dnn_hidden_units, dnn_activation_fn=dnn_activation_fn, dnn_dropout=dnn_dropout, input_layer_partitioner=input_layer_partitioner, config=config) super(DNNLinearCombinedRegressor, self).__init__( model_fn=_model_fn, model_dir=model_dir, config=config, warm_start_from=warm_start_from)
apache-2.0
chjw8016/GreenOdoo7-haibao
openerp/addons/hr_recruitment/report/hr_recruitment_report.py
52
5841
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import tools
from openerp.osv import fields,osv
from .. import hr_recruitment
from openerp.addons.decimal_precision import decimal_precision as dp

# Applicant states surfaced in the report (mirrors hr.applicant workflow).
AVAILABLE_STATES = [
    ('draft','New'),
    ('open','Open'),
    ('cancel', 'Refused'),
    ('done', 'Hired'),
    ('pending','Pending')
]

class hr_recruitment_report(osv.osv):
    """Read-only reporting model over hr_applicant.

    _auto = False: no table is created by the ORM; instead init() builds a
    PostgreSQL view named after _table (hr_recruitment_report) that
    aggregates applicants for pivot/graph analysis.
    """
    _name = "hr.recruitment.report"
    _description = "Recruitments Statistics"
    _auto = False          # backed by the SQL view created in init(), not a real table
    _rec_name = 'date'
    _columns = {
        'user_id': fields.many2one('res.users', 'User', readonly=True),
        # count(*) per group — see the SQL view below
        'nbr': fields.integer('# of Applications', readonly=True),
        'state': fields.selection(AVAILABLE_STATES, 'Status', size=16, readonly=True),
        'month':fields.selection([('01', 'January'), ('02', 'February'),
            ('03', 'March'), ('04', 'April'),
            ('05', 'May'), ('06', 'June'),
            ('07', 'July'), ('08', 'August'),
            ('09', 'September'), ('10', 'October'),
            ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'year': fields.char('Year', size=4, readonly=True),
        'date': fields.date('Date', readonly=True),
        'date_closed': fields.date('Closed', readonly=True),
        'job_id': fields.many2one('hr.job', 'Applied Job',readonly=True),
        'stage_id': fields.many2one ('hr.recruitment.stage', 'Stage'),
        'type_id': fields.many2one('hr.recruitment.degree', 'Degree'),
        'department_id': fields.many2one('hr.department','Department',readonly=True),
        'priority': fields.selection(hr_recruitment.AVAILABLE_PRIORITIES, 'Appreciation'),
        'salary_prop' : fields.float("Salary Proposed", digits_compute=dp.get_precision('Account')),
        'salary_prop_avg' : fields.float("Avg. Proposed Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
        'salary_exp' : fields.float("Salary Expected", digits_compute=dp.get_precision('Account')),
        'salary_exp_avg' : fields.float("Avg. Expected Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
        'partner_id': fields.many2one('res.partner', 'Partner',readonly=True),
        'available': fields.float("Availability"),
        # NOTE(review): help text says "project issue" — looks copy-pasted from the
        # project_issue report; should presumably read "applicant". Confirm before
        # changing (the string is translated).
        'delay_close': fields.float('Avg. Delay to Close', digits=(16,2), readonly=True, group_operator="avg",
            help="Number of Days to close the project issue"),
    }
    _order = 'date desc'

    def init(self, cr):
        """(Re)create the hr_recruitment_report SQL view.

        Called by the ORM at module install/update time. The GROUP BY
        includes s.create_date (full timestamp), so in practice rows are
        close to one-per-applicant; the aggregates matter once the client
        re-groups by the coarser dimensions (month, job, stage, ...).
        """
        tools.drop_view_if_exists(cr, 'hr_recruitment_report')
        cr.execute("""
            create or replace view hr_recruitment_report as (
                 select
                     min(s.id) as id,
                     date_trunc('day',s.create_date) as date,
                     date_trunc('day',s.date_closed) as date_closed,
                     to_char(s.create_date, 'YYYY') as year,
                     to_char(s.create_date, 'MM') as month,
                     to_char(s.create_date, 'YYYY-MM-DD') as day,
                     s.state,
                     s.partner_id,
                     s.company_id,
                     s.user_id,
                     s.job_id,
                     s.type_id,
                     sum(s.availability) as available,
                     s.department_id,
                     s.priority,
                     s.stage_id,
                     sum(salary_proposed) as salary_prop,
                     (sum(salary_proposed)/count(*)) as salary_prop_avg,
                     sum(salary_expected) as salary_exp,
                     (sum(salary_expected)/count(*)) as salary_exp_avg,
                     extract('epoch' from (s.date_closed-s.create_date))/(3600*24) as delay_close,
                     count(*) as nbr
                 from hr_applicant s
                 group by
                     to_char(s.create_date, 'YYYY'),
                     to_char(s.create_date, 'MM'),
                     to_char(s.create_date, 'YYYY-MM-DD') ,
                     date_trunc('day',s.create_date),
                     date_trunc('day',s.date_closed),
                     s.date_open,
                     s.create_date,
                     s.date_closed,
                     s.state,
                     s.partner_id,
                     s.company_id,
                     s.user_id,
                     s.stage_id,
                     s.type_id,
                     s.priority,
                     s.job_id,
                     s.department_id
            )
        """)
hr_recruitment_report()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
mit
DD-L/deel.boost.python
revise/libs/python/test/test_cltree.py
12
1115
# Copyright David Abrahams 2004. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #!/usr/bin/env python from cltree import basic,symbol,constant,variable b = basic() c = constant() s = symbol() v = variable() assert isinstance(b,basic) assert not isinstance(b,symbol) assert not isinstance(b,constant) assert not isinstance(b,variable) assert isinstance(c,basic) assert isinstance(c,constant) assert not isinstance(c,symbol) assert not isinstance(c,variable) assert not isinstance(s,basic) assert isinstance(s,symbol) assert not isinstance(s,constant) assert not isinstance(s,variable) assert isinstance(v,basic) assert not isinstance(v,symbol) assert not isinstance(v,constant) assert isinstance(v,variable) print 'b=',b assert repr(b)=='cltree.basic()' print 's=',s assert repr(s)!='cltree.wrapped_symbol()' # because not isinstance(s,basic) print 'c=',c assert repr(c)=='cltree.constant()' print 'v=',v assert repr(v)=='cltree.wrapped_variable()' print 'ok'
mit
fsmk/fsmkschool
broadgauge/oauth.py
2
7055
"""OAuth integration. """ from rauth import OAuth2Service import web import logging import json logger = logging.getLogger(__name__) def oauth_service(service, redirect_uri): if service == 'github': return GitHub(redirect_uri) elif service == 'google': return Google(redirect_uri) elif service == 'facebook': return Facebook(redirect_uri) def get_oauth_services(): """Returns an iterator over the available oauth services. Each entry in the iterator will be a storage object containing id and name of the service. For example: web.storage(name='gitbub', title='GitHub') """ if 'github_client_id' in web.config: yield web.storage(name='github', title='GitHub') if 'google_client_id' in web.config: yield web.storage(name='google', title='Google') if 'facebook_client_id' in web.config: yield web.storage(name='facebook', title='Facebook') class GitHub(OAuth2Service): """GitHub OAuth integration. """ def __init__(self, redirect_uri): OAuth2Service.__init__(self, client_id=web.config.github_client_id, client_secret=web.config.github_client_secret, name='github', authorize_url='https://github.com/login/oauth/authorize', access_token_url='https://github.com/login/oauth/access_token', base_url='https://api.github.com/') self.redirect_uri = redirect_uri def get_authorize_url(self, **params): params.setdefault('response_type', 'code') params.setdefault('redirect_uri', self.redirect_uri) params.setdefault('scope', 'user:email') return OAuth2Service.get_authorize_url(self, **params) def get_auth_session(self, **kwargs): if 'data' in kwargs and isinstance(kwargs['data'], dict): kwargs['data'].setdefault('redirect_uri', self.redirect_uri) return OAuth2Service.get_auth_session(self, **kwargs) def get_userdata(self, code): """Returns the relevant userdata from github. This function must be called from githun oauth callback and the auth code must be passed as argument. 
""" try: session = self.get_auth_session(data={'code': code}) d = session.get('user').json() email = self.get_verified_email(session) if not email: logger.error("No verified email found for this user {}".format(d['login'])) return return dict( name=d["name"], email=email, username=d["login"], github=d["login"], service="GitHub") except KeyError, e: logger.error("failed to get user data from github. Error: %s", str(e)) def get_verified_email(self, session): """Finds verified email of the user using oauth session. """ data = session.get('https://api.github.com/user/emails').json() emails = [d['email'] for d in data if d['verified'] and d['primary']] if emails: return emails[0] class Google(OAuth2Service): """Google OAuth integration. """ def __init__(self, redirect_uri): OAuth2Service.__init__(self, client_id=web.config.google_client_id, client_secret=web.config.google_client_secret, name='google', authorize_url='https://accounts.google.com/o/oauth2/auth', access_token_url='https://accounts.google.com/o/oauth2/token', base_url='https://www.googleapis.com/oauth2/v1/') self.redirect_uri = redirect_uri def get_authorize_url(self, **params): params.setdefault('response_type', 'code') params.setdefault('redirect_uri', self.redirect_uri) params.setdefault('scope', 'profile email') return OAuth2Service.get_authorize_url(self, **params) def get_auth_session(self, **kwargs): if 'data' in kwargs and isinstance(kwargs['data'], dict): kwargs['data'].setdefault('redirect_uri', self.redirect_uri) kwargs['data'].setdefault('grant_type', 'authorization_code') print kwargs return OAuth2Service.get_auth_session(self, **kwargs) def get_userdata(self, code): """Returns the relevant userdata from github. This function must be called from githun oauth callback and the auth code must be passed as argument. 
""" try: session = self.get_auth_session(data={'code': code}, decoder=json.loads) d = session.get('userinfo').json() # suggest basename of the email as username username = d['email'].split("@")[0] return dict( name=d['name'], email=d['email'], username=username, service='Google') except KeyError, e: logger.error("failed to get user data from google. Error: %s", str(e), exc_info=True) class Facebook(OAuth2Service): """Facebook OAuth integration. """ def __init__(self, redirect_uri): OAuth2Service.__init__(self, client_id=web.config.facebook_client_id, client_secret=web.config.facebook_client_secret, name='facebook', authorize_url='https://graph.facebook.com/oauth/authorize', access_token_url='https://graph.facebook.com/oauth/access_token', base_url='https://graph.facebook.com/') self.redirect_uri = redirect_uri def get_authorize_url(self, **params): params.setdefault('response_type', 'code') params.setdefault('redirect_uri', self.redirect_uri) params.setdefault('scope', 'email') return OAuth2Service.get_authorize_url(self, **params) def get_auth_session(self, **kwargs): if 'data' in kwargs and isinstance(kwargs['data'], dict): kwargs['data'].setdefault('redirect_uri', self.redirect_uri) kwargs['data'].setdefault('grant_type', 'authorization_code') return OAuth2Service.get_auth_session(self, **kwargs) def get_userdata(self, code): """Returns the relevant userdata from github. This function must be called from githun oauth callback and the auth code must be passed as argument. """ try: session = self.get_auth_session( data={'code': code, 'redirect_uri': self.redirect_uri}) d = session.get('me').json() # suggest basename of the email as username username = d['email'].split("@")[0] return dict( name=d['name'], email=d['email'], username=username, service='Facebook') except KeyError, e: logger.error("failed to get user data from facebook. Error: %s", str(e), exc_info=True)
bsd-3-clause
sdeepanshu02/microblog
flask/Lib/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py
192
2648
""" urllib3 - Thread-safe connection pooling and re-using. """ from __future__ import absolute_import import warnings from .connectionpool import ( HTTPConnectionPool, HTTPSConnectionPool, connection_from_url ) from . import exceptions from .filepost import encode_multipart_formdata from .poolmanager import PoolManager, ProxyManager, proxy_from_url from .response import HTTPResponse from .util.request import make_headers from .util.url import get_host from .util.timeout import Timeout from .util.retry import Retry # Set default logging handler to avoid "No handler found" warnings. import logging try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass __author__ = 'Andrey Petrov (andrey.petrov@shazow.net)' __license__ = 'MIT' __version__ = '1.13.1' __all__ = ( 'HTTPConnectionPool', 'HTTPSConnectionPool', 'PoolManager', 'ProxyManager', 'HTTPResponse', 'Retry', 'Timeout', 'add_stderr_logger', 'connection_from_url', 'disable_warnings', 'encode_multipart_formdata', 'get_host', 'make_headers', 'proxy_from_url', ) logging.getLogger(__name__).addHandler(NullHandler()) def add_stderr_logger(level=logging.DEBUG): """ Helper for quickly adding a StreamHandler to the logger. Useful for debugging. Returns the handler after adding it. """ # This method needs to be in this __init__.py to get the __name__ correct # even if urllib3 is vendored within another package. logger = logging.getLogger(__name__) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) logger.debug('Added a stderr logging handler to logger: %s' % __name__) return handler # ... Clean up. del NullHandler # SecurityWarning's always go off by default. 
warnings.simplefilter('always', exceptions.SecurityWarning, append=True) # SubjectAltNameWarning's should go off once per host warnings.simplefilter('default', exceptions.SubjectAltNameWarning) # InsecurePlatformWarning's don't vary between requests, so we keep it default. warnings.simplefilter('default', exceptions.InsecurePlatformWarning, append=True) # SNIMissingWarnings should go off only once. warnings.simplefilter('default', exceptions.SNIMissingWarning) def disable_warnings(category=exceptions.HTTPWarning): """ Helper for quickly disabling all urllib3 warnings. """ warnings.simplefilter('ignore', category)
bsd-3-clause
rupran/ansible
lib/ansible/modules/packaging/os/urpmi.py
71
6225
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-

# (c) 2013, Philippe Makowski
# Written by Philippe Makowski <philippem@mageia.org>
# Based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: urpmi
short_description: Urpmi manager
description:
  - Manages packages with I(urpmi) (such as for Mageia or Mandriva)
version_added: "1.3.4"
options:
  pkg:
    description:
      - name of package to install, upgrade or remove.
    required: true
    default: null
  state:
    description:
      - Indicates the desired package state
    required: false
    default: present
    choices: [ "absent", "present" ]
  update_cache:
    description:
      - update the package database first C(urpmi.update -a).
    required: false
    default: no
    choices: [ "yes", "no" ]
  no-recommends:
    description:
      - Corresponds to the C(--no-recommends) option for I(urpmi).
    required: false
    default: yes
    choices: [ "yes", "no" ]
  force:
    description:
      - Assume "yes" is the answer to any question urpmi has to ask.
        Corresponds to the C(--force) option for I(urpmi).
    required: false
    default: yes
    choices: [ "yes", "no" ]
author: "Philippe Makowski (@pmakowski)"
notes: []
'''

EXAMPLES = '''
# install package foo
- urpmi:
    pkg: foo
    state: present

# remove package foo
- urpmi:
    pkg: foo
    state: absent

# description: remove packages foo and bar
- urpmi:
    pkg: foo,bar
    state: absent

# description: update the package database (urpmi.update -a -q) and install bar (bar will be the updated if a newer version exists)
- urpmi:
    name: bar
    state: present
    update_cache: yes
'''


import shlex
import os
import sys

URPMI_PATH = '/usr/sbin/urpmi'
URPME_PATH = '/usr/sbin/urpme'


def query_package(module, name):
    """Return True if the rpm package *name* is installed.

    rpm -q returns 0 if the package is installed, 1 if it is not installed.
    """
    cmd = "rpm -q %s" % (name)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    if rc == 0:
        return True
    else:
        return False


def query_package_provides(module, name):
    """Return True if something installed provides *name*.

    rpm -q returns 0 if the package is installed, 1 if it is not installed.
    """
    cmd = "rpm -q --provides %s" % (name)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    return rc == 0


def update_package_db(module):
    """Refresh the urpmi package database (urpmi.update -a -q)."""
    cmd = "urpmi.update -a -q"
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    if rc != 0:
        module.fail_json(msg="could not update package db")


def remove_packages(module, packages):
    """Remove each installed package in *packages*; exits the module."""
    remove_c = 0
    # Using a for loop in case of error, we can report the package that failed
    for package in packages:
        # Query the package first, to see if we even need to remove
        if not query_package(module, package):
            continue

        cmd = "%s --auto %s" % (URPME_PATH, package)
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)

        if rc != 0:
            module.fail_json(msg="failed to remove %s" % (package))

        remove_c += 1

    if remove_c > 0:
        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)

    module.exit_json(changed=False, msg="package(s) already absent")


def install_packages(module, pkgspec, force=True, no_recommends=True):
    """Install any packages in *pkgspec* not already provided; exits the module."""

    packages = ""
    for package in pkgspec:
        if not query_package_provides(module, package):
            packages += "'%s' " % package

    if len(packages) != 0:
        if no_recommends:
            no_recommends_yes = '--no-recommends'
        else:
            no_recommends_yes = ''

        if force:
            force_yes = '--force'
        else:
            force_yes = ''

        cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes,
                                               no_recommends_yes, packages))

        rc, out, err = module.run_command(cmd)

        installed = True
        # BUG FIX: this loop used `for packages in pkgspec`, which clobbered
        # the accumulated `packages` string and re-tested the stale `package`
        # variable from the loop above instead of each requested package.
        for package in pkgspec:
            if not query_package_provides(module, package):
                installed = False

        # urpmi always have 0 for exit code if --force is used
        if rc or not installed:
            module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
        else:
            module.exit_json(changed=True, msg="%s present(s)" % packages)
    else:
        module.exit_json(changed=False)


def main():
    """Module entry point: parse parameters and dispatch install/remove."""
    module = AnsibleModule(
            argument_spec     = dict(
                state         = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
                update_cache  = dict(default=False, aliases=['update-cache'], type='bool'),
                force         = dict(default=True, type='bool'),
                no_recommends = dict(default=True, aliases=['no-recommends'], type='bool'),
                package       = dict(aliases=['pkg', 'name'], required=True)))

    if not os.path.exists(URPMI_PATH):
        module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH))

    p = module.params

    force_yes = p['force']
    no_recommends_yes = p['no_recommends']

    if p['update_cache']:
        update_package_db(module)

    packages = p['package'].split(',')

    if p['state'] in [ 'installed', 'present' ]:
        install_packages(module, packages, force_yes, no_recommends_yes)

    elif p['state'] in [ 'removed', 'absent' ]:
        remove_packages(module, packages)

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
gpl-3.0
theheros/kbengine
kbe/res/scripts/common/Lib/encodings/iso8859_4.py
37
13683
""" Python Character Mapping Codec iso8859_4 generated from 'MAPPINGS/ISO8859/8859-4.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-4', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 
0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' # 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET 
'^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> <control> 
'\xa0' # 0xA0 -> NO-BREAK SPACE '\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK '\u0138' # 0xA2 -> LATIN SMALL LETTER KRA '\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA '\xa4' # 0xA4 -> CURRENCY SIGN '\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE '\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA '\xa7' # 0xA7 -> SECTION SIGN '\xa8' # 0xA8 -> DIAERESIS '\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON '\u0112' # 0xAA -> LATIN CAPITAL LETTER E WITH MACRON '\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA '\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE '\xad' # 0xAD -> SOFT HYPHEN '\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON '\xaf' # 0xAF -> MACRON '\xb0' # 0xB0 -> DEGREE SIGN '\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK '\u02db' # 0xB2 -> OGONEK '\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA '\xb4' # 0xB4 -> ACUTE ACCENT '\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE '\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA '\u02c7' # 0xB7 -> CARON '\xb8' # 0xB8 -> CEDILLA '\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON '\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON '\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA '\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE '\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG '\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON '\u014b' # 0xBF -> LATIN SMALL LETTER ENG '\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE '\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK '\xcb' # 0xCB -> LATIN CAPITAL 
LETTER E WITH DIAERESIS '\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE '\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA '\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON '\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xd7' # 0xD7 -> MULTIPLICATION SIGN '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE '\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS '\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE '\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S '\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe6' # 0xE6 -> LATIN SMALL LETTER AE '\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE '\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS '\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE '\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA 
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON '\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS '\xf7' # 0xF7 -> DIVISION SIGN '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE '\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS '\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE '\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON '\u02d9' # 0xFF -> DOT ABOVE ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
lgpl-3.0
mxrrow/zaicoin
src/deps/boost/tools/regression/xsl_reports/make_snapshot.py
29
5245
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

# Builds a Boost source snapshot: exports a tag from SVN, packs it into a
# .tar.bz2 with a timestamp file and md5 checksum, and optionally moves the
# result into a web-site directory. Python 2 script.

import tarfile
import shutil
import time
import os.path
import string
import sys
import traceback


def retry( f, args, max_attempts=5, sleep_secs=10 ):
    # Call f(*args), retrying up to max_attempts times on any exception,
    # sleeping sleep_secs between attempts; re-raises on the final failure.
    for attempts in range( max_attempts, -1, -1 ):
        try:
            return f( *args )
        except Exception, msg:
            utils.log( '%s failed with message "%s"' % ( f.__name__, msg ) )
            if attempts == 0:
                utils.log( 'Giving up.' )
                raise

            utils.log( 'Retrying (%d more attempts).' % attempts )
            time.sleep( sleep_secs )


def rmtree( path ):
    # Forcefully delete a directory tree; uses shell commands to cope with
    # long paths / read-only files that shutil.rmtree alone may choke on.
    if os.path.exists( path ):
        if sys.platform == 'win32':
            os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
            shutil.rmtree( path )
        else:
            os.system( 'rm -f -r "%s"' % path )


def svn_command( command ):
    # Run an SVN command line, raising on a non-zero exit code.
    utils.log( 'Executing SVN command "%s"' % command )
    rc = os.system( command )
    if rc != 0:
        raise Exception( 'SVN command "%s" failed with code %d' % ( command, rc ) )


def svn_export( sources_dir, user, tag ):
    # Export the given Boost SVN tag into sources_dir; anonymous users go
    # over http, authenticated users over https.
    if user is None or user == 'anonymous':
        command = 'svn export --force http://svn.boost.org/svn/boost/%s %s' % ( tag, sources_dir )
    else:
        command = 'svn export --force --non-interactive --username=%s https://svn.boost.org/svn/boost/%s %s' \
            % ( user, tag, sources_dir )

    # NOTE(review): chdir to the *basename* of sources_dir looks suspicious —
    # it only works if the current directory already contains it; confirm the
    # intended working-directory contract with the caller (make_tarball).
    os.chdir( os.path.basename( sources_dir ) )
    retry(
         svn_command
       , ( command, )
       )


def make_tarball(
          working_dir
        , tag
        , user
        , site_dir
        ):
    # Full snapshot pipeline: export, tar.bz2, timestamp file, md5 file,
    # optional move into site_dir (staged through site_dir/temp), cleanup.
    timestamp = time.time()
    timestamp_suffix = time.strftime( '%y-%m-%d-%H%M', time.gmtime( timestamp ) )
    tag_suffix = tag.split( '/' )[-1]
    sources_dir = os.path.join(
          working_dir
        , 'boost-%s-%s' % ( tag_suffix, timestamp_suffix )
        )

    if os.path.exists( sources_dir ):
        utils.log( 'Directory "%s" already exists, cleaning it up...' % sources_dir )
        rmtree( sources_dir )

    try:
        os.mkdir( sources_dir )
        utils.log( 'Exporting files from SVN...' )
        svn_export( sources_dir, user, tag )
    except:
        utils.log( 'Cleaning up...' )
        rmtree( sources_dir )
        raise

    tarball_name = 'boost-%s.tar.bz2' % tag_suffix
    tarball_path = os.path.join( working_dir, tarball_name )
    utils.log( 'Archiving "%s" to "%s"...' % ( sources_dir, tarball_path ) )
    tar = tarfile.open( tarball_path, 'w|bz2' )
    tar.posix = False # see http://tinyurl.com/4ebd8
    tar.add( sources_dir, os.path.basename( sources_dir ) )
    tar.close()

    tarball_timestamp_path = os.path.join( working_dir, 'boost-%s.timestamp' % tag_suffix )
    utils.log( 'Writing timestamp into "%s"...' % tarball_timestamp_path )
    timestamp_file = open( tarball_timestamp_path, 'w' )
    timestamp_file.write( '%f' % timestamp )
    timestamp_file.close()

    md5sum_path = os.path.join( working_dir, 'boost-%s.md5' % tag_suffix )
    utils.log( 'Writing md5 checksum into "%s"...' % md5sum_path )
    # md5sum is run from the tarball's directory so the .md5 file contains a
    # relative file name.
    old_dir = os.getcwd()
    os.chdir( os.path.dirname( tarball_path ) )
    os.system( 'md5sum -b "%s" >"%s"' % ( os.path.basename( tarball_path ), md5sum_path ) )
    os.chdir( old_dir )

    if site_dir is not None:
        utils.log( 'Moving "%s" to the site location "%s"...' % ( tarball_name, site_dir ) )
        temp_site_dir = os.path.join( site_dir, 'temp' )
        if not os.path.exists( temp_site_dir ):
            os.mkdir( temp_site_dir )

        # Stage through temp so the tarball appears in site_dir atomically
        # (a rename on the same filesystem), then move the metadata files.
        shutil.move( tarball_path, temp_site_dir )
        shutil.move( os.path.join( temp_site_dir, tarball_name ), site_dir )
        shutil.move( tarball_timestamp_path, site_dir )
        shutil.move( md5sum_path, site_dir )

        utils.log( 'Removing "%s"...' % sources_dir )
        rmtree( sources_dir )


def accept_args( args ):
    # Parse command-line options; returns the make_tarball() argument tuple.
    args_spec = [
          'working-dir='
        , 'tag='
        , 'user='
        , 'site-dir='
        , 'mail='
        , 'help'
        ]

    options = {
          '--tag'          : 'trunk'
        , '--user'         : None
        , '--site-dir'     : None
        }

    utils.accept_args( args_spec, args, options, usage )

    return (
          options[ '--working-dir' ]
        , options[ '--tag' ]
        , options[ '--user' ]
        , options[ '--site-dir' ]
        )


def usage():
    print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
    print '''
\t--working-dir   working directory
\t--tag           snapshot tag (i.e. 'trunk')
\t--user          Boost SVN user ID (optional)
\t--site-dir      site directory to copy the snapshot to (optional)
'''


def main():
    make_tarball( *accept_args( sys.argv[ 1: ] ) )

# 'utils' lives in the xsl_reports package root; when imported as a module we
# can import it directly, when run as a script we must locate it first.
if __name__ != '__main__':
    import utils
else:
    # in absence of relative import...
    xsl_path = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
    while os.path.basename( xsl_path ) != 'xsl_reports':
        xsl_path = os.path.dirname( xsl_path )
    sys.path.append( xsl_path )

    import utils
    main()
mit
jianlirong/incubator-hawq
tools/bin/ext/yaml/loader.py
671
1132
__all__ = ['BaseLoader', 'SafeLoader', 'Loader']

from reader import *
from scanner import *
from parser import *
from composer import *
from constructor import *
from resolver import *


class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
    """Loader mixing in the most basic constructor/resolver pair."""

    def __init__(self, stream):
        # Reader is the only mixin that consumes the input stream; the
        # rest are initialised in pipeline order with no arguments.
        Reader.__init__(self, stream)
        for mixin in (Scanner, Parser, Composer, BaseConstructor, BaseResolver):
            mixin.__init__(self)


class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
    """Loader restricted to the safe subset of YAML constructors."""

    def __init__(self, stream):
        Reader.__init__(self, stream)
        for mixin in (Scanner, Parser, Composer, SafeConstructor, Resolver):
            mixin.__init__(self)


class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
    """Full-featured loader supporting all YAML constructors."""

    def __init__(self, stream):
        Reader.__init__(self, stream)
        for mixin in (Scanner, Parser, Composer, Constructor, Resolver):
            mixin.__init__(self)
apache-2.0
patmcb/odoo
addons/hr_timesheet_sheet/hr_timesheet_sheet.py
72
35950
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from datetime import datetime from dateutil.relativedelta import relativedelta from pytz import timezone import pytz from openerp.osv import fields, osv from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT from openerp.tools.translate import _ class hr_timesheet_sheet(osv.osv): _name = "hr_timesheet_sheet.sheet" _inherit = "mail.thread" _table = 'hr_timesheet_sheet_sheet' _order = "id desc" _description="Timesheet" def _total(self, cr, uid, ids, name, args, context=None): """ Compute the attendances, analytic lines timesheets and differences between them for all the days of a timesheet and the current day """ res = {} for sheet in self.browse(cr, uid, ids, context=context or {}): res.setdefault(sheet.id, { 'total_attendance': 0.0, 'total_timesheet': 0.0, 'total_difference': 0.0, }) for period in sheet.period_ids: res[sheet.id]['total_attendance'] += period.total_attendance res[sheet.id]['total_timesheet'] += period.total_timesheet res[sheet.id]['total_difference'] += period.total_attendance - 
period.total_timesheet return res def check_employee_attendance_state(self, cr, uid, sheet_id, context=None): ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')]) ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')]) if len(ids_signin) != len(ids_signout): raise osv.except_osv(('Warning!'),_('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.')) return True def copy(self, cr, uid, ids, *args, **argv): raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.')) def create(self, cr, uid, vals, context=None): if 'employee_id' in vals: if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id: raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.')) if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id: raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.')) if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id: raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.')) if vals.get('attendances_ids'): # If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context) return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context) def write(self, cr, uid, ids, vals, context=None): if 'employee_id' in vals: new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False if not 
new_user_id: raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.')) if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context): raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.')) if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id: raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.')) if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id: raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.')) if vals.get('attendances_ids'): # If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint # In addition to the date order, deleting attendances are done before inserting attendances vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context) res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context) if vals.get('attendances_ids'): for timesheet in self.browse(cr, uid, ids): if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]): raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. 
Sign in)')) return res def sort_attendances(self, cr, uid, attendance_tuples, context=None): date_attendances = [] for att_tuple in attendance_tuples: if att_tuple[0] in [0,1,4]: if att_tuple[0] in [0,1]: if att_tuple[2] and att_tuple[2].has_key('name'): name = att_tuple[2]['name'] else: name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name else: name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name date_attendances.append((1, name, att_tuple)) elif att_tuple[0] in [2,3]: date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple)) else: date_attendances.append((0, False, att_tuple)) date_attendances.sort() return [att[2] for att in date_attendances] def button_confirm(self, cr, uid, ids, context=None): for sheet in self.browse(cr, uid, ids, context=context): if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id: self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context) self.check_employee_attendance_state(cr, uid, sheet.id, context=context) di = sheet.user_id.company_id.timesheet_max_difference if (abs(sheet.total_difference) < di) or not di: sheet.signal_workflow('confirm') else: raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,)) return True def attendance_action_change(self, cr, uid, ids, context=None): hr_employee = self.pool.get('hr.employee') employee_ids = [] for sheet in self.browse(cr, uid, ids, context=context): if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id) return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context) def _count_all(self, cr, uid, ids, field_name, arg, context=None): Timesheet = self.pool['hr.analytic.timesheet'] Attendance = self.pool['hr.attendance'] return { sheet_id: { 'timesheet_activity_count': Timesheet.search_count(cr,uid, 
[('sheet_id','=', sheet_id)], context=context), 'attendance_count': Attendance.search_count(cr,uid, [('sheet_id', '=', sheet_id)], context=context) } for sheet_id in ids } _columns = { 'name': fields.char('Note', select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}), 'employee_id': fields.many2one('hr.employee', 'Employee', required=True), 'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}), 'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}), 'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}), 'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id', 'Timesheet lines', readonly=True, states={ 'draft': [('readonly', False)], 'new': [('readonly', False)]} ), 'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'), 'state' : fields.selection([ ('new', 'New'), ('draft','Open'), ('confirm','Waiting Approval'), ('done','Approved')], 'Status', select=True, required=True, readonly=True, help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \ \n* The \'Confirmed\' status is used for to confirm the timesheet by user. 
\ \n* The \'Done\' status is used when users timesheet is accepted by his/her senior.'), 'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True), 'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"), 'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"), 'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"), 'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True), 'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True), 'company_id': fields.many2one('res.company', 'Company'), 'department_id':fields.many2one('hr.department','Department'), 'timesheet_activity_count': fields.function(_count_all, type='integer', string='Timesheet Activities', multi=True), 'attendance_count': fields.function(_count_all, type='integer', string="Attendances", multi=True), } def _default_date_from(self, cr, uid, context=None): user = self.pool.get('res.users').browse(cr, uid, uid, context=context) r = user.company_id and user.company_id.timesheet_range or 'month' if r=='month': return time.strftime('%Y-%m-01') elif r=='week': return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d') elif r=='year': return time.strftime('%Y-01-01') return fields.date.context_today(self, cr, uid, context) def _default_date_to(self, cr, uid, context=None): user = self.pool.get('res.users').browse(cr, uid, uid, context=context) r = user.company_id and user.company_id.timesheet_range or 'month' if r=='month': return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d') elif r=='week': return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d') elif r=='year': return time.strftime('%Y-12-31') return 
fields.date.context_today(self, cr, uid, context) def _default_employee(self, cr, uid, context=None): emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context) return emp_ids and emp_ids[0] or False _defaults = { 'date_from' : _default_date_from, 'date_to' : _default_date_to, 'state': 'new', 'employee_id': _default_employee, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c) } def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None): for sheet in self.browse(cr, uid, ids, context=context): new_user_id = forced_user_id or sheet.employee_id.user_id and sheet.employee_id.user_id.id if new_user_id: cr.execute('SELECT id \ FROM hr_timesheet_sheet_sheet \ WHERE (date_from <= %s and %s <= date_to) \ AND user_id=%s \ AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id)) if cr.fetchall(): return False return True _constraints = [ (_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']), ] def action_set_to_draft(self, cr, uid, ids, *args): self.write(cr, uid, ids, {'state': 'draft'}) self.create_workflow(cr, uid, ids) return True def name_get(self, cr, uid, ids, context=None): if not ids: return [] if isinstance(ids, (long, int)): ids = [ids] return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \ for r in self.read(cr, uid, ids, ['date_from'], context=context, load='_classic_write')] def unlink(self, cr, uid, ids, context=None): sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context) for sheet in sheets: if sheet['state'] in ('confirm', 'done'): raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.')) elif sheet['total_attendance'] <> 0.00: raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet 
which have attendance entries.')) toremove = [] analytic_timesheet = self.pool.get('hr.analytic.timesheet') for sheet in self.browse(cr, uid, ids, context=context): for timesheet in sheet.timesheet_ids: toremove.append(timesheet.id) analytic_timesheet.unlink(cr, uid, toremove, context=context) return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context) def onchange_employee_id(self, cr, uid, ids, employee_id, context=None): department_id = False user_id = False if employee_id: empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context) department_id = empl_id.department_id.id user_id = empl_id.user_id.id return {'value': {'department_id': department_id, 'user_id': user_id,}} # ------------------------------------------------ # OpenChatter methods and notifications # ------------------------------------------------ def _needaction_domain_get(self, cr, uid, context=None): emp_obj = self.pool.get('hr.employee') empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context) if not empids: return False dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)] return dom class account_analytic_line(osv.osv): _inherit = "account.analytic.line" def _get_default_date(self, cr, uid, context=None): if context is None: context = {} #get the default date (should be: today) res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context) #if we got the dates from and to from the timesheet and if the default date is in between, we use the default #but if the default isn't included in those dates, we use the date start of the timesheet as default if context.get('timesheet_date_from') and context.get('timesheet_date_to'): if context['timesheet_date_from'] <= res <= context['timesheet_date_to']: return res return context.get('timesheet_date_from') #if we don't get the dates from the timesheet, we return the default value from super() return res class account_analytic_account(osv.osv): 
_inherit = "account.analytic.account" def name_create(self, cr, uid, name, context=None): if context is None: context = {} group_template_required = self.pool['res.users'].has_group(cr, uid, 'account_analytic_analysis.group_template_required') if not context.get('default_use_timesheets') or group_template_required: return super(account_analytic_account, self).name_create(cr, uid, name, context=context) rec_id = self.create(cr, uid, {self._rec_name: name}, context) return self.name_get(cr, uid, [rec_id], context)[0] class hr_timesheet_line(osv.osv): _inherit = "hr.analytic.timesheet" def _sheet(self, cursor, user, ids, name, args, context=None): sheet_obj = self.pool.get('hr_timesheet_sheet.sheet') res = {}.fromkeys(ids, False) for ts_line in self.browse(cursor, user, ids, context=context): sheet_ids = sheet_obj.search(cursor, user, [('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date), ('employee_id.user_id', '=', ts_line.user_id.id), ('state', 'in', ['draft', 'new'])], context=context) if sheet_ids: # [0] because only one sheet possible for an employee between 2 dates res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0] return res def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None): ts_line_ids = [] for ts in self.browse(cr, uid, ids, context=context): cr.execute(""" SELECT l.id FROM hr_analytic_timesheet l INNER JOIN account_analytic_line al ON (l.line_id = al.id) WHERE %(date_to)s >= al.date AND %(date_from)s <= al.date AND %(user_id)s = al.user_id GROUP BY l.id""", {'date_from': ts.date_from, 'date_to': ts.date_to, 'user_id': ts.employee_id.user_id.id,}) ts_line_ids.extend([row[0] for row in cr.fetchall()]) return ts_line_ids def _get_account_analytic_line(self, cr, uid, ids, context=None): ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)]) return ts_line_ids _columns = { 'sheet_id': fields.function(_sheet, string='Sheet', select="1", type='many2one', 
relation='hr_timesheet_sheet.sheet', ondelete="cascade", store={ 'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10), 'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10), 'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10), }, ), } def write(self, cr, uid, ids, values, context=None): if isinstance(ids, (int, long)): ids = [ids] self._check(cr, uid, ids) return super(hr_timesheet_line, self).write(cr, uid, ids, values, context=context) def unlink(self, cr, uid, ids, *args, **kwargs): if isinstance(ids, (int, long)): ids = [ids] self._check(cr, uid, ids) return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs) def _check(self, cr, uid, ids): for att in self.browse(cr, uid, ids): if att.sheet_id and att.sheet_id.state not in ('draft', 'new'): raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.')) return True def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None): return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids]) class hr_attendance(osv.osv): _inherit = "hr.attendance" def _get_default_date(self, cr, uid, context=None): if context is None: context = {} if 'name' in context: return context['name'] + time.strftime(' %H:%M:%S') return time.strftime('%Y-%m-%d %H:%M:%S') def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None): attendance_ids = [] for ts in self.browse(cr, uid, ids, context=context): cr.execute(""" SELECT a.id FROM hr_attendance a INNER JOIN hr_employee e INNER JOIN resource_resource r ON (e.resource_id = r.id) ON (a.employee_id = e.id) LEFT JOIN res_users u ON r.user_id = u.id LEFT JOIN res_partner p ON u.partner_id = p.id WHERE %(date_to)s >= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC')) AND %(date_from)s <= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE 
coalesce(p.tz, 'UTC')) AND %(user_id)s = r.user_id GROUP BY a.id""", {'date_from': ts.date_from, 'date_to': ts.date_to, 'user_id': ts.employee_id.user_id.id,}) attendance_ids.extend([row[0] for row in cr.fetchall()]) return attendance_ids def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None): """ Simulate timesheet in employee timezone Return the attendance date in string format in the employee tz converted from utc timezone as we consider date of employee timesheet is in employee timezone """ employee_obj = self.pool['hr.employee'] tz = False if employee_id: employee = employee_obj.browse(cr, uid, employee_id, context=context) tz = employee.user_id.partner_id.tz if not date: date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) att_tz = timezone(tz or 'utc') attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT) att_tz_dt = pytz.utc.localize(attendance_dt) att_tz_dt = att_tz_dt.astimezone(att_tz) # We take only the date omiting the hours as we compare with timesheet # date_from which is a date format thus using hours would lead to # be out of scope of timesheet att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT) return att_tz_date_str def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None): sheet_obj = self.pool['hr_timesheet_sheet.sheet'] if not date: date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) att_tz_date_str = self._get_attendance_employee_tz( cr, uid, employee_id, date=date, context=context) sheet_ids = sheet_obj.search(cr, uid, [('date_from', '<=', att_tz_date_str), ('date_to', '>=', att_tz_date_str), ('employee_id', '=', employee_id)], limit=1, context=context) return sheet_ids and sheet_ids[0] or False def _sheet(self, cursor, user, ids, name, args, context=None): res = {}.fromkeys(ids, False) for attendance in self.browse(cursor, user, ids, context=context): res[attendance.id] = self._get_current_sheet( cursor, user, attendance.employee_id.id, attendance.name, 
context=context) return res _columns = { 'sheet_id': fields.function(_sheet, string='Sheet', type='many2one', relation='hr_timesheet_sheet.sheet', store={ 'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10), 'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10), }, ) } _defaults = { 'name': _get_default_date, } def create(self, cr, uid, vals, context=None): if context is None: context = {} sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context) if sheet_id: att_tz_date_str = self._get_attendance_employee_tz( cr, uid, vals.get('employee_id'), date=vals.get('name'), context=context) ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context) if ts.state not in ('draft', 'new'): raise osv.except_osv(_('Error!'), _('You can not enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.')) elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str: raise osv.except_osv(_('User Error!'), _('You can not enter an attendance date outside the current timesheet dates.')) return super(hr_attendance,self).create(cr, uid, vals, context=context) def unlink(self, cr, uid, ids, *args, **kwargs): if isinstance(ids, (int, long)): ids = [ids] self._check(cr, uid, ids) return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs) def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} if isinstance(ids, (int, long)): ids = [ids] self._check(cr, uid, ids) res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context) if 'sheet_id' in context: for attendance in self.browse(cr, uid, ids, context=context): if context['sheet_id'] != attendance.sheet_id.id: raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \ 'date outside the current timesheet dates.')) return res def 
_check(self, cr, uid, ids): for att in self.browse(cr, uid, ids): if att.sheet_id and att.sheet_id.state not in ('draft', 'new'): raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet')) return True class hr_timesheet_sheet_sheet_day(osv.osv): _name = "hr_timesheet_sheet.sheet.day" _description = "Timesheets by Period" _auto = False _order='name' _columns = { 'name': fields.date('Date', readonly=True), 'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"), 'total_timesheet': fields.float('Total Timesheet', readonly=True), 'total_attendance': fields.float('Attendance', readonly=True), 'total_difference': fields.float('Difference', readonly=True), } _depends = { 'account.analytic.line': ['date', 'unit_amount'], 'hr.analytic.timesheet': ['line_id', 'sheet_id'], 'hr.attendance': ['action', 'name', 'sheet_id'], } def init(self, cr): cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as SELECT id, name, sheet_id, total_timesheet, total_attendance, cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference FROM (( SELECT MAX(id) as id, name, sheet_id, timezone, SUM(total_timesheet) as total_timesheet, CASE WHEN SUM(orphan_attendances) != 0 THEN (SUM(total_attendance) + CASE WHEN current_date <> name THEN 1440 ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC')) * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC')) END ) ELSE SUM(total_attendance) END /60 as total_attendance FROM (( select min(hrt.id) as id, p.tz as timezone, l.date::date as name, s.id as sheet_id, sum(l.unit_amount) as total_timesheet, 0 as orphan_attendances, 0.0 as total_attendance from hr_analytic_timesheet hrt JOIN account_analytic_line l ON l.id = hrt.line_id LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id JOIN hr_employee e ON s.employee_id = e.id JOIN 
resource_resource r ON e.resource_id = r.id LEFT JOIN res_users u ON r.user_id = u.id LEFT JOIN res_partner p ON u.partner_id = p.id group by l.date::date, s.id, timezone ) union ( select -min(a.id) as id, p.tz as timezone, (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date as name, s.id as sheet_id, 0.0 as total_timesheet, SUM(CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END) as orphan_attendances, SUM(((EXTRACT(hour FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))) * 60) + EXTRACT(minute FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC')))) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance from hr_attendance a LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = a.sheet_id JOIN hr_employee e ON a.employee_id = e.id JOIN resource_resource r ON e.resource_id = r.id LEFT JOIN res_users u ON r.user_id = u.id LEFT JOIN res_partner p ON u.partner_id = p.id WHERE action in ('sign_in', 'sign_out') group by (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date, s.id, timezone )) AS foo GROUP BY name, sheet_id, timezone )) AS bar""") class hr_timesheet_sheet_sheet_account(osv.osv): _name = "hr_timesheet_sheet.sheet.account" _description = "Timesheets by Period" _auto = False _order='name' _columns = { 'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True), 'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True), 'total': fields.float('Total Time', digits=(16,2), readonly=True), 'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True), } _depends = { 'account.analytic.line': ['account_id', 'date', 'to_invoice', 'unit_amount', 'user_id'], 'hr.analytic.timesheet': ['line_id'], 'hr_timesheet_sheet.sheet': ['date_from', 'date_to', 'user_id'], } def init(self, cr): cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as ( select min(hrt.id) as id, l.account_id as name, s.id 
as sheet_id, sum(l.unit_amount) as total, l.to_invoice as invoice_rate from hr_analytic_timesheet hrt left join (account_analytic_line l LEFT JOIN hr_timesheet_sheet_sheet s ON (s.date_to >= l.date AND s.date_from <= l.date AND s.user_id = l.user_id)) on (l.id = hrt.line_id) group by l.account_id, s.id, l.to_invoice )""") class res_company(osv.osv): _inherit = 'res.company' _columns = { 'timesheet_range': fields.selection( [('day','Day'),('week','Week'),('month','Month')], 'Timesheet range', help="Periodicity on which you validate your timesheets."), 'timesheet_max_difference': fields.float('Timesheet allowed difference(Hours)', help="Allowed difference in hours between the sign in/out and the timesheet " \ "computation for one sheet. Set this to 0 if you do not want any control."), } _defaults = { 'timesheet_range': lambda *args: 'week', 'timesheet_max_difference': lambda *args: 0.0 } class hr_employee(osv.osv): ''' Employee ''' _inherit = 'hr.employee' _description = 'Employee' def _timesheet_count(self, cr, uid, ids, field_name, arg, context=None): Sheet = self.pool['hr_timesheet_sheet.sheet'] return { employee_id: Sheet.search_count(cr,uid, [('employee_id', '=', employee_id)], context=context) for employee_id in ids } _columns = { 'timesheet_count': fields.function(_timesheet_count, type='integer', string='Timesheets'), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
wso2/product-private-paas
components/org.wso2.ppaas.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/entity.py
9
21198
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import constants import json class Topology: """ Represents the topology provided by the Cloud Controller """ def __init__(self): self.service_map = {} """ :type : dict[str, Service] """ self.initialized = False """ :type : bool """ self.json_str = None """ :type : str """ def get_services(self): """ Provides the list of services on the topology :return: The list of Service objects :rtype: list[Service] """ return self.service_map.values() def get_service(self, service_name): """ Provides the service information for the given service name :param str service_name: service name to be retrieved :return: Service object of the service, None if the provided service name is invalid :rtype: Service """ if service_name in self.service_map: return self.service_map[service_name] return None def add_service(self, service): """ Adds a service to the list of services on the topology :param Service service: :return: void """ self.service_map[service.service_name] = service def add_services(self, services): """ :param list[Service] services: :return: void """ for service in services: self.add_service(service) def remove_service(self, service_name): """ Removes the service of the provided service name :param str 
service_name: :return: void """ if service_name in self.service_map: self.service_map.pop(service_name) def service_exists(self, service_name): """ Checks if the service of the provided service name exists :param str service_name: :return: True if the service exists, False if otherwise :rtype: bool """ return service_name in self.service_map def clear(self): """ Clears the service information list :return: void """ self.service_map = {} def __str__(self): """ to string override :return: """ return "Topology [serviceMap= %r , initialized= %r ]" % (self.service_map, self.initialized) class Service: """ Represents a service on the topology """ def __init__(self, service_name, service_type): self.service_name = service_name """ :type : str """ self.service_type = service_type """ :type : str """ self.cluster_id_cluster_map = {} """ :type : dict[str, Cluster] """ self.port_map = {} """ :type : dict[str, Port] """ self.properties = {} """ :type : dict[str, str] """ def get_clusters(self): """ Provides the list of clusters in the particular service :return: The list of Cluster objects :rtype: list[Cluster] """ return self.cluster_id_cluster_map.values() def add_cluster(self, cluster): """ Adds a cluster to the service :param Cluster cluster: the cluster to be added :return: void """ self.cluster_id_cluster_map[cluster.cluster_id] = cluster def remove_cluster(self, cluster_id): if cluster_id in self.cluster_id_cluster_map: self.cluster_id_cluster_map.pop(cluster_id) def cluster_exists(self, cluster_id): """ Checks if the cluster with the given cluster id exists for ther service :param str cluster_id: :return: True if the cluster for the given cluster id exists, False if otherwise :rtype: bool """ return cluster_id in self.cluster_id_cluster_map def get_cluster(self, cluster_id): """ Provides the Cluster information for the provided cluster id :param str cluster_id: the cluster id to search for :return: Cluster object for the given cluster id, None if the cluster id is 
invalid :rtype: Cluster """ if cluster_id in self.cluster_id_cluster_map: return self.cluster_id_cluster_map[cluster_id] return None def get_ports(self): """ Returns the list of ports in the particular service :return: The list of Port object :rtype: list[Port] """ return self.port_map.values() def get_port(self, proxy_port): """ Provides the port information for the provided proxy port :param str proxy_port: :return: Port object for the provided port, None if port is invalid :rtype: Port """ if proxy_port in self.port_map: return self.port_map[proxy_port] return None def add_port(self, port): self.port_map[port.proxy] = port def add_ports(self, ports): for port in ports: self.add_port(port) class Cluster: """ Represents a cluster for a service """ def __init__(self, service_name="", cluster_id="", deployment_policy_name="", autoscale_policy_name=""): self.service_name = service_name """ :type : str """ self.cluster_id = cluster_id """ :type : str """ self.deployment_policy_name = deployment_policy_name """ :type : str """ self.autoscale_policy_name = autoscale_policy_name """ :type : str """ self.hostnames = [] """ :type : list[str] """ self.member_map = {} """ :type : dict[str, Member] """ self.tenant_range = None """ :type : str """ self.is_lb_cluster = False """ :type : bool """ self.is_kubernetes_cluster = False """ :type : bool """ # self.status = None # """ :type : str """ self.load_balancer_algorithm_name = None """ :type : str """ self.properties = {} """ :type : dict[str, str] """ self.member_list_json = None """ :type : str """ self.app_id = "" """ :type : str """ self.kubernetesService_map = {} """ :type : dict[str, KubernetesService] """ # Not relevant to cartridge agent # self.instance_id_instance_context_map = {} # """ :type : dict[str, ClusterInstance] """ def add_hostname(self, hostname): self.hostnames.append(hostname) def set_tenant_range(self, tenant_range): Cluster.validate_tenant_range(tenant_range) self.tenant_range = tenant_range def 
get_members(self): """ Provides the list of member information in the cluster :return: The list of Member object :rtype: list[Member] """ return self.member_map.values() def get_kubernetesServices(self): """ Provides the list of kubernetes Services in the cluster :return: The list of KubernetesService object :rtype: list[KubernetesService] """ return self.kubernetesService_map.values() def add_kubernetesService(self, kubernetesService): self.kubernetesService_map[kubernetesService.id] = kubernetesService def add_member(self, member): self.member_map[member.member_id] = member def remove_member(self, member_id): if self.member_exists(member_id): self.member_map.pop(member_id) def get_member(self, member_id): """ Provides the member information for the provided member id :param str member_id: :return: Member object for the provided member id, None if member id is invalid :rtype: Member """ if self.member_exists(member_id): return self.member_map[member_id] return None def member_exists(self, member_id): """ Checks if the member for the provided member id exists in this cluster :param str member_id: member id to be searched :return: True if the member exists, False if otherwise :rtype: bool """ return member_id in self.member_map def __str__(self): return "Cluster [serviceName=" + self.service_name + ", clusterId=" + self.cluster_id \ + ", autoscalePolicyName=" + self.autoscale_policy_name + ", deploymentPolicyName=" \ + self.deployment_policy_name + ", hostNames=" + self.hostnames + ", tenantRange=" + self.tenant_range \ + ", isLbCluster=" + self.is_lb_cluster + ", properties=" + self.properties + "]" def tenant_id_in_range(self, tenant_id): """ Check whether a given tenant id is in tenant range of the cluster. 
:param str tenant_id: tenant id to be checked :return: True if the tenant id is in tenant id range, False if otherwise :rtype: bool """ if self.tenant_range is None: return False if self.tenant_range == "*": return True else: arr = self.tenant_range.split(constants.TENANT_RANGE_DELIMITER) tenant_start = int(arr[0]) if tenant_start <= tenant_id: tenant_end = arr[1] if tenant_end == "*": return True else: if tenant_id <= int(tenant_end): return True return False @staticmethod def validate_tenant_range(tenant_range): """ Validates the tenant range to be either '*' or a delimeted range of numbers :param str tenant_range: The tenant range string to be validated :return: void if the provided tenant range is valid, RuntimeError if otherwise :exception: RuntimeError if the tenant range is invalid """ valid = False if tenant_range == "*": valid = True else: arr = tenant_range.split(constants.TENANT_RANGE_DELIMITER) if len(arr) == 2: if arr[0].isdigit() and arr[1].isdigit(): valid = True elif arr[0].isdigit() and arr[1] == "*": valid = True if not valid: raise RuntimeError("Tenant range %r is not valid" % tenant_range) class Member: """ Represents a member on a particular cluster """ def __init__(self, service_name="", cluster_id="", network_partition_id="", partition_id="", member_id="", cluster_instance_id=""): self.service_name = service_name """ :type : str """ self.cluster_id = cluster_id """ :type : str """ self.network_partition_id = network_partition_id """ :type : str """ self.cluster_instance_id = cluster_instance_id """ :type : str """ self.partition_id = partition_id """ :type : str """ self.member_id = member_id """ :type : str """ self.port_map = {} """ :type : dict[str, Port] """ self.init_time = None """ :type : int """ self.member_public_ips = None """ :type : str """ self.member_default_public_ip = None """ :type : str """ self.status = None """ :type : str """ self.member_private_ips = None """ :type : str """ self.member_default_private_ip = None """ 
:type : str """ self.properties = {} """ :type : dict[str, str] """ self.lb_cluster_id = None """ :type : str """ self.json_str = None """ :type : str """ def is_active(self): """ Checks if the member is in active state :return: True if active, False if otherwise :rtype: bool """ return self.status == MemberStatus.Active def get_ports(self): """ Provides the list of the ports in the member :return: List of Port objects :rtype: list[Port] """ return self.port_map.values() def get_port(self, proxy): """ Provides the port information for the given port id :param str proxy: The port id :return: Port object of the provided port id, None if otherwise :rtype: Port """ if proxy in self.port_map: return self.port_map[proxy] return None def add_port(self, port): self.port_map[port.proxy] = port def add_ports(self, ports): for port in ports: self.add_port(port) def to_json(self): return "{memberId: " + self.member_id + ", status: " + self.status + "}" class KubernetesService: """ Represents a kubernetes service on a particular cluster """ def __init__(self, id, portalIP, protocol, port, containerPort, serviceType, portName): self.id = id """ :type : str """ self.portalIP = portalIP """ :type : str """ self.protocol = protocol """ :type : str """ self.port = port """ :type : str """ self.containerPort = containerPort """ :type : str """ self.serviceType = serviceType """ :type : str """ self.portName = portName """ :type : str """ self.publicIPs = [] """ :type : list[str] """ def add_public_ips(self, public_ip): self.publicIPs.append(public_ip) class Port: """ Represents a port on a particular member """ def __init__(self, protocol, value, proxy): self.protocol = protocol """ :type : str """ self.value = value """ :type : str """ self.proxy = proxy """ :type : str """ def __str__(self): return "Port [protocol=%r, value=%r proxy=%r]" % (self.protocol, self.value, self.proxy) class ServiceType: """ ServiceType enum """ SingleTenant = 1 MultiTenant = 2 class ClusterStatus: """ 
ClusterStatus enum """ Created = 1 In_Maintenance = 2 Removed = 3 class MemberStatus: """ MemberStatus enum """ Created = "Created" Initialized = "Initialized" Starting = "Starting" Active = "Active" In_Maintenance = "In_Maintenance" ReadyToShutDown = "ReadyToShutDown" Suspended = "Suspended" Terminated = "Terminated" class TopologyContext: """ Handles and maintains a model of the topology provided by the Cloud Controller """ topology = Topology() initialized = False @staticmethod def get_topology(): if TopologyContext.topology is None: TopologyContext.topology = Topology() return TopologyContext.topology @staticmethod def update(topology): TopologyContext.topology = topology class Tenant: """ Object type representing the tenant details of a single tenant """ def __init__(self, tenant_id, tenant_domain): self.tenant_id = tenant_id """ :type : int """ self.tenant_domain = tenant_domain """ :type : str """ self.service_name_subscription_map = {} """ :type : dict[str, Subscription] """ def get_subscription(self, service_name): """ Returns the Subscription object related to the provided service name :param str service_name: service name to be retrieved :return: Subscription of the service or None if the service name doesn't exist :rtype: Subscription """ if service_name in self.service_name_subscription_map: return self.service_name_subscription_map[service_name] return None def is_subscribed(self, service_name): """ Checks if the given service name has a subscription from this tenant :param str service_name: name of the service to check :return: True if the tenant is subscribed to the given service name, False if not :rtype: bool """ return service_name in self.service_name_subscription_map def add_subscription(self, subscription): """ Adds a subscription information entry on the subscription list for this tenant :param Subscription subscription: Subscription information to be added :return: void :rtype: void """ 
self.service_name_subscription_map[subscription.service_name] = subscription def remove_subscription(self, service_name): """ Removes the specified subscription details from the subscription list :param str service_name: The service name of the subscription to be removed :return: void :rtype: void """ if service_name in self.service_name_subscription_map: self.service_name_subscription_map.pop(service_name) class Subscription: """ Subscription information of a particular subscription to a service """ def __init__(self, service_name, cluster_ids): self.service_name = service_name """ :type : str """ self.cluster_ids = cluster_ids """ :type : list[str] """ self.subscription_domain_map = {} """ :type : dict[str, SubscriptionDomain] """ def add_subscription_domain(self, domain_name, application_context): """ Adds a subscription domain :param str domain_name: :param str application_context: :return: void :rtype: void """ self.subscription_domain_map[domain_name] = SubscriptionDomain(domain_name, application_context) def remove_subscription_domain(self, domain_name): """ Removes the subscription domain of the specified domain name :param str domain_name: :return: void :rtype: void """ if domain_name in self.subscription_domain_map: self.subscription_domain_map.pop(domain_name) def subscription_domain_exists(self, domain_name): """ Returns the SubscriptionDomain information of the specified domain name :param str domain_name: :return: SubscriptionDomain :rtype: SubscriptionDomain """ return domain_name in self.subscription_domain_map def get_subscription_domains(self): """ Returns the list of subscription domains of this subscription :return: List of SubscriptionDomain objects :rtype: list[SubscriptionDomain] """ return self.subscription_domain_map.values() class SubscriptionDomain: """ Represents a Subscription Domain """ def __init__(self, domain_name, application_context): self.domain_name = domain_name """ :type : str """ self.application_context = application_context 
""" :type : str """ class TenantContext: """ Handles and maintains a model of all the information related to tenants within this instance """ tenants = {} initialized = False tenant_domains = {"carbon.super": Tenant(-1234, "carbon.super")} @staticmethod def add_tenant(tenant): TenantContext.tenants[tenant.tenant_id] = tenant TenantContext.tenant_domains[tenant.tenant_domain] = tenant @staticmethod def remove_tenant(tenant_id): if tenant_id in TenantContext.tenants: tenant = TenantContext.get_tenant(tenant_id) TenantContext.tenants.pop(tenant.tenant_id) TenantContext.tenant_domains.pop(tenant.tenant_domain) @staticmethod def update(tenants): for tenant in tenants: TenantContext.add_tenant(tenant) @staticmethod def get_tenant(tenant_id): """ Gets the Tenant object of the provided tenant ID :param int tenant_id: :return: Tenant object of the provided tenant ID :rtype: Tenant """ if tenant_id in TenantContext.tenants: return TenantContext.tenants[tenant_id] return None @staticmethod def get_tenant_by_domain(tenant_domain): """ Gets the Tenant object of the provided tenant domain :param str tenant_domain: :return: Tenant object of the provided tenant domain :rtype: str """ if tenant_domain in TenantContext.tenant_domains: return TenantContext.tenant_domains[tenant_domain] return None
apache-2.0
namccart/gnuradio
gr-filter/examples/gr_filtdes_callback.py
47
1653
#!/usr/bin/env python # # Copyright 2012 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio.filter import filter_design import sys try: from PyQt4 import Qt, QtCore, QtGui except ImportError: print "Please install PyQt4 to run this script (http://www.riverbankcomputing.co.uk/software/pyqt/download)" raise SystemExit, 1 ''' Callback example Function called when "design" button is pressed or pole-zero plot is changed launch function returns gr_filter_design mainwindow object when callback is not None ''' def print_params(filtobj): print "Filter Count:", filtobj.get_filtercount() print "Filter type:", filtobj.get_restype() print "Filter params", filtobj.get_params() print "Filter Coefficients", filtobj.get_taps() app = Qt.QApplication(sys.argv) #launch function returns gr_filter_design mainwindow object main_win = filter_design.launch(sys.argv, print_params) main_win.show() app.exec_()
gpl-3.0
antivirtel/Flexget
tests/test_thetvdb.py
1
14284
from __future__ import unicode_literals, division, absolute_import import re from datetime import datetime, timedelta import mock import pytest from flexget.manager import Session from flexget.plugins.api_tvdb import persist, TVDBSearchResult, lookup_series, mark_expired, TVDBRequest, TVDBEpisode from flexget.plugins.input.thetvdb_favorites import TVDBUserFavorite @mock.patch('flexget.plugins.api_tvdb.mark_expired') @pytest.mark.online class TestTVDBLookup(object): config = """ templates: global: thetvdb_lookup: yes # Access a tvdb field to cause lazy loading to occur set: afield: "{{ tvdb_id }}{{ tvdb_ep_name }}" tasks: test: mock: - {title: 'House.S01E02.HDTV.XViD-FlexGet'} - {title: 'Breaking.Bad.S01E02.HDTV.XViD-FlexGet'} - {title: 'Doctor.Who.2005.S02E03.PDTV.XViD-FlexGet'} series: - House - Doctor Who 2005 test_search_cache: mock: - {title: 'House.S01E02.HDTV.XViD-FlexGet'} series: - House test_unknown_series: mock: - {title: 'Aoeu.Htns.S01E01.htvd'} series: - Aoeu Htns test_date: mock: - title: the daily show 2012-6-6 series: - the daily show (with jon stewart) test_absolute: mock: - title: naruto 128 series: - naruto test_no_poster_actors: mock: - {title: 'Sex.House.S01E02.HDTV.XViD-FlexGet'} series: - Sex House - The Blacklist """ def test_lookup(self, mocked_expired, execute_task): """thetvdb: Test Lookup (ONLINE)""" persist['auth_tokens'] = {'default': None} task = execute_task('test') assert task.find_entry(tvdb_ep_name='School Reunion'), 'Failed imdb lookup Doctor Who 2005 S02E03' entry = task.find_entry(title='House.S01E02.HDTV.XViD-FlexGet') assert entry['tvdb_id'] == 73255 assert entry['tvdb_absolute_number'] == 3 assert entry['tvdb_rating'] == 9.1 assert entry['tvdb_runtime'] == 45 assert entry['tvdb_season'] == 1 assert entry['tvdb_series_name'] == 'House' assert entry['tvdb_status'] == 'Ended' assert entry['tvdb_air_time'] == '' assert entry['tvdb_airs_day_of_week'] == '' assert re.match('http://thetvdb.com/banners/graphical/73255-g[0-9]+.jpg', 
entry['tvdb_banner']) assert 'http://thetvdb.com/banners/posters/73255-1.jpg' in entry['tvdb_posters'] assert entry['tvdb_content_rating'] == 'TV-14' assert entry['tvdb_episode'] == 2 assert entry['tvdb_first_air_date'] == datetime(2004, 11, 16, 0, 0) assert entry['tvdb_network'] == 'FOX (US)' assert entry['tvdb_genres'] == ['Drama', 'Mystery'] assert 'Jesse Spencer' in entry['tvdb_actors'] assert entry['tvdb_overview'] == 'Go deeper into the medical mysteries of House, TV\'s most compelling ' \ 'drama. Hugh Laurie stars as the brilliant but sarcastic Dr. Gregory' \ ' House, a maverick physician who is devoid of bedside manner. While' \ ' his behavior can border on antisocial, Dr. House thrives on the' \ ' challenge of solving the medical puzzles that other doctors give up on.' \ ' Together with his hand-picked team of young medical experts, he\'ll' \ ' do whatever it takes in the race against the clock to solve the case.' assert entry['tvdb_ep_air_date'] == datetime(2004, 11, 23, 0, 0) assert entry['tvdb_ep_directors'] == 'Peter O\'Fallon' assert entry['tvdb_ep_id'] == 'S01E02' assert entry['tvdb_ep_image'] == 'http://thetvdb.com/banners/episodes/73255/110995.jpg' assert entry['tvdb_ep_name'] == 'Paternity' assert entry['tvdb_ep_overview'] == 'When a teenage lacrosse player is stricken with an unidentifiable brain ' \ 'disease, Dr. House and the team hustle to give his parents answers. ' \ 'Chase breaks the bad news, the kid has MS, but the boy\'s night-terror' \ ' hallucinations disprove the diagnosis and send House and his team back ' \ 'to square one. As the boy\'s health deteriorates. House\'s side-bet on ' \ 'the paternity of the patient infuriates Dr. Cuddy and the teenager\'s ' \ 'parents, but may just pay off in spades.' 
assert entry['tvdb_ep_rating'] == 7.8 def test_no_posters_actors(self, mocked_expired, execute_task): task = execute_task('test_no_poster_actors') entry = task.find_entry(tvdb_series_name='Sex House') assert entry['tvdb_posters'] == [] assert entry['tvdb_actors'] == [] def test_cache(self, mocked_expired, execute_task): persist['auth_tokens'] = {'default': None} task = execute_task('test_search_cache') entry = task.find_entry(tvdb_id=73255) # Force tvdb lazy eval assert entry['afield'] with Session() as session: # Ensure search cache was added search_results = session.query(TVDBSearchResult).all() assert len(search_results) == 3 aliases = ['house', 'house m.d.', 'house md'] for search_result in search_results: assert search_result.series assert search_result.search in aliases # No requests should be sent as we restore from cache with mock.patch('requests.sessions.Session.request', side_effect=Exception('TVDB should restore from cache')) as _: lookup_series('house m.d.', session=session) def test_unknown_series(self, mocked_expired, execute_task): persist['auth_tokens'] = {'default': None} # Test an unknown series does not cause any exceptions task = execute_task('test_unknown_series') # Make sure it didn't make a false match entry = task.find_entry('accepted', title='Aoeu.Htns.S01E01.htvd') assert entry.get('tvdb_id') is None, 'should not have populated tvdb data' def test_absolute(self, mocked_expired, execute_task): persist['auth_tokens'] = {'default': None} task = execute_task('test_absolute') entry = task.find_entry(title='naruto 128') assert entry assert entry['tvdb_ep_name'] == 'A Cry on Deaf Ears' @pytest.mark.online class TestTVDBExpire(object): config = """ templates: global: thetvdb_lookup: yes # Access a tvdb field to cause lazy loading to occur set: afield: "{{ tvdb_id }}{{ tvdb_ep_name }}" tasks: test_mark_expired: mock: - {title: 'House.S02E02.hdtv'} metainfo_series: yes accept_all: yes disable: [seen] """ def test_expire_no_check(self, execute_task): 
persist['auth_tokens'] = {'default': None} def test_run(): # Run the task and check tvdb data was populated. task = execute_task('test_mark_expired') entry = task.find_entry(title='House.S02E02.hdtv') assert entry['tvdb_ep_name'] == 'Autopsy' # Run the task once, this populates data from tvdb test_run() # Should not expire as it was checked less then an hour ago persist['last_check'] = datetime.utcnow() - timedelta(hours=1) with mock.patch('requests.sessions.Session.request', side_effect=Exception('Tried to expire or lookup, less then an hour since last check')) as _: # Ensure series is not marked as expired mark_expired() with Session() as session: ep = session.query(TVDBEpisode)\ .filter(TVDBEpisode.series_id == 73255)\ .filter(TVDBEpisode.episode_number == 2)\ .filter(TVDBEpisode.season_number == 2)\ .first() assert not ep.expired assert not ep.series.expired def test_expire_check(self, execute_task): persist['auth_tokens'] = {'default': None} def test_run(): # Run the task and check tvdb data was populated. task = execute_task('test_mark_expired') entry = task.find_entry(title='House.S02E02.hdtv') assert entry['tvdb_ep_name'] == 'Autopsy' # Run the task once, this populates data from tvdb test_run() # Should expire persist['last_check'] = datetime.utcnow() - timedelta(hours=3) expired_data = [ { "id": 73255, "lastUpdated": 1458186055 }, { "id": 295743, "lastUpdated": 1458186088 } ] # Ensure series is marked as expired with mock.patch.object(TVDBRequest, 'get', side_effect=[expired_data]) as _: mark_expired() with Session() as session: ep = session.query(TVDBEpisode)\ .filter(TVDBEpisode.series_id == 73255)\ .filter(TVDBEpisode.episode_number == 2)\ .filter(TVDBEpisode.season_number == 2)\ .first() assert ep.expired assert ep.series.expired @mock.patch('flexget.plugins.api_tvdb.mark_expired') @pytest.mark.online class TestTVDBFavorites(object): """ Tests thetvdb favorites plugin with a test user at thetvdb. 
Test user info: username: flexget password: flexget Favorites: House, Doctor Who 2005, Penn & Teller: Bullshit, Hawaii Five-0 (2010) """ config = """ tasks: test: mock: - {title: 'House.S01E02.HDTV.XViD-FlexGet'} - {title: 'Doctor.Who.2005.S02E03.PDTV.XViD-FlexGet'} - {title: 'Lost.S03E02.720p-FlexGet'} - {title: 'Breaking.Bad.S02E02.720p.x264'} configure_series: from: thetvdb_favorites: username: flexget password: flexget test_strip_dates: thetvdb_favorites: username: flexget password: flexget strip_dates: yes """ def test_favorites(self, mocked_expired, execute_task): persist['auth_tokens'] = {'default': None} task = execute_task('test') assert task.find_entry('accepted', title='House.S01E02.HDTV.XViD-FlexGet'), \ 'series House should have been accepted' assert task.find_entry('accepted', title='Doctor.Who.2005.S02E03.PDTV.XViD-FlexGet'), \ 'series Doctor Who 2005 should have been accepted' assert task.find_entry('accepted', title='Breaking.Bad.S02E02.720p.x264'), \ 'series Breaking Bad should have been accepted' entry = task.find_entry(title='Lost.S03E02.720p-FlexGet') assert entry, 'Entry not found?' 
assert entry not in task.accepted, \ 'series Lost should not have been accepted' with Session() as session: user = session.query(TVDBUserFavorite).filter(TVDBUserFavorite.username == 'flexget').first() assert user assert len(user.series_ids) > 0 assert user.series_ids == [78804, 84946, 164541, 73255, 81189] def test_strip_date(self, mocked_expired, execute_task): persist['auth_tokens'] = {'default': None} task = execute_task('test_strip_dates') assert task.find_entry(title='Hawaii Five-0'), \ 'series Hawaii Five-0 (2010) should have date stripped' @mock.patch('flexget.plugins.api_tvdb.mark_expired') @pytest.mark.online class TestTVDBSubmit(object): config = """ tasks: add: mock: - {title: 'House.S01E02.HDTV.XViD-FlexGet'} accept_all: true thetvdb_lookup: yes thetvdb_add: username: flexget password: flexget series: - House delete: mock: - {title: 'The.Big.Bang.Theory.S02E02.XVID-Flexget'} accept_all: true thetvdb_lookup: yes thetvdb_remove: username: flexget password: flexget series: - The Big Bang Theory """ def test_add(self, mocked_expired, execute_task): persist['auth_tokens'] = {'default': None} task = execute_task('add') task = task.find_entry(title='House.S01E02.HDTV.XViD-FlexGet') assert task assert task.accepted with Session() as session: user_favs = session.query(TVDBUserFavorite).filter(TVDBUserFavorite.username == 'flexget').first() assert user_favs assert 73255 in user_favs.series_ids def test_delete(self, mocked_expired, execute_task): persist['auth_tokens'] = {'default': None} with Session() as session: user_favs = TVDBUserFavorite(username='flexget') user_favs.series_ids = ['80379'] session.add(user_favs) task = execute_task('delete') task = task.find_entry(title='The.Big.Bang.Theory.S02E02.XVID-Flexget') assert task assert task.accepted with Session() as session: user_favs = session.query(TVDBUserFavorite).filter(TVDBUserFavorite.username == 'flexget').first() assert user_favs assert 80379 not in user_favs.series_ids
mit
GhostThrone/django
tests/settings_tests/tests.py
74
17758
import os import sys import unittest import warnings from types import ModuleType from django.conf import ENVIRONMENT_VARIABLE, LazySettings, Settings, settings from django.core.exceptions import ImproperlyConfigured from django.http import HttpRequest from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, modify_settings, override_settings, signals, ) from django.utils import six @modify_settings(ITEMS={ 'prepend': ['b'], 'append': ['d'], 'remove': ['a', 'e'] }) @override_settings(ITEMS=['a', 'c', 'e'], ITEMS_OUTER=[1, 2, 3], TEST='override', TEST_OUTER='outer') class FullyDecoratedTranTestCase(TransactionTestCase): available_apps = [] def test_override(self): self.assertListEqual(settings.ITEMS, ['b', 'c', 'd']) self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3]) self.assertEqual(settings.TEST, 'override') self.assertEqual(settings.TEST_OUTER, 'outer') @modify_settings(ITEMS={ 'append': ['e', 'f'], 'prepend': ['a'], 'remove': ['d', 'c'], }) def test_method_list_override(self): self.assertListEqual(settings.ITEMS, ['a', 'b', 'e', 'f']) self.assertListEqual(settings.ITEMS_OUTER, [1, 2, 3]) @modify_settings(ITEMS={ 'append': ['b'], 'prepend': ['d'], 'remove': ['a', 'c', 'e'], }) def test_method_list_override_no_ops(self): self.assertListEqual(settings.ITEMS, ['b', 'd']) @modify_settings(ITEMS={ 'append': 'e', 'prepend': 'a', 'remove': 'c', }) def test_method_list_override_strings(self): self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e']) @modify_settings(ITEMS={'remove': ['b', 'd']}) @modify_settings(ITEMS={'append': ['b'], 'prepend': ['d']}) def test_method_list_override_nested_order(self): self.assertListEqual(settings.ITEMS, ['d', 'c', 'b']) @override_settings(TEST='override2') def test_method_override(self): self.assertEqual(settings.TEST, 'override2') self.assertEqual(settings.TEST_OUTER, 'outer') def test_decorated_testcase_name(self): self.assertEqual(FullyDecoratedTranTestCase.__name__, 'FullyDecoratedTranTestCase') def 
test_decorated_testcase_module(self): self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__) @modify_settings(ITEMS={ 'prepend': ['b'], 'append': ['d'], 'remove': ['a', 'e'] }) @override_settings(ITEMS=['a', 'c', 'e'], TEST='override') class FullyDecoratedTestCase(TestCase): def test_override(self): self.assertListEqual(settings.ITEMS, ['b', 'c', 'd']) self.assertEqual(settings.TEST, 'override') @modify_settings(ITEMS={ 'append': 'e', 'prepend': 'a', 'remove': 'c', }) @override_settings(TEST='override2') def test_method_override(self): self.assertListEqual(settings.ITEMS, ['a', 'b', 'd', 'e']) self.assertEqual(settings.TEST, 'override2') class ClassDecoratedTestCaseSuper(TestCase): """ Dummy class for testing max recursion error in child class call to super(). Refs #17011. """ def test_max_recursion_error(self): pass @override_settings(TEST='override') class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper): @classmethod def setUpClass(cls): super(ClassDecoratedTestCase, cls).setUpClass() cls.foo = getattr(settings, 'TEST', 'BUG') def test_override(self): self.assertEqual(settings.TEST, 'override') def test_setupclass_override(self): """Test that settings are overridden within setUpClass -- refs #21281""" self.assertEqual(self.foo, 'override') @override_settings(TEST='override2') def test_method_override(self): self.assertEqual(settings.TEST, 'override2') def test_max_recursion_error(self): """ Overriding a method on a super class and then calling that method on the super class should not trigger infinite recursion. See #17011. 
""" try: super(ClassDecoratedTestCase, self).test_max_recursion_error() except RuntimeError: self.fail() @modify_settings(ITEMS={'append': 'mother'}) @override_settings(ITEMS=['father'], TEST='override-parent') class ParentDecoratedTestCase(TestCase): pass @modify_settings(ITEMS={'append': ['child']}) @override_settings(TEST='override-child') class ChildDecoratedTestCase(ParentDecoratedTestCase): def test_override_settings_inheritance(self): self.assertEqual(settings.ITEMS, ['father', 'mother', 'child']) self.assertEqual(settings.TEST, 'override-child') class SettingsTests(SimpleTestCase): def setUp(self): self.testvalue = None signals.setting_changed.connect(self.signal_callback) def tearDown(self): signals.setting_changed.disconnect(self.signal_callback) def signal_callback(self, sender, setting, value, **kwargs): if setting == 'TEST': self.testvalue = value def test_override(self): settings.TEST = 'test' self.assertEqual('test', settings.TEST) with self.settings(TEST='override'): self.assertEqual('override', settings.TEST) self.assertEqual('test', settings.TEST) del settings.TEST def test_override_change(self): settings.TEST = 'test' self.assertEqual('test', settings.TEST) with self.settings(TEST='override'): self.assertEqual('override', settings.TEST) settings.TEST = 'test2' self.assertEqual('test', settings.TEST) del settings.TEST def test_override_doesnt_leak(self): self.assertRaises(AttributeError, getattr, settings, 'TEST') with self.settings(TEST='override'): self.assertEqual('override', settings.TEST) settings.TEST = 'test' self.assertRaises(AttributeError, getattr, settings, 'TEST') @override_settings(TEST='override') def test_decorator(self): self.assertEqual('override', settings.TEST) def test_context_manager(self): self.assertRaises(AttributeError, getattr, settings, 'TEST') override = override_settings(TEST='override') self.assertRaises(AttributeError, getattr, settings, 'TEST') override.enable() self.assertEqual('override', settings.TEST) 
override.disable() self.assertRaises(AttributeError, getattr, settings, 'TEST') def test_class_decorator(self): # SimpleTestCase can be decorated by override_settings, but not ut.TestCase class SimpleTestCaseSubclass(SimpleTestCase): pass class UnittestTestCaseSubclass(unittest.TestCase): pass decorated = override_settings(TEST='override')(SimpleTestCaseSubclass) self.assertIsInstance(decorated, type) self.assertTrue(issubclass(decorated, SimpleTestCase)) with six.assertRaisesRegex(self, Exception, "Only subclasses of Django SimpleTestCase*"): decorated = override_settings(TEST='override')(UnittestTestCaseSubclass) def test_signal_callback_context_manager(self): self.assertRaises(AttributeError, getattr, settings, 'TEST') with self.settings(TEST='override'): self.assertEqual(self.testvalue, 'override') self.assertEqual(self.testvalue, None) @override_settings(TEST='override') def test_signal_callback_decorator(self): self.assertEqual(self.testvalue, 'override') # # Regression tests for #10130: deleting settings. # def test_settings_delete(self): settings.TEST = 'test' self.assertEqual('test', settings.TEST) del settings.TEST self.assertRaises(AttributeError, getattr, settings, 'TEST') def test_settings_delete_wrapped(self): self.assertRaises(TypeError, delattr, settings, '_wrapped') def test_override_settings_delete(self): """ Allow deletion of a setting in an overridden settings set (#18824) """ previous_i18n = settings.USE_I18N previous_l10n = settings.USE_L10N with self.settings(USE_I18N=False): del settings.USE_I18N self.assertRaises(AttributeError, getattr, settings, 'USE_I18N') # Should also work for a non-overridden setting del settings.USE_L10N self.assertRaises(AttributeError, getattr, settings, 'USE_L10N') self.assertEqual(settings.USE_I18N, previous_i18n) self.assertEqual(settings.USE_L10N, previous_l10n) def test_override_settings_nested(self): """ Test that override_settings uses the actual _wrapped attribute at runtime, not when it was instantiated. 
""" self.assertRaises(AttributeError, getattr, settings, 'TEST') self.assertRaises(AttributeError, getattr, settings, 'TEST2') inner = override_settings(TEST2='override') with override_settings(TEST='override'): self.assertEqual('override', settings.TEST) with inner: self.assertEqual('override', settings.TEST) self.assertEqual('override', settings.TEST2) # inner's __exit__ should have restored the settings of the outer # context manager, not those when the class was instantiated self.assertEqual('override', settings.TEST) self.assertRaises(AttributeError, getattr, settings, 'TEST2') self.assertRaises(AttributeError, getattr, settings, 'TEST') self.assertRaises(AttributeError, getattr, settings, 'TEST2') class TestComplexSettingOverride(SimpleTestCase): def setUp(self): self.old_warn_override_settings = signals.COMPLEX_OVERRIDE_SETTINGS.copy() signals.COMPLEX_OVERRIDE_SETTINGS.add('TEST_WARN') def tearDown(self): signals.COMPLEX_OVERRIDE_SETTINGS = self.old_warn_override_settings self.assertNotIn('TEST_WARN', signals.COMPLEX_OVERRIDE_SETTINGS) def test_complex_override_warning(self): """Regression test for #19031""" with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") with override_settings(TEST_WARN='override'): self.assertEqual(settings.TEST_WARN, 'override') self.assertEqual(len(w), 1) # File extension may by .py, .pyc, etc. Compare only basename. self.assertEqual(os.path.splitext(w[0].filename)[0], os.path.splitext(__file__)[0]) self.assertEqual(str(w[0].message), 'Overriding setting TEST_WARN can lead to unexpected behavior.') class TrailingSlashURLTests(SimpleTestCase): """ Tests for the MEDIA_URL and STATIC_URL settings. They must end with a slash to ensure there's a deterministic way to build paths in templates. 
""" settings_module = settings def setUp(self): self._original_media_url = self.settings_module.MEDIA_URL self._original_static_url = self.settings_module.STATIC_URL def tearDown(self): self.settings_module.MEDIA_URL = self._original_media_url self.settings_module.STATIC_URL = self._original_static_url def test_blank(self): """ The empty string is accepted, even though it doesn't end in a slash. """ self.settings_module.MEDIA_URL = '' self.assertEqual('', self.settings_module.MEDIA_URL) self.settings_module.STATIC_URL = '' self.assertEqual('', self.settings_module.STATIC_URL) def test_end_slash(self): """ It works if the value ends in a slash. """ self.settings_module.MEDIA_URL = '/foo/' self.assertEqual('/foo/', self.settings_module.MEDIA_URL) self.settings_module.MEDIA_URL = 'http://media.foo.com/' self.assertEqual('http://media.foo.com/', self.settings_module.MEDIA_URL) self.settings_module.STATIC_URL = '/foo/' self.assertEqual('/foo/', self.settings_module.STATIC_URL) self.settings_module.STATIC_URL = 'http://static.foo.com/' self.assertEqual('http://static.foo.com/', self.settings_module.STATIC_URL) def test_no_end_slash(self): """ An ImproperlyConfigured exception is raised if the value doesn't end in a slash. """ with self.assertRaises(ImproperlyConfigured): self.settings_module.MEDIA_URL = '/foo' with self.assertRaises(ImproperlyConfigured): self.settings_module.MEDIA_URL = 'http://media.foo.com' with self.assertRaises(ImproperlyConfigured): self.settings_module.STATIC_URL = '/foo' with self.assertRaises(ImproperlyConfigured): self.settings_module.STATIC_URL = 'http://static.foo.com' def test_double_slash(self): """ If the value ends in more than one slash, presume they know what they're doing. 
""" self.settings_module.MEDIA_URL = '/wrong//' self.assertEqual('/wrong//', self.settings_module.MEDIA_URL) self.settings_module.MEDIA_URL = 'http://media.foo.com/wrong//' self.assertEqual('http://media.foo.com/wrong//', self.settings_module.MEDIA_URL) self.settings_module.STATIC_URL = '/wrong//' self.assertEqual('/wrong//', self.settings_module.STATIC_URL) self.settings_module.STATIC_URL = 'http://static.foo.com/wrong//' self.assertEqual('http://static.foo.com/wrong//', self.settings_module.STATIC_URL) class SecureProxySslHeaderTest(SimpleTestCase): settings_module = settings def setUp(self): self._original_setting = self.settings_module.SECURE_PROXY_SSL_HEADER def tearDown(self): self.settings_module.SECURE_PROXY_SSL_HEADER = self._original_setting def test_none(self): self.settings_module.SECURE_PROXY_SSL_HEADER = None req = HttpRequest() self.assertEqual(req.is_secure(), False) def test_set_without_xheader(self): self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') req = HttpRequest() self.assertEqual(req.is_secure(), False) def test_set_with_xheader_wrong(self): self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') req = HttpRequest() req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'wrongvalue' self.assertEqual(req.is_secure(), False) def test_set_with_xheader_right(self): self.settings_module.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') req = HttpRequest() req.META['HTTP_X_FORWARDED_PROTOCOL'] = 'https' self.assertEqual(req.is_secure(), True) class IsOverriddenTest(SimpleTestCase): def test_configure(self): s = LazySettings() s.configure(SECRET_KEY='foo') self.assertTrue(s.is_overridden('SECRET_KEY')) def test_module(self): settings_module = ModuleType('fake_settings_module') settings_module.SECRET_KEY = 'foo' sys.modules['fake_settings_module'] = settings_module try: s = Settings('fake_settings_module') self.assertTrue(s.is_overridden('SECRET_KEY')) 
self.assertFalse(s.is_overridden('ALLOWED_HOSTS')) finally: del sys.modules['fake_settings_module'] def test_override(self): self.assertFalse(settings.is_overridden('ALLOWED_HOSTS')) with override_settings(ALLOWED_HOSTS=[]): self.assertTrue(settings.is_overridden('ALLOWED_HOSTS')) def test_unevaluated_lazysettings_repr(self): lazy_settings = LazySettings() expected = '<LazySettings [Unevaluated]>' self.assertEqual(repr(lazy_settings), expected) def test_evaluated_lazysettings_repr(self): lazy_settings = LazySettings() module = os.environ.get(ENVIRONMENT_VARIABLE) expected = '<LazySettings "%s">' % module # Force evaluation of the lazy object. lazy_settings.APPEND_SLASH self.assertEqual(repr(lazy_settings), expected) def test_usersettingsholder_repr(self): lazy_settings = LazySettings() lazy_settings.configure(APPEND_SLASH=False) expected = '<UserSettingsHolder>' self.assertEqual(repr(lazy_settings._wrapped), expected) def test_settings_repr(self): module = os.environ.get(ENVIRONMENT_VARIABLE) lazy_settings = Settings(module) expected = '<Settings "%s">' % module self.assertEqual(repr(lazy_settings), expected) class TestListSettings(unittest.TestCase): """ Make sure settings that should be lists or tuples throw ImproperlyConfigured if they are set to a string instead of a list or tuple. """ list_or_tuple_settings = ( "INSTALLED_APPS", "TEMPLATE_DIRS", "LOCALE_PATHS", ) def test_tuple_settings(self): settings_module = ModuleType('fake_settings_module') settings_module.SECRET_KEY = 'foo' for setting in self.list_or_tuple_settings: setattr(settings_module, setting, ('non_list_or_tuple_value')) sys.modules['fake_settings_module'] = settings_module try: with self.assertRaises(ImproperlyConfigured): Settings('fake_settings_module') finally: del sys.modules['fake_settings_module'] delattr(settings_module, setting)
bsd-3-clause
pombredanne/1trillioneuros
libs/relevance/processor.py
3
3087
#!/usr/bin/env python # Encoding: utf-8 # ----------------------------------------------------------------------------- # Project : OKF - Spending Stories # ----------------------------------------------------------------------------- # Author : Edouard Richard <edou4rd@gmail.com> # ----------------------------------------------------------------------------- # License : GNU General Public License # ----------------------------------------------------------------------------- # Creation : 21-Aug-2013 # Last mod : 11-Oct-2013 # ----------------------------------------------------------------------------- # This file is part of Spending Stories. # # Spending Stories is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Spending Stories is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Spending Stories. If not, see <http://www.gnu.org/licenses/>. from relevance import Relevance class Processor(object): """ Base class to compute a relevance """ def compute(self, amount, compared_to, *args, **kwargs): """ Should be implemented and return a Relevance instance """ return self.__nice_equivalence(amount, compared_to) def __nice_equivalence(self, amount, compared_to): ratio = amount/compared_to * 100 relevance = None if 90 <= ratio <= 110: relevance = Relevance(10, Relevance.RELEVANCE_TYPE_EQUIVALENT, 1) elif 49 < ratio < 51: relevance = Relevance(9, Relevance.RELEVANCE_TYPE_HALF, 0.5) elif ratio > 100: # x200, x500, x1000. 
For instance: the query is twice the amount nice_multiple = False ratio_rounded = round(ratio) relevance_score = 6 for i in range(1, 10): # we try to find with 4 percent of tolerance the nearest factor # as <factor> * <value> = relevance_for hundred_mult = i * 100 tolerance = 4 nice_range = range(hundred_mult-tolerance, hundred_mult+tolerance) if ratio_rounded in nice_range: nice_multiple = i break if nice_multiple: if nice_multiple in [2, 5, 10]: relevance_score = 8 elif nice_multiple in range(3, 9): relevance_score = 7 elif nice_multiple > 10: relevance_score = 5 relevance = Relevance(relevance_score, Relevance.RELEVANCE_TYPE_MULTIPLE, nice_multiple) return relevance # EOF
gpl-3.0
GeoDaCenter/CAST
stars/visualization/maps/DynamicLocalG.py
1
25412
__author__ = "Xun Li <xunli@asu.edu> " __all__ = ["DynamicLocalG", "DynamicLocalGQueryDialog", "ShowDynamicLocalGMap"] import os,math, datetime import wx import numpy as np import pysal import stars from ShapeMap import * from DynamicLisaMap import DynamicLISAMap, DynamicLISAQueryDialog from stars.visualization.DynamicControl import DynamicMapControl from stars.visualization.DynamicWidget import DynamicMapWidget from stars.visualization.PlotWidget import PlottingCanvas from stars.visualization import PlotWidget, AbstractData from stars.visualization.utils import View2ScreenTransform, GetDateTimeIntervals, FilterShapeList from stars.visualization.plots.SubTrendGraph import SubTrendGraph from stars.visualization.dialogs import choose_local_g_settings class DynamicLocalG(ShapeMap): """ """ def __init__(self, parent, layers, **kwargs): ShapeMap.__init__(self,parent, layers) try: self.weight_file = kwargs["weight"] self.cs_data_dict = kwargs["query_data"] self.bufferWidth, self.bufferHeight = kwargs["size"] self.step, self.step_by = kwargs["step"] ,kwargs["step_by"] self.start_date, self.end_date = kwargs["start"],kwargs["end"] self.nav_left = None self.nav_right = None self.bStrip = True # preprocessing parameters self.parent = parent self.layer = layers[0] self.data_sel_keys = sorted(self.cs_data_dict.keys()) self.data_sel_values = [self.cs_data_dict[i] for i in self.data_sel_keys] self.weight = pysal.open(self.weight_file).read() self.t = len(self.cs_data_dict) # number of data slices self.n = len(self.data_sel_values[0]) # number of shape objects self.extent = self.layer.extent self.view = View2ScreenTransform( self.extent, self.bufferWidth, self.bufferHeight - self.bufferHeight/3.0 ) self.tick = 0 self.datetime_intervals, self.interval_labels = GetDateTimeIntervals(self.start_date, self.end_date,self.t, self.step, self.step_by) self.setupDynamicControls() ttl = "" if "title" not in kwargs else kwargs["title"] self.parentFrame.SetTitle('Local G Map-%s %s' % 
(self.layer.name, ttl)) self.dynamic_control = DynamicMapControl(self.parentFrame,self.t+1,self.updateDraw) self.trendgraphWidget = None self.popupTrendGraph = None # preprocessing Gi* SpaceTime maps self.processDynamicLocalG() except Exception as err: detail_message = err.message if err.message == "dimension mismatch": detail_message = "The number of time intervals doesn't match time weights and space-time query." message = """Dynamic Local G map could not be created. Please re-select appropriate parameters and weights file. Details:""" + detail_message self.ShowMsgBox(message) self.UnRegister() if self.trendgraphWidget: self.trendgraphWidget.Close(True) if self.popupTrendGraph: self.popupTrendGraph.Close(True) self.parentFrame.Close(True) if os.name == 'nt': self.Destroy() return None def OnClose(self, event): self.UnRegister() if self.trendgraphWidget: self.trendgraphWidget.Close(True) if self.popupTrendGraph: self.popupTrendGraph.Close(True) event.Skip() def setupDynamicControls(self): """ assign labels of dynamic controls """ try: self.parentWidget = self.parent.GetParent() self.slider = self.parentWidget.animate_slider if isinstance(self.start_date, datetime.date): self.parentWidget.label_start.SetLabel('%2d/%2d/%4d'% (self.start_date.day,self.start_date.month,self.start_date.year)) self.parentWidget.label_end.SetLabel('%2d/%2d/%4d'% (self.end_date.day,self.end_date.month,self.end_date.year)) else: self.parentWidget.label_start.SetLabel('%d'% self.start_date) self.parentWidget.label_end.SetLabel('%4d'% self.end_date) self.parentWidget.label_current.SetLabel('current: %d (%d-%s period)' % (1,self.step, self.step_by)) except: raise Exception("Setup dynamic controls in toolbar failed!") def processDynamicLocalG(self): b_gstar, b_binary = choose_local_g_settings(self) map_type = 'Gi*' if b_gstar else 'Gi' add_type = 'binary' if b_binary else 'row-standardized' self.parentFrame.SetTitle('Local G Map (%s,%s)-%s' % (map_type,add_type,self.layer.name)) 
self.space_gstar = dict() self.space_gstar_z= dict() for tid,obs in self.cs_data_dict.iteritems(): y = np.array(obs) if b_binary == False: lg = pysal.esda.getisord.G_Local(y,self.weight,star=b_gstar) else: lg = pysal.esda.getisord.G_Local(y,self.weight,star=b_gstar,transform='B') self.space_gstar[tid] = lg.p_sim self.space_gstar_z[tid] = lg.Zs trendgraph_data = dict() for i in range(self.n): data = [] for j in range(self.t): data.append(self.cs_data_dict[j][i]) trendgraph_data[i] = data self.trendgraph_data = trendgraph_data # default color schema for Gi* self.HH_color = stars.LISA_HH_COLOR self.LL_color = stars.LISA_LL_COLOR self.NOT_SIG_color = stars.LISA_NOT_SIG_COLOR #self.OBSOLETE_color = stars.LISA_OBSOLETE_COLOR color_group =[self.NOT_SIG_color,self.HH_color,self.LL_color] label_group = ["Not Significant","High-High","Low-Low"] self.color_schema_dict[self.layer.name] = ColorSchema(color_group,label_group) self.gi_color_group = color_group self.updateDraw(0) # Thread-based controller for dynamic LISA self.dynamic_control = DynamicMapControl(self.parentFrame,self.t,self.updateDraw) def draw_selected_by_ids(self, shape_ids_dict, dc=None): super(DynamicLocalG, self).draw_selected_by_ids(shape_ids_dict,dc) self.selected_shape_ids = shape_ids_dict def draw_selected_by_region(self,dc, region, isEvtResponse=False, isScreenCoordinates=False): super(DynamicLocalG, self).draw_selected_by_region( dc, region, isEvtResponse, isScreenCoordinates) def OnSize(self,event): """ overwrite OnSize in ShapeMap.py """ self.bufferWidth,self.bufferHeight = self.GetClientSize() if self.bufferHeight > 0: if self.bStrip == False: self.view.pixel_height = self.bufferHeight else: self.view.pixel_height = self.bufferHeight - self.bufferHeight/3.0 self.view.pixel_width = self.bufferWidth self.view.init() if self.bStrip: self.stripBuffer = None self.reInitBuffer = True def OnMotion(self, event): """ """ if self.bStrip: mouse_end_x, mouse_end_y = (event.GetX(), event.GetY()) # check for left 
if self.nav_left: if self.nav_left[0] <= mouse_end_x <= self.nav_left[2] and \ self.nav_left[1] <= mouse_end_y <= self.nav_left[3]: return # determine for right if self.nav_right: if self.nav_right[0] <= mouse_end_x <= self.nav_right[2] and \ self.nav_right[1] <= mouse_end_y <= self.nav_right[3]: return if event.Dragging() and event.LeftIsDown() and self.isMouseDrawing: x, y = event.GetX(), event.GetY() # while mouse is down and moving if self.map_operation_type == stars.MAP_OP_PAN: # disable PAN (not support in this version) return # give the rest task to super class super(DynamicLocalG,self).OnMotion(event) def Update(self, tick): """ When SLIDER is dragged """ self.updateDraw(tick) def updateDraw(self,tick): """ Called for dynamic updating the map content """ self.tick = tick p_values = self.space_gstar[tick] z_values = self.space_gstar_z[tick] # 0 not significant, 6 significant change not_sig = list(np.where(p_values>0.05)[0]) sig = set(np.where(p_values<=0.05)[0]) hotspots = list(sig.intersection(set(np.where(z_values>=0)[0])) ) coldspots = list(sig.intersection(set(np.where(z_values<0)[0])) ) id_groups = [not_sig,hotspots,coldspots] self.id_groups = id_groups self.draw_layers[self.layer].set_data_group(id_groups) self.draw_layers[self.layer].set_fill_color_group(self.gi_color_group) edge_clr = self.color_schema_dict[self.layer.name].edge_color self.draw_layers[self.layer].set_edge_color(edge_clr) # trigger to draw self.reInitBuffer = True self.parentWidget.label_current.SetLabel('current: %d (%d-%s period)' % (tick+1,self.step, self.step_by)) def DoDraw(self, dc): """ Overwrite this function from base class for customized drawing """ super(DynamicLocalG, self).DoDraw(dc) if self.bStrip: self.drawStripView(dc) def OnLeftUp(self, event): """ override for click on strip view """ if self.bStrip: mouse_end_x, mouse_end_y = (event.GetX(), event.GetY()) # check for left if self.nav_left: if self.nav_left[0] <= mouse_end_x <= self.nav_left[0] + self.nav_left[2] and \ 
self.nav_left[1] <= mouse_end_y <= self.nav_left[1] + self.nav_left[3]: self.tick = self.tick -1 if self.tick>0 else 0 self.updateDraw(self.tick) # determine for right if self.nav_right: if self.nav_right[0] <= mouse_end_x <= self.nav_right[0] + self.nav_right[2] and \ self.nav_right[1] <= mouse_end_y <= self.nav_right[1] + self.nav_right[3]: self.tick = self.tick +1 if self.tick<=self.n else self.tick self.updateDraw(self.tick) # give the rest task to super class super(DynamicLocalG,self).OnLeftUp(event) def drawStripView(self,dc): """ For each Gi map at T_i, two related Gi maps at T_(i-1) ant T_(i+1) will be displayed in this strip area """ n = len(self.data_sel_keys) if n <= 1: return start = self.tick if start+1 > n: return end = start + 2 # flag for drawing navigation arrow b2LeftArrow = True if self.tick > 0 else False b2RightArrow = True if self.tick < n-2 else False # at area: 0,self.bufferHeight * 2/3.0 # draw a light gray area at the bottom first font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) dc.SetFont(font) dc.SetPen(wx.TRANSPARENT_PEN) brush = wx.Brush(stars.STRIP_VIEW_BG_COLOR) dc.SetBrush(brush) framePos = 0, self.bufferHeight * 2.0/3.0 dc.DrawRectangle(framePos[0],framePos[1], self.bufferWidth, self.bufferHeight/3.0) # calculate width and height for each bmp bmpFrameWidth = self.bufferWidth / 2.0 # frame is divided into 2 parts bmpFrameHeight = self.bufferHeight / 3.0 bmpWidth = bmpFrameWidth * 0.6 bmpHeight = bmpFrameHeight * 0.8 bmpOffsetX = (bmpFrameWidth - bmpWidth )/2.0 bmpOffsetY = (bmpFrameHeight- bmpHeight)/2.0 # draw text for center large graph start_date, end_date = self.datetime_intervals[self.tick] if isinstance(start_date, datetime.date): info_tip = "t%d:(%d/%d/%d-%d/%d/%d)" % \ (self.tick+1,start_date.month,start_date.day,start_date.year, end_date.month, end_date.day, end_date.year) else: info_tip = "t%d - t%d" % (start_date, end_date) txt_w,txt_h = dc.GetTextExtent(info_tip) dc.DrawText(info_tip, (self.bufferWidth - 
txt_w)/2, framePos[1] - txt_h) # draw two related Gi* maps in strip area dc.SetBrush(wx.Brush(stars.STRIP_VIEW_MAP_BG_COLOR)) #for i in range(start, end): if self.tick - 1 >= 0: start_pos = bmpOffsetX, framePos[1]+bmpOffsetY dc.DrawRectangle(start_pos[0], start_pos[1], bmpWidth, bmpHeight) bmp = wx.EmptyBitmapRGBA( bmpFrameWidth, bmpFrameHeight, red = stars.STRIP_VIEW_BG_COLOR.red, green = stars.STRIP_VIEW_BG_COLOR.green, blue = stars.STRIP_VIEW_BG_COLOR.blue, alpha = stars.STRIP_VIEW_BG_COLOR.alpha ) bmp = self.drawSubGiMap(self.tick-1,bmpWidth, bmpHeight, bmp) dc.DrawBitmap(bmp, start_pos[0], start_pos[1]) start_date, end_date = self.datetime_intervals[self.tick-1] if isinstance(start_date, datetime.date): info_tip = "t%d:(%d/%d/%d-%d/%d/%d)" % \ (self.tick,start_date.month,start_date.day,start_date.year, end_date.month, end_date.day, end_date.year) else: info_tip = "t%d - t%d" % (start_date, end_date) txt_w,txt_h = dc.GetTextExtent(info_tip) dc.DrawText(info_tip, start_pos[0] + (bmpWidth - txt_w)/2, start_pos[1]+bmpHeight+2) if self.tick + 1 < self.t: start_pos = bmpFrameWidth + bmpOffsetX , framePos[1]+bmpOffsetY dc.DrawRectangle(start_pos[0], start_pos[1], bmpWidth, bmpHeight) bmp = wx.EmptyBitmapRGBA( bmpFrameWidth, bmpFrameHeight, red = stars.STRIP_VIEW_BG_COLOR.red, green = stars.STRIP_VIEW_BG_COLOR.green, blue = stars.STRIP_VIEW_BG_COLOR.blue, alpha = stars.STRIP_VIEW_BG_COLOR.alpha ) bmp = self.drawSubGiMap(self.tick+1,bmpWidth, bmpHeight, bmp) dc.DrawBitmap(bmp, start_pos[0], start_pos[1]) start_date, end_date = self.datetime_intervals[self.tick+1] if isinstance(start_date, datetime.date): info_tip = "t%d:(%d/%d/%d-%d/%d/%d)" % \ (self.tick+2,start_date.month,start_date.day,start_date.year, end_date.month, end_date.day, end_date.year) else: info_tip = "t%d - t%d" % (start_date, end_date) txt_w,txt_h = dc.GetTextExtent(info_tip) dc.DrawText(info_tip, start_pos[0] + (bmpWidth - txt_w)/2, start_pos[1]+bmpHeight+2) # draw navigation arrows arrow_y = 
framePos[1] + bmpFrameHeight/2.0 dc.SetFont(wx.Font(stars.NAV_ARROW_FONT_SIZE, wx.NORMAL, wx.NORMAL, wx.NORMAL)) dc.SetBrush(wx.Brush(stars.STRIP_VIEW_NAV_BAR_BG_COLOR)) dc.SetPen(wx.WHITE_PEN) if b2LeftArrow: self.nav_left = framePos[0], framePos[1], 20, self.bufferHeight/3.0 dc.DrawRectangle(self.nav_left[0], self.nav_left[1], self.nav_left[2], self.nav_left[3]) dc.SetPen(wx.WHITE_PEN) dc.DrawText("<<", framePos[0]+3, arrow_y) else: self.nav_left = None if b2RightArrow: self.nav_right = framePos[0]+self.bufferWidth - 20,framePos[1], 20, self.bufferHeight/3.0 dc.DrawRectangle(self.nav_right[0], self.nav_right[1], self.nav_right[2], self.nav_right[3]) dc.SetPen(wx.WHITE_PEN) dc.DrawText(">>", self.bufferWidth-15, arrow_y) else: self.nav_right = None def drawSubGiMap(self, idx, bufferWidth, bufferHeight,bmp): """ Draw two relative Gi* maps for current Gi* map """ dc = wx.BufferedDC(None, bmp) dc.SetBrush(wx.WHITE_BRUSH) dc.SetPen(wx.TRANSPARENT_PEN) dc.DrawRectangle(0,0,bufferWidth,bufferHeight) if not "Linux" in stars.APP_PLATFORM: # not good drawing effect using GCDC in linux dc = wx.GCDC(dc) view = View2ScreenTransform( self.extent, bufferWidth, bufferHeight ) p_values = self.space_gstar[idx] z_values = self.space_gstar_z[idx] not_sig = list(np.where(p_values>0.05)[0]) sig = set(np.where(p_values<=0.05)[0]) hotspots = list(sig.intersection(set(np.where(z_values>=0)[0])) ) coldspots = list(sig.intersection(set(np.where(z_values<0)[0])) ) id_groups = [not_sig,hotspots,coldspots] from stars.visualization.maps.BaseMap import PolygonLayer draw_layer = PolygonLayer(self, self.layer, build_spatial_index=False) #edge_clr = wx.Colour(200,200,200, self.opaque) edge_clr = self.color_schema_dict[self.layer.name].edge_color draw_layer.set_edge_color(edge_clr) draw_layer.set_data_group(id_groups) draw_layer.set_fill_color_group(self.gi_color_group) draw_layer.draw(dc, view) return bmp def OnRightUp(self,event): menu = wx.Menu() menu.Append(210, "Select Neighbors", "") 
menu.Append(211, "Cancel Select Neighbors", "") #menu.Append(212, "Toggle internal popup window", "") #menu.Append(212, "Show external popup time LISA", "") menu.UpdateUI() menu.Bind(wx.EVT_MENU, self.select_by_weights, id=210) menu.Bind(wx.EVT_MENU, self.cancel_select_by_weights, id=211) #menu.Bind(wx.EVT_MENU, self.showInternalPopupTimeLISA, id=212) #menu.Bind(wx.EVT_MENU, self.showExtPopupTimeLISA, id=212) self.PopupMenu(menu) event.Skip() class DynamicLocalGQueryDialog(DynamicLISAQueryDialog): """ """ def Add_Customized_Controls(self): x2,y2 = 20, 350 wx.StaticBox(self.panel, -1, "Local G setting:",pos=(x2,y2),size=(325,70)) wx.StaticText(self.panel, -1, "Weights file:",pos =(x2+10,y2+30),size=(90,-1)) self.txt_weight_path = wx.TextCtrl(self.panel, -1, "",pos=(x2+100,y2+30), size=(180,-1) ) #open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, (16,16)) open_bmp = wx.BitmapFromImage(stars.OPEN_ICON_IMG) self.btn_weight_path = wx.BitmapButton(self.panel,-1, open_bmp, pos=(x2+292,y2+32), style=wx.NO_BORDER) self.Bind(wx.EVT_BUTTON, self.BrowseWeightFile, self.btn_weight_path) def OnQuery(self,event): if self._check_time_itv_input() == False or\ self._check_weight_path() == False or\ self._check_space_input() == False: return self.current_selected = range(self.dbf.n_records) self._filter_by_query_field() self.query_date = None self._filter_by_date_interval() self._filter_by_tod() self.query_data = self.gen_date_by_step() if self.query_data == None or len(self.query_data) <= 1: self.ShowMsgBox("Dynamic Local G Map requires at least 2 time intervals, please reselect step-by parameters.") return title = "" if self.query_field.lower() != "all fields": title = "(%s:%s)"%(self.query_field,self.query_range) # LISA layer (only one) g_layer = [self.background_shps[self.background_shp_idx]] gi_widget = DynamicMapWidget( self.parent, g_layer, DynamicLocalG, weight=self.weight_path, query_data=self.query_data, size=(800,650), start= 
self._wxdate2pydate(self.itv_start_date.GetValue()), end= self._wxdate2pydate(self.itv_end_date.GetValue()), step_by=self.step_by, step=self.step+1, title=title ) gi_widget.Show() # (enable) save LISA Markov to new shp/dbf files #self.btn_save.Enable(True) #self.lisa_layer = lisa_layer[0] #self.lisa_markov_map = gi_widget.map_canvas def OnSaveQueryToDBF(self, event): """ Save Markov type in each interval for each record to dbf file. """ if self.query_data == None: return dlg = wx.FileDialog( self, message="Save Markov LISA type to new dbf file...", defaultDir=os.getcwd(), defaultFile='%s.shp' % (self.lisa_layer.name + '_markov_lisa'), wildcard="shape file (*.shp)|*.shp|All files (*.*)|*.*", style=wx.SAVE ) if dlg.ShowModal() != wx.ID_OK: return path = dlg.GetPath() dbf = self.lisa_layer.dbf try: n_intervals = self.lisa_markov_map.t -1 n_objects = len(dbf) lisa_markov_mt = self.lisa_markov_map.lisa_markov_mt newDBF= pysal.open('%s.dbf'%path[:-4],'w') newDBF.header = [] newDBF.field_spec = [] for i in dbf.header: newDBF.header.append(i) for i in dbf.field_spec: newDBF.field_spec.append(i) for i in range(n_intervals): newDBF.header.append('MARKOV_ITV%d'%(i+1)) newDBF.field_spec.append(('N',4,0)) for i in range(n_objects): newRow = [] newRow = [item for item in dbf[i][0]] for j in range(n_intervals): move_type = lisa_markov_mt[i][j] newRow.append(move_type) newDBF.write(newRow) newDBF.close() self.ShowMsgBox("Query results have been saved to new dbf file.", mtype='CAST Information', micon=wx.ICON_INFORMATION) except: self.ShowMsgBox("Saving query results to dbf file failed! 
Please check if the dbf file already exists.") def ShowDynamicLocalGMap(self): # self is Main.py if not self.shapefiles or len(self.shapefiles) < 1: return shp_list = [shp.name for shp in self.shapefiles] dlg = wx.SingleChoiceDialog( self, 'Select a POINT or Polygon(with time field) shape file:', 'Dynamic Local G Map', shp_list, wx.CHOICEDLG_STYLE) if dlg.ShowModal() == wx.ID_OK: idx = dlg.GetSelection() shp = self.shapefiles[idx] background_shapes = FilterShapeList(self.shapefiles, stars.SHP_POLYGON) if shp.shape_type == stars.SHP_POINT: # create Dynamic Local G from points gi_dlg = DynamicLocalGQueryDialog( self,"Dynamic Local G:" + shp.name, shp, background_shps=background_shapes, size=stars.DIALOG_SIZE_QUERY_DYNAMIC_LISA ) gi_dlg.Show() elif shp.shape_type == stars.SHP_POLYGON: # bring up a dialog and let user select # the time field in POLYGON shape file dbf_field_list = shp.dbf.header timedlg = wx.MultiChoiceDialog( self, 'Select TIME fields to generate Dynamic Local G map:', 'DBF fields view', dbf_field_list ) if timedlg.ShowModal() == wx.ID_OK: selections = timedlg.GetSelections() # compose lisa_data_dict dbf = shp.dbf lisa_data_dict = {} count = 0 for idx in selections: lisa_data_dict[count] = np.array(dbf.by_col(dbf.header[idx])) count += 1 # select weight file wdlg = wx.FileDialog( self, message="Select a weights file", wildcard="Weights file (*.gal,*.gwt)|*.gal;*.gwt|All files (*.*)|*.*", style=wx.OPEN | wx.CHANGE_DIR ) if wdlg.ShowModal() == wx.ID_OK: # todo: select filter weight_path = wdlg.GetPath() gi_spacetime_widget= DynamicMapWidget( self, [shp], DynamicLocalG, weight = weight_path, query_data = lisa_data_dict, size =stars.MAP_SIZE_MARKOV_LISA, start=1, end=count-1, step_by='', step=1 ) gi_spacetime_widget.Show() wdlg.Destroy() timedlg.Destroy() else: self.ShowMsgBox("File type error. Should be a POINT or POLYGON shapefile.") dlg.Destroy() return dlg.Destroy()
gpl-3.0
zouyapeng/horizon-newtouch
openstack_dashboard/dashboards/identity/domains/views.py
15
3574
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon import workflows

from openstack_dashboard import api
from openstack_dashboard import policy

from openstack_dashboard.dashboards.identity.domains import constants
from openstack_dashboard.dashboards.identity.domains \
    import tables as project_tables
from openstack_dashboard.dashboards.identity.domains \
    import workflows as project_workflows


class IndexView(tables.DataTableView):
    """Tabular listing of the Keystone domains visible to the user."""

    table_class = project_tables.DomainsTable
    template_name = constants.DOMAINS_INDEX_VIEW_TEMPLATE

    def get_data(self):
        """Return the domain rows for the table.

        Policy-gated: users allowed to list domains get either the single
        "domain context" domain stored in the session or the full list;
        users allowed only to fetch a domain get their own; everyone else
        gets an informational message and an empty table.
        """
        request = self.request
        domain_context = request.session.get('domain_context', None)

        if policy.check((("identity", "identity:list_domains"),), request):
            try:
                if domain_context:
                    # A domain context is pinned in the session: show it alone.
                    return [api.keystone.domain_get(request, domain_context)]
                return api.keystone.domain_list(request)
            except Exception:
                exceptions.handle(request,
                                  _('Unable to retrieve domain list.'))
        elif policy.check((("identity", "identity:get_domain"),), request):
            try:
                # Only allowed to see the user's own domain.
                return [api.keystone.domain_get(request,
                                                request.user.domain_id)]
            except Exception:
                exceptions.handle(
                    request, _('Unable to retrieve domain information.'))
        else:
            messages.info(
                request,
                _("Insufficient privilege level to view domain information."))
        return []


class CreateDomainView(workflows.WorkflowView):
    """Workflow-backed view for creating a new domain."""

    workflow_class = project_workflows.CreateDomain


class UpdateDomainView(workflows.WorkflowView):
    """Workflow-backed view for editing an existing domain."""

    workflow_class = project_workflows.UpdateDomain

    def get_initial(self):
        """Seed the update workflow with the domain's current attributes."""
        initial = super(UpdateDomainView, self).get_initial()

        target_id = self.kwargs['domain_id']
        initial['domain_id'] = target_id

        try:
            # Pull the current values so the form is pre-populated; missing
            # attributes fall back to None rather than raising.
            domain = api.keystone.domain_get(self.request, target_id)
            for field in constants.DOMAIN_INFO_FIELDS:
                initial[field] = getattr(domain, field, None)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve domain details.'),
                              redirect=reverse(constants.DOMAINS_INDEX_URL))
        return initial
apache-2.0
quantiply-fork/coopr
coopr-docs/docs/source/conf.py
1
9402
# -*- coding: utf-8 -*- # # Coopr documentation build configuration file, created by # sphinx-quickstart on Thu Dec 5 11:56:37 2013. Modified manually 02/01/2014 # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import subprocess def get_sdk_version(): # Sets the Build Version via maven mvn_version_cmd = "mvn help:evaluate -o -Dexpression=project.version -f ../../../pom.xml | grep -v '^\['" version = None try: version = subprocess.check_output(mvn_version_cmd, shell=True).strip().replace("-SNAPSHOT", "") except: print "Could not get version from maven" sys.exit(1) return version # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinxcontrib.fulltoc', 'sphinxcontrib.googleanalytics', 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', ] # Google analytics configuration googleanalytics_id = 'UA-27787617-1' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. 
master_doc = 'index' # General information about the project. project = u'Coopr' copyright = u'2014 Cask Data, Inc.' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.9.9' # The full version, including alpha/beta/rc tags. release = '0.9.9 Beta' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. language = 'en_CDAP' locale_dirs = ['_locale/'] # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # The default language to highlight source code in. highlight_language = 'java' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
# html_theme = 'nature' html_theme = 'cdap' # html_style = 'style.css' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} html_sidebars = {'**': ['globaltoc.html', 'relations.html', 'searchbox.html'],} # Additional templates that should be rendered to pages, maps page names to # template names. 
#html_additional_pages = {} # If false, no module index is generated. html_domain_indices = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False html_copy_source = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Cooprdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('misc/jboss-automator-script', 'JBoss-Automator-Script.tex', u'JBoss Automator Script Documentation', u'Cask Data, Inc.', 'howto'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. latex_logo = '_static/logo.png' # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. 
#latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'coopr', u'Documentation', [u'Cask Data, Inc.'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Coopr', u'Documentation', u'Cask Data, Inc.', 'Coopr', 'Moder cluster provisioning and lifecycle management system.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None}
apache-2.0
mthornhill/django-pressroom
src/pressroom/migrations/0010_auto__add_field_article_translation_of.py
1
11792
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    # South schema migration: adds a nullable self-referential
    # ``translation_of`` foreign key to pressroom.Article so that an article
    # can point at the article it is a translation of.

    def forwards(self, orm):
        """Apply the migration: add the nullable 'Article.translation_of' FK."""
        # Adding field 'Article.translation_of'
        db.add_column('pressroom_article', 'translation_of',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pressroom.Article'], null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the column added by forwards()."""
        # Deleting field 'Article.translation_of'
        # NOTE: the physical column carries Django's ``_id`` suffix for FKs,
        # hence 'translation_of_id' rather than 'translation_of'.
        db.delete_column('pressroom_article', 'translation_of_id')

    # South's frozen-ORM snapshot of every model this migration touches
    # (directly or via relations). Generated data — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'photologue.photo': {
            'Meta': {'ordering': "['-date_added']", 'object_name': 'Photo'},
            'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'tags': ('tagging.fields.TagField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'title_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'photologue.photoeffect': {
            'Meta': {'object_name': 'PhotoEffect'},
            'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
            'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
            'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
        },
        'pressroom.article': {
            'Meta': {'ordering': "['-pub_date']", 'object_name': 'Article'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'body': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'documents': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['pressroom.Document']"}),
            'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'headline': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '10'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_by'", 'null': 'True', 'to': "orm['auth.User']"}),
            'photos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['photologue.Photo']"}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'sections': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'articles'", 'symmetrical': 'False', 'to': "orm['pressroom.Section']"}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('headline',)", 'overwrite': 'False'}),
            'summary': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True', 'blank': 'True'}),
            'translation_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pressroom.Article']", 'null': 'True', 'blank': 'True'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
        },
        'pressroom.document': {
            'Meta': {'ordering': "['-pub_date']", 'object_name': 'Document'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('title',)", 'overwrite': 'False'}),
            'summary': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
        },
        'pressroom.section': {
            'Meta': {'ordering': "['title']", 'object_name': 'Section'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('title',)", 'overwrite': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        }
    }

    complete_apps = ['pressroom']
bsd-3-clause
VaibhavAgarwalVA/sympy
sympy/stats/crv_types.py
21
60502
""" Continuous Random Variables - Prebuilt variables Contains ======== Arcsin Benini Beta BetaPrime Cauchy Chi ChiNoncentral ChiSquared Dagum Erlang Exponential FDistribution FisherZ Frechet Gamma GammaInverse Kumaraswamy Laplace Logistic LogNormal Maxwell Nakagami Normal Pareto QuadraticU RaisedCosine Rayleigh StudentT Triangular Uniform UniformSum VonMises Weibull WignerSemicircle """ from __future__ import print_function, division from sympy import (log, sqrt, pi, S, Dummy, Interval, sympify, gamma, Piecewise, And, Eq, binomial, factorial, Sum, floor, Abs, Lambda, Basic) from sympy import beta as beta_fn from sympy import cos, exp, besseli from sympy.stats.crv import (SingleContinuousPSpace, SingleContinuousDistribution, ContinuousDistributionHandmade) from sympy.stats.rv import _value_check import random oo = S.Infinity __all__ = ['ContinuousRV', 'Arcsin', 'Benini', 'Beta', 'BetaPrime', 'Cauchy', 'Chi', 'ChiNoncentral', 'ChiSquared', 'Dagum', 'Erlang', 'Exponential', 'FDistribution', 'FisherZ', 'Frechet', 'Gamma', 'GammaInverse', 'Kumaraswamy', 'Laplace', 'Logistic', 'LogNormal', 'Maxwell', 'Nakagami', 'Normal', 'Pareto', 'QuadraticU', 'RaisedCosine', 'Rayleigh', 'StudentT', 'Triangular', 'Uniform', 'UniformSum', 'VonMises', 'Weibull', 'WignerSemicircle' ] def ContinuousRV(symbol, density, set=Interval(-oo, oo)): """ Create a Continuous Random Variable given the following: -- a symbol -- a probability density function -- set on which the pdf is valid (defaults to entire real line) Returns a RandomSymbol. Many common continuous random variable types are already implemented. This function should be necessary only very rarely. 
Examples ======== >>> from sympy import Symbol, sqrt, exp, pi >>> from sympy.stats import ContinuousRV, P, E >>> x = Symbol("x") >>> pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution >>> X = ContinuousRV(x, pdf) >>> E(X) 0 >>> P(X>0) 1/2 """ pdf = Lambda(symbol, density) dist = ContinuousDistributionHandmade(pdf, set) return SingleContinuousPSpace(symbol, dist).value def rv(symbol, cls, args): args = list(map(sympify, args)) dist = cls(*args) dist.check(*args) return SingleContinuousPSpace(symbol, dist).value ######################################## # Continuous Probability Distributions # ######################################## #------------------------------------------------------------------------------- # Arcsin distribution ---------------------------------------------------------- class ArcsinDistribution(SingleContinuousDistribution): _argnames = ('a', 'b') def pdf(self, x): return 1/(pi*sqrt((x - self.a)*(self.b - x))) def Arcsin(name, a=0, b=1): r""" Create a Continuous Random Variable with an arcsin distribution. The density of the arcsin distribution is given by .. math:: f(x) := \frac{1}{\pi\sqrt{(x-a)(b-x)}} with :math:`x \in [a,b]`. It must hold that :math:`-\infty < a < b < \infty`. Parameters ========== a : Real number, the left interval boundary b : Real number, the right interval boundary Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Arcsin, density >>> from sympy import Symbol, simplify >>> a = Symbol("a", real=True) >>> b = Symbol("b", real=True) >>> z = Symbol("z") >>> X = Arcsin("x", a, b) >>> density(X)(z) 1/(pi*sqrt((-a + z)*(b - z))) References ========== .. 
[1] http://en.wikipedia.org/wiki/Arcsine_distribution """ return rv(name, ArcsinDistribution, (a, b)) #------------------------------------------------------------------------------- # Benini distribution ---------------------------------------------------------- class BeniniDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta', 'sigma') @property def set(self): return Interval(self.sigma, oo) def pdf(self, x): alpha, beta, sigma = self.alpha, self.beta, self.sigma return (exp(-alpha*log(x/sigma) - beta*log(x/sigma)**2) *(alpha/x + 2*beta*log(x/sigma)/x)) def Benini(name, alpha, beta, sigma): r""" Create a Continuous Random Variable with a Benini distribution. The density of the Benini distribution is given by .. math:: f(x) := e^{-\alpha\log{\frac{x}{\sigma}} -\beta\log^2\left[{\frac{x}{\sigma}}\right]} \left(\frac{\alpha}{x}+\frac{2\beta\log{\frac{x}{\sigma}}}{x}\right) This is a heavy-tailed distrubtion and is also known as the log-Rayleigh distribution. Parameters ========== alpha : Real number, `\alpha > 0`, a shape beta : Real number, `\beta > 0`, a shape sigma : Real number, `\sigma > 0`, a scale Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Benini, density >>> from sympy import Symbol, simplify, pprint >>> alpha = Symbol("alpha", positive=True) >>> beta = Symbol("beta", positive=True) >>> sigma = Symbol("sigma", positive=True) >>> z = Symbol("z") >>> X = Benini("x", alpha, beta, sigma) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) / / z \\ / z \ 2/ z \ | 2*beta*log|-----|| - alpha*log|-----| - beta*log |-----| |alpha \sigma/| \sigma/ \sigma/ |----- + -----------------|*e \ z z / References ========== .. [1] http://en.wikipedia.org/wiki/Benini_distribution .. 
[2] http://reference.wolfram.com/legacy/v8/ref/BeniniDistribution.html """ return rv(name, BeniniDistribution, (alpha, beta, sigma)) #------------------------------------------------------------------------------- # Beta distribution ------------------------------------------------------------ class BetaDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta') set = Interval(0, 1) @staticmethod def check(alpha, beta): _value_check(alpha > 0, "Alpha must be positive") _value_check(beta > 0, "Beta must be positive") def pdf(self, x): alpha, beta = self.alpha, self.beta return x**(alpha - 1) * (1 - x)**(beta - 1) / beta_fn(alpha, beta) def sample(self): return random.betavariate(self.alpha, self.beta) def Beta(name, alpha, beta): r""" Create a Continuous Random Variable with a Beta distribution. The density of the Beta distribution is given by .. math:: f(x) := \frac{x^{\alpha-1}(1-x)^{\beta-1}} {\mathrm{B}(\alpha,\beta)} with :math:`x \in [0,1]`. Parameters ========== alpha : Real number, `\alpha > 0`, a shape beta : Real number, `\beta > 0`, a shape Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Beta, density, E, variance >>> from sympy import Symbol, simplify, pprint, expand_func >>> alpha = Symbol("alpha", positive=True) >>> beta = Symbol("beta", positive=True) >>> z = Symbol("z") >>> X = Beta("x", alpha, beta) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) alpha - 1 beta - 1 z *(-z + 1) --------------------------- beta(alpha, beta) >>> expand_func(simplify(E(X, meijerg=True))) alpha/(alpha + beta) >>> simplify(variance(X, meijerg=True)) #doctest: +SKIP alpha*beta/((alpha + beta)**2*(alpha + beta + 1)) References ========== .. [1] http://en.wikipedia.org/wiki/Beta_distribution .. 
[2] http://mathworld.wolfram.com/BetaDistribution.html """ return rv(name, BetaDistribution, (alpha, beta)) #------------------------------------------------------------------------------- # Beta prime distribution ------------------------------------------------------ class BetaPrimeDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta') set = Interval(0, oo) def pdf(self, x): alpha, beta = self.alpha, self.beta return x**(alpha - 1)*(1 + x)**(-alpha - beta)/beta_fn(alpha, beta) def BetaPrime(name, alpha, beta): r""" Create a continuous random variable with a Beta prime distribution. The density of the Beta prime distribution is given by .. math:: f(x) := \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)} with :math:`x > 0`. Parameters ========== alpha : Real number, `\alpha > 0`, a shape beta : Real number, `\beta > 0`, a shape Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import BetaPrime, density >>> from sympy import Symbol, pprint >>> alpha = Symbol("alpha", positive=True) >>> beta = Symbol("beta", positive=True) >>> z = Symbol("z") >>> X = BetaPrime("x", alpha, beta) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) alpha - 1 -alpha - beta z *(z + 1) ------------------------------- beta(alpha, beta) References ========== .. [1] http://en.wikipedia.org/wiki/Beta_prime_distribution .. [2] http://mathworld.wolfram.com/BetaPrimeDistribution.html """ return rv(name, BetaPrimeDistribution, (alpha, beta)) #------------------------------------------------------------------------------- # Cauchy distribution ---------------------------------------------------------- class CauchyDistribution(SingleContinuousDistribution): _argnames = ('x0', 'gamma') def pdf(self, x): return 1/(pi*self.gamma*(1 + ((x - self.x0)/self.gamma)**2)) def Cauchy(name, x0, gamma): r""" Create a continuous random variable with a Cauchy distribution. The density of the Cauchy distribution is given by .. 
math:: f(x) := \frac{1}{\pi} \arctan\left(\frac{x-x_0}{\gamma}\right) +\frac{1}{2} Parameters ========== x0 : Real number, the location gamma : Real number, `\gamma > 0`, the scale Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Cauchy, density >>> from sympy import Symbol >>> x0 = Symbol("x0") >>> gamma = Symbol("gamma", positive=True) >>> z = Symbol("z") >>> X = Cauchy("x", x0, gamma) >>> density(X)(z) 1/(pi*gamma*(1 + (-x0 + z)**2/gamma**2)) References ========== .. [1] http://en.wikipedia.org/wiki/Cauchy_distribution .. [2] http://mathworld.wolfram.com/CauchyDistribution.html """ return rv(name, CauchyDistribution, (x0, gamma)) #------------------------------------------------------------------------------- # Chi distribution ------------------------------------------------------------- class ChiDistribution(SingleContinuousDistribution): _argnames = ('k',) set = Interval(0, oo) def pdf(self, x): return 2**(1 - self.k/2)*x**(self.k - 1)*exp(-x**2/2)/gamma(self.k/2) def Chi(name, k): r""" Create a continuous random variable with a Chi distribution. The density of the Chi distribution is given by .. math:: f(x) := \frac{2^{1-k/2}x^{k-1}e^{-x^2/2}}{\Gamma(k/2)} with :math:`x \geq 0`. Parameters ========== k : A positive Integer, `k > 0`, the number of degrees of freedom Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Chi, density, E, std >>> from sympy import Symbol, simplify >>> k = Symbol("k", integer=True) >>> z = Symbol("z") >>> X = Chi("x", k) >>> density(X)(z) 2**(-k/2 + 1)*z**(k - 1)*exp(-z**2/2)/gamma(k/2) References ========== .. [1] http://en.wikipedia.org/wiki/Chi_distribution .. 
[2] http://mathworld.wolfram.com/ChiDistribution.html """ return rv(name, ChiDistribution, (k,)) #------------------------------------------------------------------------------- # Non-central Chi distribution ------------------------------------------------- class ChiNoncentralDistribution(SingleContinuousDistribution): _argnames = ('k', 'l') set = Interval(0, oo) def pdf(self, x): k, l = self.k, self.l return exp(-(x**2+l**2)/2)*x**k*l / (l*x)**(k/2) * besseli(k/2-1, l*x) def ChiNoncentral(name, k, l): r""" Create a continuous random variable with a non-central Chi distribution. The density of the non-central Chi distribution is given by .. math:: f(x) := \frac{e^{-(x^2+\lambda^2)/2} x^k\lambda} {(\lambda x)^{k/2}} I_{k/2-1}(\lambda x) with `x \geq 0`. Here, `I_\nu (x)` is the :ref:`modified Bessel function of the first kind <besseli>`. Parameters ========== k : A positive Integer, `k > 0`, the number of degrees of freedom l : Shift parameter Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import ChiNoncentral, density, E, std >>> from sympy import Symbol, simplify >>> k = Symbol("k", integer=True) >>> l = Symbol("l") >>> z = Symbol("z") >>> X = ChiNoncentral("x", k, l) >>> density(X)(z) l*z**k*(l*z)**(-k/2)*exp(-l**2/2 - z**2/2)*besseli(k/2 - 1, l*z) References ========== .. [1] http://en.wikipedia.org/wiki/Noncentral_chi_distribution """ return rv(name, ChiNoncentralDistribution, (k, l)) #------------------------------------------------------------------------------- # Chi squared distribution ----------------------------------------------------- class ChiSquaredDistribution(SingleContinuousDistribution): _argnames = ('k',) set = Interval(0, oo) def pdf(self, x): k = self.k return 1/(2**(k/2)*gamma(k/2))*x**(k/2 - 1)*exp(-x/2) def ChiSquared(name, k): r""" Create a continuous random variable with a Chi-squared distribution. The density of the Chi-squared distribution is given by .. 
math:: f(x) := \frac{1}{2^{\frac{k}{2}}\Gamma\left(\frac{k}{2}\right)} x^{\frac{k}{2}-1} e^{-\frac{x}{2}} with :math:`x \geq 0`. Parameters ========== k : A positive Integer, `k > 0`, the number of degrees of freedom Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import ChiSquared, density, E, variance >>> from sympy import Symbol, simplify, combsimp, expand_func >>> k = Symbol("k", integer=True, positive=True) >>> z = Symbol("z") >>> X = ChiSquared("x", k) >>> density(X)(z) 2**(-k/2)*z**(k/2 - 1)*exp(-z/2)/gamma(k/2) >>> combsimp(E(X)) k >>> simplify(expand_func(variance(X))) 2*k References ========== .. [1] http://en.wikipedia.org/wiki/Chi_squared_distribution .. [2] http://mathworld.wolfram.com/Chi-SquaredDistribution.html """ return rv(name, ChiSquaredDistribution, (k, )) #------------------------------------------------------------------------------- # Dagum distribution ----------------------------------------------------------- class DagumDistribution(SingleContinuousDistribution): _argnames = ('p', 'a', 'b') def pdf(self, x): p, a, b = self.p, self.a, self.b return a*p/x*((x/b)**(a*p)/(((x/b)**a + 1)**(p + 1))) def Dagum(name, p, a, b): r""" Create a continuous random variable with a Dagum distribution. The density of the Dagum distribution is given by .. math:: f(x) := \frac{a p}{x} \left( \frac{\left(\tfrac{x}{b}\right)^{a p}} {\left(\left(\tfrac{x}{b}\right)^a + 1 \right)^{p+1}} \right) with :math:`x > 0`. Parameters ========== p : Real number, `p > 0`, a shape a : Real number, `a > 0`, a shape b : Real number, `b > 0`, a scale Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Dagum, density >>> from sympy import Symbol, simplify >>> p = Symbol("p", positive=True) >>> b = Symbol("b", positive=True) >>> a = Symbol("a", positive=True) >>> z = Symbol("z") >>> X = Dagum("x", p, a, b) >>> density(X)(z) a*p*(z/b)**(a*p)*((z/b)**a + 1)**(-p - 1)/z References ========== .. 
[1] http://en.wikipedia.org/wiki/Dagum_distribution """ return rv(name, DagumDistribution, (p, a, b)) #------------------------------------------------------------------------------- # Erlang distribution ---------------------------------------------------------- def Erlang(name, k, l): r""" Create a continuous random variable with an Erlang distribution. The density of the Erlang distribution is given by .. math:: f(x) := \frac{\lambda^k x^{k-1} e^{-\lambda x}}{(k-1)!} with :math:`x \in [0,\infty]`. Parameters ========== k : Integer l : Real number, `\lambda > 0`, the rate Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Erlang, density, cdf, E, variance >>> from sympy import Symbol, simplify, pprint >>> k = Symbol("k", integer=True, positive=True) >>> l = Symbol("l", positive=True) >>> z = Symbol("z") >>> X = Erlang("x", k, l) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) k k - 1 -l*z l *z *e --------------- gamma(k) >>> C = cdf(X, meijerg=True)(z) >>> pprint(C, use_unicode=False) / -2*I*pi*k -2*I*pi*k | k*e *lowergamma(k, 0) k*e *lowergamma(k, l*z) |- ----------------------------- + ------------------------------- for z >= 0 < gamma(k + 1) gamma(k + 1) | | 0 otherwise \ >>> simplify(E(X)) k/l >>> simplify(variance(X)) k/l**2 References ========== .. [1] http://en.wikipedia.org/wiki/Erlang_distribution .. 
[2] http://mathworld.wolfram.com/ErlangDistribution.html """ return rv(name, GammaDistribution, (k, 1/l)) #------------------------------------------------------------------------------- # Exponential distribution ----------------------------------------------------- class ExponentialDistribution(SingleContinuousDistribution): _argnames = ('rate',) set = Interval(0, oo) @staticmethod def check(rate): _value_check(rate > 0, "Rate must be positive.") def pdf(self, x): return self.rate * exp(-self.rate*x) def sample(self): return random.expovariate(self.rate) def Exponential(name, rate): r""" Create a continuous random variable with an Exponential distribution. The density of the exponential distribution is given by .. math:: f(x) := \lambda \exp(-\lambda x) with `x > 0`. Note that the expected value is `1/\lambda`. Parameters ========== rate : A positive Real number, `\lambda > 0`, the rate (or inverse scale/inverse mean) Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Exponential, density, cdf, E >>> from sympy.stats import variance, std, skewness >>> from sympy import Symbol >>> l = Symbol("lambda", positive=True) >>> z = Symbol("z") >>> X = Exponential("x", l) >>> density(X)(z) lambda*exp(-lambda*z) >>> cdf(X)(z) Piecewise((1 - exp(-lambda*z), z >= 0), (0, True)) >>> E(X) 1/lambda >>> variance(X) lambda**(-2) >>> skewness(X) 2 >>> X = Exponential('x', 10) >>> density(X)(z) 10*exp(-10*z) >>> E(X) 1/10 >>> std(X) 1/10 References ========== .. [1] http://en.wikipedia.org/wiki/Exponential_distribution .. 
[2] http://mathworld.wolfram.com/ExponentialDistribution.html """ return rv(name, ExponentialDistribution, (rate, )) #------------------------------------------------------------------------------- # F distribution --------------------------------------------------------------- class FDistributionDistribution(SingleContinuousDistribution): _argnames = ('d1', 'd2') set = Interval(0, oo) def pdf(self, x): d1, d2 = self.d1, self.d2 return (sqrt((d1*x)**d1*d2**d2 / (d1*x+d2)**(d1+d2)) / (x * beta_fn(d1/2, d2/2))) def FDistribution(name, d1, d2): r""" Create a continuous random variable with a F distribution. The density of the F distribution is given by .. math:: f(x) := \frac{\sqrt{\frac{(d_1 x)^{d_1} d_2^{d_2}} {(d_1 x + d_2)^{d_1 + d_2}}}} {x \mathrm{B} \left(\frac{d_1}{2}, \frac{d_2}{2}\right)} with :math:`x > 0`. .. TODO - What do these parameters mean? Parameters ========== d1 : `d_1 > 0` a parameter d2 : `d_2 > 0` a parameter Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import FDistribution, density >>> from sympy import Symbol, simplify, pprint >>> d1 = Symbol("d1", positive=True) >>> d2 = Symbol("d2", positive=True) >>> z = Symbol("z") >>> X = FDistribution("x", d1, d2) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) d2 -- ______________________________ 2 / d1 -d1 - d2 d2 *\/ (d1*z) *(d1*z + d2) -------------------------------------- /d1 d2\ z*beta|--, --| \2 2 / References ========== .. [1] http://en.wikipedia.org/wiki/F-distribution .. 
[2] http://mathworld.wolfram.com/F-Distribution.html """ return rv(name, FDistributionDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Fisher Z distribution -------------------------------------------------------- class FisherZDistribution(SingleContinuousDistribution): _argnames = ('d1', 'd2') def pdf(self, x): d1, d2 = self.d1, self.d2 return (2*d1**(d1/2)*d2**(d2/2) / beta_fn(d1/2, d2/2) * exp(d1*x) / (d1*exp(2*x)+d2)**((d1+d2)/2)) def FisherZ(name, d1, d2): r""" Create a Continuous Random Variable with an Fisher's Z distribution. The density of the Fisher's Z distribution is given by .. math:: f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)} \frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}} .. TODO - What is the difference between these degrees of freedom? Parameters ========== d1 : `d_1 > 0`, degree of freedom d2 : `d_2 > 0`, degree of freedom Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import FisherZ, density >>> from sympy import Symbol, simplify, pprint >>> d1 = Symbol("d1", positive=True) >>> d2 = Symbol("d2", positive=True) >>> z = Symbol("z") >>> X = FisherZ("x", d1, d2) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) d1 d2 d1 d2 - -- - -- -- -- 2 2 2 2 / 2*z \ d1*z 2*d1 *d2 *\d1*e + d2/ *e ----------------------------------------- /d1 d2\ beta|--, --| \2 2 / References ========== .. [1] http://en.wikipedia.org/wiki/Fisher%27s_z-distribution .. 
[2] http://mathworld.wolfram.com/Fishersz-Distribution.html """ return rv(name, FisherZDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Frechet distribution --------------------------------------------------------- class FrechetDistribution(SingleContinuousDistribution): _argnames = ('a', 's', 'm') set = Interval(0, oo) def __new__(cls, a, s=1, m=0): a, s, m = list(map(sympify, (a, s, m))) return Basic.__new__(cls, a, s, m) def pdf(self, x): a, s, m = self.a, self.s, self.m return a/s * ((x-m)/s)**(-1-a) * exp(-((x-m)/s)**(-a)) def Frechet(name, a, s=1, m=0): r""" Create a continuous random variable with a Frechet distribution. The density of the Frechet distribution is given by .. math:: f(x) := \frac{\alpha}{s} \left(\frac{x-m}{s}\right)^{-1-\alpha} e^{-(\frac{x-m}{s})^{-\alpha}} with :math:`x \geq m`. Parameters ========== a : Real number, :math:`a \in \left(0, \infty\right)` the shape s : Real number, :math:`s \in \left(0, \infty\right)` the scale m : Real number, :math:`m \in \left(-\infty, \infty\right)` the minimum Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Frechet, density, E, std >>> from sympy import Symbol, simplify >>> a = Symbol("a", positive=True) >>> s = Symbol("s", positive=True) >>> m = Symbol("m", real=True) >>> z = Symbol("z") >>> X = Frechet("x", a, s, m) >>> density(X)(z) a*((-m + z)/s)**(-a - 1)*exp(-((-m + z)/s)**(-a))/s References ========== .. 
[1] http://en.wikipedia.org/wiki/Fr%C3%A9chet_distribution """ return rv(name, FrechetDistribution, (a, s, m)) #------------------------------------------------------------------------------- # Gamma distribution ----------------------------------------------------------- class GammaDistribution(SingleContinuousDistribution): _argnames = ('k', 'theta') set = Interval(0, oo) @staticmethod def check(k, theta): _value_check(k > 0, "k must be positive") _value_check(theta > 0, "Theta must be positive") def pdf(self, x): k, theta = self.k, self.theta return x**(k - 1) * exp(-x/theta) / (gamma(k)*theta**k) def sample(self): return random.gammavariate(self.k, self.theta) def Gamma(name, k, theta): r""" Create a continuous random variable with a Gamma distribution. The density of the Gamma distribution is given by .. math:: f(x) := \frac{1}{\Gamma(k) \theta^k} x^{k - 1} e^{-\frac{x}{\theta}} with :math:`x \in [0,1]`. Parameters ========== k : Real number, `k > 0`, a shape theta : Real number, `\theta > 0`, a scale Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Gamma, density, cdf, E, variance >>> from sympy import Symbol, pprint, simplify >>> k = Symbol("k", positive=True) >>> theta = Symbol("theta", positive=True) >>> z = Symbol("z") >>> X = Gamma("x", k, theta) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) -z ----- -k k - 1 theta theta *z *e --------------------- gamma(k) >>> C = cdf(X, meijerg=True)(z) >>> pprint(C, use_unicode=False) / / z \ | k*lowergamma|k, -----| | k*lowergamma(k, 0) \ theta/ <- ------------------ + ---------------------- for z >= 0 | gamma(k + 1) gamma(k + 1) | \ 0 otherwise >>> E(X) theta*gamma(k + 1)/gamma(k) >>> V = simplify(variance(X)) >>> pprint(V, use_unicode=False) 2 k*theta References ========== .. [1] http://en.wikipedia.org/wiki/Gamma_distribution .. 
[2] http://mathworld.wolfram.com/GammaDistribution.html """ return rv(name, GammaDistribution, (k, theta)) #------------------------------------------------------------------------------- # Inverse Gamma distribution --------------------------------------------------- class GammaInverseDistribution(SingleContinuousDistribution): _argnames = ('a', 'b') set = Interval(0, oo) @staticmethod def check(a, b): _value_check(a > 0, "alpha must be positive") _value_check(b > 0, "beta must be positive") def pdf(self, x): a, b = self.a, self.b return b**a/gamma(a) * x**(-a-1) * exp(-b/x) def GammaInverse(name, a, b): r""" Create a continuous random variable with an inverse Gamma distribution. The density of the inverse Gamma distribution is given by .. math:: f(x) := \frac{\beta^\alpha}{\Gamma(\alpha)} x^{-\alpha - 1} \exp\left(\frac{-\beta}{x}\right) with :math:`x > 0`. Parameters ========== a : Real number, `a > 0` a shape b : Real number, `b > 0` a scale Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import GammaInverse, density, cdf, E, variance >>> from sympy import Symbol, pprint >>> a = Symbol("a", positive=True) >>> b = Symbol("b", positive=True) >>> z = Symbol("z") >>> X = GammaInverse("x", a, b) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) -b --- a -a - 1 z b *z *e --------------- gamma(a) References ========== .. 
[1] http://en.wikipedia.org/wiki/Inverse-gamma_distribution """ return rv(name, GammaInverseDistribution, (a, b)) #------------------------------------------------------------------------------- # Kumaraswamy distribution ----------------------------------------------------- class KumaraswamyDistribution(SingleContinuousDistribution): _argnames = ('a', 'b') set = Interval(0, oo) @staticmethod def check(a, b): _value_check(a > 0, "a must be positive") _value_check(b > 0, "b must be positive") def pdf(self, x): a, b = self.a, self.b return a * b * x**(a-1) * (1-x**a)**(b-1) def Kumaraswamy(name, a, b): r""" Create a Continuous Random Variable with a Kumaraswamy distribution. The density of the Kumaraswamy distribution is given by .. math:: f(x) := a b x^{a-1} (1-x^a)^{b-1} with :math:`x \in [0,1]`. Parameters ========== a : Real number, `a > 0` a shape b : Real number, `b > 0` a shape Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Kumaraswamy, density, E, variance >>> from sympy import Symbol, simplify, pprint >>> a = Symbol("a", positive=True) >>> b = Symbol("b", positive=True) >>> z = Symbol("z") >>> X = Kumaraswamy("x", a, b) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) b - 1 a - 1 / a \ a*b*z *\- z + 1/ References ========== .. [1] http://en.wikipedia.org/wiki/Kumaraswamy_distribution """ return rv(name, KumaraswamyDistribution, (a, b)) #------------------------------------------------------------------------------- # Laplace distribution --------------------------------------------------------- class LaplaceDistribution(SingleContinuousDistribution): _argnames = ('mu', 'b') def pdf(self, x): mu, b = self.mu, self.b return 1/(2*b)*exp(-Abs(x - mu)/b) def Laplace(name, mu, b): r""" Create a continuous random variable with a Laplace distribution. The density of the Laplace distribution is given by .. 
math:: f(x) := \frac{1}{2 b} \exp \left(-\frac{|x-\mu|}b \right) Parameters ========== mu : Real number, the location (mean) b : Real number, `b > 0`, a scale Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Laplace, density >>> from sympy import Symbol >>> mu = Symbol("mu") >>> b = Symbol("b", positive=True) >>> z = Symbol("z") >>> X = Laplace("x", mu, b) >>> density(X)(z) exp(-Abs(mu - z)/b)/(2*b) References ========== .. [1] http://en.wikipedia.org/wiki/Laplace_distribution .. [2] http://mathworld.wolfram.com/LaplaceDistribution.html """ return rv(name, LaplaceDistribution, (mu, b)) #------------------------------------------------------------------------------- # Logistic distribution -------------------------------------------------------- class LogisticDistribution(SingleContinuousDistribution): _argnames = ('mu', 's') def pdf(self, x): mu, s = self.mu, self.s return exp(-(x - mu)/s)/(s*(1 + exp(-(x - mu)/s))**2) def Logistic(name, mu, s): r""" Create a continuous random variable with a logistic distribution. The density of the logistic distribution is given by .. math:: f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2} Parameters ========== mu : Real number, the location (mean) s : Real number, `s > 0` a scale Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Logistic, density >>> from sympy import Symbol >>> mu = Symbol("mu", real=True) >>> s = Symbol("s", positive=True) >>> z = Symbol("z") >>> X = Logistic("x", mu, s) >>> density(X)(z) exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2) References ========== .. [1] http://en.wikipedia.org/wiki/Logistic_distribution .. 
[2] http://mathworld.wolfram.com/LogisticDistribution.html """ return rv(name, LogisticDistribution, (mu, s)) #------------------------------------------------------------------------------- # Log Normal distribution ------------------------------------------------------ class LogNormalDistribution(SingleContinuousDistribution): _argnames = ('mean', 'std') set = Interval(0, oo) def pdf(self, x): mean, std = self.mean, self.std return exp(-(log(x) - mean)**2 / (2*std**2)) / (x*sqrt(2*pi)*std) def sample(self): return random.lognormvariate(self.mean, self.std) def LogNormal(name, mean, std): r""" Create a continuous random variable with a log-normal distribution. The density of the log-normal distribution is given by .. math:: f(x) := \frac{1}{x\sqrt{2\pi\sigma^2}} e^{-\frac{\left(\ln x-\mu\right)^2}{2\sigma^2}} with :math:`x \geq 0`. Parameters ========== mu : Real number, the log-scale sigma : Real number, :math:`\sigma^2 > 0` a shape Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import LogNormal, density >>> from sympy import Symbol, simplify, pprint >>> mu = Symbol("mu", real=True) >>> sigma = Symbol("sigma", positive=True) >>> z = Symbol("z") >>> X = LogNormal("x", mu, sigma) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) 2 -(-mu + log(z)) ----------------- 2 ___ 2*sigma \/ 2 *e ------------------------ ____ 2*\/ pi *sigma*z >>> X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1 >>> density(X)(z) sqrt(2)*exp(-log(z)**2/2)/(2*sqrt(pi)*z) References ========== .. [1] http://en.wikipedia.org/wiki/Lognormal .. 
[2] http://mathworld.wolfram.com/LogNormalDistribution.html """ return rv(name, LogNormalDistribution, (mean, std)) #------------------------------------------------------------------------------- # Maxwell distribution --------------------------------------------------------- class MaxwellDistribution(SingleContinuousDistribution): _argnames = ('a',) set = Interval(0, oo) def pdf(self, x): a = self.a return sqrt(2/pi)*x**2*exp(-x**2/(2*a**2))/a**3 def Maxwell(name, a): r""" Create a continuous random variable with a Maxwell distribution. The density of the Maxwell distribution is given by .. math:: f(x) := \sqrt{\frac{2}{\pi}} \frac{x^2 e^{-x^2/(2a^2)}}{a^3} with :math:`x \geq 0`. .. TODO - what does the parameter mean? Parameters ========== a : Real number, `a > 0` Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Maxwell, density, E, variance >>> from sympy import Symbol, simplify >>> a = Symbol("a", positive=True) >>> z = Symbol("z") >>> X = Maxwell("x", a) >>> density(X)(z) sqrt(2)*z**2*exp(-z**2/(2*a**2))/(sqrt(pi)*a**3) >>> E(X) 2*sqrt(2)*a/sqrt(pi) >>> simplify(variance(X)) a**2*(-8 + 3*pi)/pi References ========== .. [1] http://en.wikipedia.org/wiki/Maxwell_distribution .. [2] http://mathworld.wolfram.com/MaxwellDistribution.html """ return rv(name, MaxwellDistribution, (a, )) #------------------------------------------------------------------------------- # Nakagami distribution -------------------------------------------------------- class NakagamiDistribution(SingleContinuousDistribution): _argnames = ('mu', 'omega') set = Interval(0, oo) def pdf(self, x): mu, omega = self.mu, self.omega return 2*mu**mu/(gamma(mu)*omega**mu)*x**(2*mu - 1)*exp(-mu/omega*x**2) def Nakagami(name, mu, omega): r""" Create a continuous random variable with a Nakagami distribution. The density of the Nakagami distribution is given by .. 
math:: f(x) := \frac{2\mu^\mu}{\Gamma(\mu)\omega^\mu} x^{2\mu-1} \exp\left(-\frac{\mu}{\omega}x^2 \right) with :math:`x > 0`. Parameters ========== mu : Real number, `\mu \geq \frac{1}{2}` a shape omega : Real number, `\omega > 0`, the spread Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Nakagami, density, E, variance >>> from sympy import Symbol, simplify, pprint >>> mu = Symbol("mu", positive=True) >>> omega = Symbol("omega", positive=True) >>> z = Symbol("z") >>> X = Nakagami("x", mu, omega) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) 2 -mu*z ------- mu -mu 2*mu - 1 omega 2*mu *omega *z *e ---------------------------------- gamma(mu) >>> simplify(E(X, meijerg=True)) sqrt(mu)*sqrt(omega)*gamma(mu + 1/2)/gamma(mu + 1) >>> V = simplify(variance(X, meijerg=True)) >>> pprint(V, use_unicode=False) 2 omega*gamma (mu + 1/2) omega - ----------------------- gamma(mu)*gamma(mu + 1) References ========== .. [1] http://en.wikipedia.org/wiki/Nakagami_distribution """ return rv(name, NakagamiDistribution, (mu, omega)) #------------------------------------------------------------------------------- # Normal distribution ---------------------------------------------------------- class NormalDistribution(SingleContinuousDistribution): _argnames = ('mean', 'std') @staticmethod def check(mean, std): _value_check(std > 0, "Standard deviation must be positive") def pdf(self, x): return exp(-(x - self.mean)**2 / (2*self.std**2)) / (sqrt(2*pi)*self.std) def sample(self): return random.normalvariate(self.mean, self.std) def Normal(name, mean, std): r""" Create a continuous random variable with a Normal distribution. The density of the Normal distribution is given by .. math:: f(x) := \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{(x-\mu)^2}{2\sigma^2} } Parameters ========== mu : Real number, the mean sigma : Real number, :math:`\sigma^2 > 0` the variance Returns ======= A RandomSymbol. 
Examples ======== >>> from sympy.stats import Normal, density, E, std, cdf, skewness >>> from sympy import Symbol, simplify, pprint, factor, together, factor_terms >>> mu = Symbol("mu") >>> sigma = Symbol("sigma", positive=True) >>> z = Symbol("z") >>> X = Normal("x", mu, sigma) >>> density(X)(z) sqrt(2)*exp(-(-mu + z)**2/(2*sigma**2))/(2*sqrt(pi)*sigma) >>> C = simplify(cdf(X))(z) # it needs a little more help... >>> pprint(C, use_unicode=False) / ___ \ |\/ 2 *(-mu + z)| erf|---------------| \ 2*sigma / 1 -------------------- + - 2 2 >>> simplify(skewness(X)) 0 >>> X = Normal("x", 0, 1) # Mean 0, standard deviation 1 >>> density(X)(z) sqrt(2)*exp(-z**2/2)/(2*sqrt(pi)) >>> E(2*X + 1) 1 >>> simplify(std(2*X + 1)) 2 References ========== .. [1] http://en.wikipedia.org/wiki/Normal_distribution .. [2] http://mathworld.wolfram.com/NormalDistributionFunction.html """ return rv(name, NormalDistribution, (mean, std)) #------------------------------------------------------------------------------- # Pareto distribution ---------------------------------------------------------- class ParetoDistribution(SingleContinuousDistribution): _argnames = ('xm', 'alpha') @property def set(self): return Interval(self.xm, oo) @staticmethod def check(xm, alpha): _value_check(xm > 0, "Xm must be positive") _value_check(alpha > 0, "Alpha must be positive") def pdf(self, x): xm, alpha = self.xm, self.alpha return alpha * xm**alpha / x**(alpha + 1) def sample(self): return random.paretovariate(self.alpha) def Pareto(name, xm, alpha): r""" Create a continuous random variable with the Pareto distribution. The density of the Pareto distribution is given by .. math:: f(x) := \frac{\alpha\,x_m^\alpha}{x^{\alpha+1}} with :math:`x \in [x_m,\infty]`. Parameters ========== xm : Real number, `x_m > 0`, a scale alpha : Real number, `\alpha > 0`, a shape Returns ======= A RandomSymbol. 
Examples ======== >>> from sympy.stats import Pareto, density >>> from sympy import Symbol >>> xm = Symbol("xm", positive=True) >>> beta = Symbol("beta", positive=True) >>> z = Symbol("z") >>> X = Pareto("x", xm, beta) >>> density(X)(z) beta*xm**beta*z**(-beta - 1) References ========== .. [1] http://en.wikipedia.org/wiki/Pareto_distribution .. [2] http://mathworld.wolfram.com/ParetoDistribution.html """ return rv(name, ParetoDistribution, (xm, alpha)) #------------------------------------------------------------------------------- # QuadraticU distribution ------------------------------------------------------ class QuadraticUDistribution(SingleContinuousDistribution): _argnames = ('a', 'b') @property def set(self): return Interval(self.a, self.b) def pdf(self, x): a, b = self.a, self.b alpha = 12 / (b-a)**3 beta = (a+b) / 2 return Piecewise( (alpha * (x-beta)**2, And(a<=x, x<=b)), (S.Zero, True)) def QuadraticU(name, a, b): r""" Create a Continuous Random Variable with a U-quadratic distribution. The density of the U-quadratic distribution is given by .. math:: f(x) := \alpha (x-\beta)^2 with :math:`x \in [a,b]`. Parameters ========== a : Real number b : Real number, :math:`a < b` Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import QuadraticU, density, E, variance >>> from sympy import Symbol, simplify, factor, pprint >>> a = Symbol("a", real=True) >>> b = Symbol("b", real=True) >>> z = Symbol("z") >>> X = QuadraticU("x", a, b) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) / 2 | / a b \ |12*|- - - - + z| | \ 2 2 / <----------------- for And(a <= z, z <= b) | 3 | (-a + b) | \ 0 otherwise References ========== .. 
[1] http://en.wikipedia.org/wiki/U-quadratic_distribution """ return rv(name, QuadraticUDistribution, (a, b)) #------------------------------------------------------------------------------- # RaisedCosine distribution ---------------------------------------------------- class RaisedCosineDistribution(SingleContinuousDistribution): _argnames = ('mu', 's') @property def set(self): return Interval(self.mu - self.s, self.mu + self.s) @staticmethod def check(mu, s): _value_check(s > 0, "s must be positive") def pdf(self, x): mu, s = self.mu, self.s return Piecewise( ((1+cos(pi*(x-mu)/s)) / (2*s), And(mu-s<=x, x<=mu+s)), (S.Zero, True)) def RaisedCosine(name, mu, s): r""" Create a Continuous Random Variable with a raised cosine distribution. The density of the raised cosine distribution is given by .. math:: f(x) := \frac{1}{2s}\left(1+\cos\left(\frac{x-\mu}{s}\pi\right)\right) with :math:`x \in [\mu-s,\mu+s]`. Parameters ========== mu : Real number s : Real number, `s > 0` Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import RaisedCosine, density, E, variance >>> from sympy import Symbol, simplify, pprint >>> mu = Symbol("mu", real=True) >>> s = Symbol("s", positive=True) >>> z = Symbol("z") >>> X = RaisedCosine("x", mu, s) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) / /pi*(-mu + z)\ |cos|------------| + 1 | \ s / <--------------------- for And(z <= mu + s, mu - s <= z) | 2*s | \ 0 otherwise References ========== .. 
[1] http://en.wikipedia.org/wiki/Raised_cosine_distribution """ return rv(name, RaisedCosineDistribution, (mu, s)) #------------------------------------------------------------------------------- # Rayleigh distribution -------------------------------------------------------- class RayleighDistribution(SingleContinuousDistribution): _argnames = ('sigma',) set = Interval(0, oo) def pdf(self, x): sigma = self.sigma return x/sigma**2*exp(-x**2/(2*sigma**2)) def Rayleigh(name, sigma): r""" Create a continuous random variable with a Rayleigh distribution. The density of the Rayleigh distribution is given by .. math :: f(x) := \frac{x}{\sigma^2} e^{-x^2/2\sigma^2} with :math:`x > 0`. Parameters ========== sigma : Real number, `\sigma > 0` Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Rayleigh, density, E, variance >>> from sympy import Symbol, simplify >>> sigma = Symbol("sigma", positive=True) >>> z = Symbol("z") >>> X = Rayleigh("x", sigma) >>> density(X)(z) z*exp(-z**2/(2*sigma**2))/sigma**2 >>> E(X) sqrt(2)*sqrt(pi)*sigma/2 >>> variance(X) -pi*sigma**2/2 + 2*sigma**2 References ========== .. [1] http://en.wikipedia.org/wiki/Rayleigh_distribution .. [2] http://mathworld.wolfram.com/RayleighDistribution.html """ return rv(name, RayleighDistribution, (sigma, )) #------------------------------------------------------------------------------- # StudentT distribution -------------------------------------------------------- class StudentTDistribution(SingleContinuousDistribution): _argnames = ('nu',) def pdf(self, x): nu = self.nu return 1/(sqrt(nu)*beta_fn(S(1)/2, nu/2))*(1 + x**2/nu)**(-(nu + 1)/2) def StudentT(name, nu): r""" Create a continuous random variable with a student's t distribution. The density of the student's t distribution is given by .. 
math:: f(x) := \frac{\Gamma \left(\frac{\nu+1}{2} \right)} {\sqrt{\nu\pi}\Gamma \left(\frac{\nu}{2} \right)} \left(1+\frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}} Parameters ========== nu : Real number, `\nu > 0`, the degrees of freedom Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import StudentT, density, E, variance >>> from sympy import Symbol, simplify, pprint >>> nu = Symbol("nu", positive=True) >>> z = Symbol("z") >>> X = StudentT("x", nu) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) nu 1 - -- - - 2 2 / 2\ | z | |1 + --| \ nu/ -------------------- ____ / nu\ \/ nu *beta|1/2, --| \ 2 / References ========== .. [1] http://en.wikipedia.org/wiki/Student_t-distribution .. [2] http://mathworld.wolfram.com/Studentst-Distribution.html """ return rv(name, StudentTDistribution, (nu, )) #------------------------------------------------------------------------------- # Triangular distribution ------------------------------------------------------ class TriangularDistribution(SingleContinuousDistribution): _argnames = ('a', 'b', 'c') def pdf(self, x): a, b, c = self.a, self.b, self.c return Piecewise( (2*(x - a)/((b - a)*(c - a)), And(a <= x, x < c)), (2/(b - a), Eq(x, c)), (2*(b - x)/((b - a)*(b - c)), And(c < x, x <= b)), (S.Zero, True)) def Triangular(name, a, b, c): r""" Create a continuous random variable with a triangular distribution. The density of the triangular distribution is given by .. math:: f(x) := \begin{cases} 0 & \mathrm{for\ } x < a, \\ \frac{2(x-a)}{(b-a)(c-a)} & \mathrm{for\ } a \le x < c, \\ \frac{2}{b-a} & \mathrm{for\ } x = c, \\ \frac{2(b-x)}{(b-a)(b-c)} & \mathrm{for\ } c < x \le b, \\ 0 & \mathrm{for\ } b < x. \end{cases} Parameters ========== a : Real number, :math:`a \in \left(-\infty, \infty\right)` b : Real number, :math:`a < b` c : Real number, :math:`a \leq c \leq b` Returns ======= A RandomSymbol. 
Examples ======== >>> from sympy.stats import Triangular, density, E >>> from sympy import Symbol, pprint >>> a = Symbol("a") >>> b = Symbol("b") >>> c = Symbol("c") >>> z = Symbol("z") >>> X = Triangular("x", a,b,c) >>> pprint(density(X)(z), use_unicode=False) / -2*a + 2*z |----------------- for And(a <= z, z < c) |(-a + b)*(-a + c) | | 2 | ------ for z = c < -a + b | | 2*b - 2*z |---------------- for And(z <= b, c < z) |(-a + b)*(b - c) | \ 0 otherwise References ========== .. [1] http://en.wikipedia.org/wiki/Triangular_distribution .. [2] http://mathworld.wolfram.com/TriangularDistribution.html """ return rv(name, TriangularDistribution, (a, b, c)) #------------------------------------------------------------------------------- # Uniform distribution --------------------------------------------------------- class UniformDistribution(SingleContinuousDistribution): _argnames = ('left', 'right') def pdf(self, x): left, right = self.left, self.right return Piecewise( (S.One/(right - left), And(left <= x, x <= right)), (S.Zero, True)) def compute_cdf(self, **kwargs): from sympy import Lambda, Min z = Dummy('z', real=True, finite=True) result = SingleContinuousDistribution.compute_cdf(self, **kwargs)(z) reps = { Min(z, self.right): z, Min(z, self.left, self.right): self.left, Min(z, self.left): self.left} result = result.subs(reps) return Lambda(z, result) def expectation(self, expr, var, **kwargs): from sympy import Max, Min kwargs['evaluate'] = True result = SingleContinuousDistribution.expectation(self, expr, var, **kwargs) result = result.subs({Max(self.left, self.right): self.right, Min(self.left, self.right): self.left}) return result def sample(self): return random.uniform(self.left, self.right) def Uniform(name, left, right): r""" Create a continuous random variable with a uniform distribution. The density of the uniform distribution is given by .. 
math:: f(x) := \begin{cases} \frac{1}{b - a} & \text{for } x \in [a,b] \\ 0 & \text{otherwise} \end{cases} with :math:`x \in [a,b]`. Parameters ========== a : Real number, :math:`-\infty < a` the left boundary b : Real number, :math:`a < b < \infty` the right boundary Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Uniform, density, cdf, E, variance, skewness >>> from sympy import Symbol, simplify >>> a = Symbol("a", negative=True) >>> b = Symbol("b", positive=True) >>> z = Symbol("z") >>> X = Uniform("x", a, b) >>> density(X)(z) Piecewise((1/(-a + b), And(a <= z, z <= b)), (0, True)) >>> cdf(X)(z) # doctest: +SKIP -a/(-a + b) + z/(-a + b) >>> simplify(E(X)) a/2 + b/2 >>> simplify(variance(X)) a**2/12 - a*b/6 + b**2/12 References ========== .. [1] http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 .. [2] http://mathworld.wolfram.com/UniformDistribution.html """ return rv(name, UniformDistribution, (left, right)) #------------------------------------------------------------------------------- # UniformSum distribution ------------------------------------------------------ class UniformSumDistribution(SingleContinuousDistribution): _argnames = ('n',) @property def set(self): return Interval(0, self.n) def pdf(self, x): n = self.n k = Dummy("k") return 1/factorial( n - 1)*Sum((-1)**k*binomial(n, k)*(x - k)**(n - 1), (k, 0, floor(x))) def UniformSum(name, n): r""" Create a continuous random variable with an Irwin-Hall distribution. The probability distribution function depends on a single parameter `n` which is an integer. The density of the Irwin-Hall distribution is given by .. math :: f(x) := \frac{1}{(n-1)!}\sum_{k=0}^{\lfloor x\rfloor}(-1)^k \binom{n}{k}(x-k)^{n-1} Parameters ========== n : A positive Integer, `n > 0` Returns ======= A RandomSymbol. 
Examples ======== >>> from sympy.stats import UniformSum, density >>> from sympy import Symbol, pprint >>> n = Symbol("n", integer=True) >>> z = Symbol("z") >>> X = UniformSum("x", n) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) floor(z) ___ \ ` \ k n - 1 /n\ ) (-1) *(-k + z) *| | / \k/ /__, k = 0 -------------------------------- (n - 1)! References ========== .. [1] http://en.wikipedia.org/wiki/Uniform_sum_distribution .. [2] http://mathworld.wolfram.com/UniformSumDistribution.html """ return rv(name, UniformSumDistribution, (n, )) #------------------------------------------------------------------------------- # VonMises distribution -------------------------------------------------------- class VonMisesDistribution(SingleContinuousDistribution): _argnames = ('mu', 'k') set = Interval(0, 2*pi) @staticmethod def check(mu, k): _value_check(k > 0, "k must be positive") def pdf(self, x): mu, k = self.mu, self.k return exp(k*cos(x-mu)) / (2*pi*besseli(0, k)) def VonMises(name, mu, k): r""" Create a Continuous Random Variable with a von Mises distribution. The density of the von Mises distribution is given by .. math:: f(x) := \frac{e^{\kappa\cos(x-\mu)}}{2\pi I_0(\kappa)} with :math:`x \in [0,2\pi]`. Parameters ========== mu : Real number, measure of location k : Real number, measure of concentration Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import VonMises, density, E, variance >>> from sympy import Symbol, simplify, pprint >>> mu = Symbol("mu") >>> k = Symbol("k", positive=True) >>> z = Symbol("z") >>> X = VonMises("x", mu, k) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) k*cos(mu - z) e ------------------ 2*pi*besseli(0, k) References ========== .. [1] http://en.wikipedia.org/wiki/Von_Mises_distribution .. 
[2] http://mathworld.wolfram.com/vonMisesDistribution.html """ return rv(name, VonMisesDistribution, (mu, k)) #------------------------------------------------------------------------------- # Weibull distribution --------------------------------------------------------- class WeibullDistribution(SingleContinuousDistribution): _argnames = ('alpha', 'beta') set = Interval(0, oo) @staticmethod def check(alpha, beta): _value_check(alpha > 0, "Alpha must be positive") _value_check(beta > 0, "Beta must be positive") def pdf(self, x): alpha, beta = self.alpha, self.beta return beta * (x/alpha)**(beta - 1) * exp(-(x/alpha)**beta) / alpha def sample(self): return random.weibullvariate(self.alpha, self.beta) def Weibull(name, alpha, beta): r""" Create a continuous random variable with a Weibull distribution. The density of the Weibull distribution is given by .. math:: f(x) := \begin{cases} \frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1} e^{-(x/\lambda)^{k}} & x\geq0\\ 0 & x<0 \end{cases} Parameters ========== lambda : Real number, :math:`\lambda > 0` a scale k : Real number, `k > 0` a shape Returns ======= A RandomSymbol. Examples ======== >>> from sympy.stats import Weibull, density, E, variance >>> from sympy import Symbol, simplify >>> l = Symbol("lambda", positive=True) >>> k = Symbol("k", positive=True) >>> z = Symbol("z") >>> X = Weibull("x", l, k) >>> density(X)(z) k*(z/lambda)**(k - 1)*exp(-(z/lambda)**k)/lambda >>> simplify(E(X)) lambda*gamma(1 + 1/k) >>> simplify(variance(X)) lambda**2*(-gamma(1 + 1/k)**2 + gamma(1 + 2/k)) References ========== .. [1] http://en.wikipedia.org/wiki/Weibull_distribution .. 
[2] http://mathworld.wolfram.com/WeibullDistribution.html """ return rv(name, WeibullDistribution, (alpha, beta)) #------------------------------------------------------------------------------- # Wigner semicircle distribution ----------------------------------------------- class WignerSemicircleDistribution(SingleContinuousDistribution): _argnames = ('R',) @property def set(self): return Interval(-self.R, self.R) def pdf(self, x): R = self.R return 2/(pi*R**2)*sqrt(R**2 - x**2) def WignerSemicircle(name, R): r""" Create a continuous random variable with a Wigner semicircle distribution. The density of the Wigner semicircle distribution is given by .. math:: f(x) := \frac2{\pi R^2}\,\sqrt{R^2-x^2} with :math:`x \in [-R,R]`. Parameters ========== R : Real number, `R > 0`, the radius Returns ======= A `RandomSymbol`. Examples ======== >>> from sympy.stats import WignerSemicircle, density, E >>> from sympy import Symbol, simplify >>> R = Symbol("R", positive=True) >>> z = Symbol("z") >>> X = WignerSemicircle("x", R) >>> density(X)(z) 2*sqrt(R**2 - z**2)/(pi*R**2) >>> E(X) 0 References ========== .. [1] http://en.wikipedia.org/wiki/Wigner_semicircle_distribution .. [2] http://mathworld.wolfram.com/WignersSemicircleLaw.html """ return rv(name, WignerSemicircleDistribution, (R,))
bsd-3-clause
IAMATinyCoder/SocialEDU
node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/easy_xml_test.py
2698
3270
#!/usr/bin/env python # Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Unit tests for the easy_xml.py file. """ import gyp.easy_xml as easy_xml import unittest import StringIO class TestSequenceFunctions(unittest.TestCase): def setUp(self): self.stderr = StringIO.StringIO() def test_EasyXml_simple(self): self.assertEqual( easy_xml.XmlToString(['test']), '<?xml version="1.0" encoding="utf-8"?><test/>') self.assertEqual( easy_xml.XmlToString(['test'], encoding='Windows-1252'), '<?xml version="1.0" encoding="Windows-1252"?><test/>') def test_EasyXml_simple_with_attributes(self): self.assertEqual( easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]), '<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>') def test_EasyXml_escaping(self): original = '<test>\'"\r&\nfoo' converted = '&lt;test&gt;\'&quot;&#xD;&amp;&#xA;foo' converted_apos = converted.replace("'", '&apos;') self.assertEqual( easy_xml.XmlToString(['test3', {'a': original}, original]), '<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' % (converted, converted_apos)) def test_EasyXml_pretty(self): self.assertEqual( easy_xml.XmlToString( ['test3', ['GrandParent', ['Parent1', ['Child'] ], ['Parent2'] ] ], pretty=True), '<?xml version="1.0" encoding="utf-8"?>\n' '<test3>\n' ' <GrandParent>\n' ' <Parent1>\n' ' <Child/>\n' ' </Parent1>\n' ' <Parent2/>\n' ' </GrandParent>\n' '</test3>\n') def test_EasyXml_complex(self): # We want to create: target = ( '<?xml version="1.0" encoding="utf-8"?>' '<Project>' '<PropertyGroup Label="Globals">' '<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>' '<Keyword>Win32Proj</Keyword>' '<RootNamespace>automated_ui_tests</RootNamespace>' '</PropertyGroup>' '<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>' '<PropertyGroup ' 'Condition="\'$(Configuration)|$(Platform)\'==' '\'Debug|Win32\'" 
Label="Configuration">' '<ConfigurationType>Application</ConfigurationType>' '<CharacterSet>Unicode</CharacterSet>' '</PropertyGroup>' '</Project>') xml = easy_xml.XmlToString( ['Project', ['PropertyGroup', {'Label': 'Globals'}, ['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'], ['Keyword', 'Win32Proj'], ['RootNamespace', 'automated_ui_tests'] ], ['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}], ['PropertyGroup', {'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'", 'Label': 'Configuration'}, ['ConfigurationType', 'Application'], ['CharacterSet', 'Unicode'] ] ]) self.assertEqual(xml, target) if __name__ == '__main__': unittest.main()
gpl-3.0
MingdaZhou/gnuradio
grc/grc_gnuradio/blks2/error_rate.py
33
5254
# Copyright 2008 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # default_win_size = 1000 from gnuradio import gr from gnuradio import blocks import gnuradio.gr.gr_threading as _threading import numpy #generate 1s counts array _1s_counts = [sum([1&(i>>j) for j in range(8)]) for i in range(2**8)] class input_watcher(_threading.Thread): """ Read samples from the message queue and hand them to the callback. """ def __init__(self, msgq, callback): self._msgq = msgq self._callback = callback _threading.Thread.__init__(self) self.setDaemon(1) self.keep_running = True self.start() def run(self): r = '' while True: msg = self._msgq.delete_head() itemsize = int(msg.arg1()) nitems = int(msg.arg2()) s = r + msg.to_string() i = (nitems-nitems%2)*itemsize r = s[i:] s = s[:i] samples = numpy.fromstring(s, numpy.int8) self._callback(samples) class error_rate(gr.hier_block2): """ Sample the incoming data streams (byte) and calculate the bit or symbol error rate. Write the running rate to the output data stream (float). """ def __init__(self, type='BER', win_size=default_win_size, bits_per_symbol=2): """ Error rate constructor. 
Args: type: a string 'BER' or 'SER' win_size: the number of samples to calculate over bits_per_symbol: the number of information bits per symbol (BER only) """ #init gr.hier_block2.__init__( self, 'error_rate', gr.io_signature(2, 2, gr.sizeof_char), gr.io_signature(1, 1, gr.sizeof_float), ) assert type in ('BER', 'SER') self._max_samples = win_size self._bits_per_symbol = bits_per_symbol #setup message queue msg_source = blocks.message_source(gr.sizeof_float, 1) self._msgq_source = msg_source.msgq() msgq_sink = gr.msg_queue(2) msg_sink = blocks.message_sink(gr.sizeof_char, msgq_sink, False) #False -> blocking inter = blocks.interleave(gr.sizeof_char) #start thread self._num_errs = 0 self._err_index = 0 self._num_samps = 0 self._err_array = numpy.zeros(self._max_samples, numpy.int8) if type == 'BER': input_watcher(msgq_sink, self._handler_ber) elif type == 'SER': input_watcher(msgq_sink, self._handler_ser) #connect self.connect(msg_source, self) self.connect((self, 0), (inter, 0)) self.connect((self, 1), (inter, 1)) self.connect(inter, msg_sink) def _handler_ber(self, samples): num = len(samples)/2 arr = numpy.zeros(num, numpy.float32) for i in range(num): old_err = self._err_array[self._err_index] #record error self._err_array[self._err_index] = _1s_counts[samples[i*2] ^ samples[i*2 + 1]] self._num_errs = self._num_errs + self._err_array[self._err_index] - old_err #increment index self._err_index = (self._err_index + 1)%self._max_samples self._num_samps = min(self._num_samps + 1, self._max_samples) #write sample arr[i] = float(self._num_errs)/float(self._num_samps*self._bits_per_symbol) #write message msg = gr.message_from_string(arr.tostring(), 0, gr.sizeof_float, num) self._msgq_source.insert_tail(msg) def _handler_ser(self, samples): num = len(samples)/2 arr = numpy.zeros(num, numpy.float32) for i in range(num): old_err = self._err_array[self._err_index] #record error ref = samples[i*2] res = samples[i*2 + 1] if ref == res: self._err_array[self._err_index] = 0 
else: self._err_array[self._err_index] = 1 #update number of errors self._num_errs = self._num_errs + self._err_array[self._err_index] - old_err #increment index self._err_index = (self._err_index + 1)%self._max_samples self._num_samps = min(self._num_samps + 1, self._max_samples) #write sample arr[i] = float(self._num_errs)/float(self._num_samps) #write message msg = gr.message_from_string(arr.tostring(), 0, gr.sizeof_float, num) self._msgq_source.insert_tail(msg)
gpl-3.0
cysuncn/python
study/machinelearning/tensorflow/faceSensor/PR/face_train_use_keras.py
1
10408
#-*- coding: utf-8 -*- import random import numpy as np from sklearn.model_selection import train_test_split from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D from keras.optimizers import SGD from keras.utils import np_utils from keras.models import load_model from keras import backend as K from load_face_dataset import load_dataset, resize_image, IMAGE_SIZE class Dataset: def __init__(self, path_name): # 训练集 self.train_images = None self.train_labels = None # 验证集 self.valid_images = None self.valid_labels = None # 测试集 self.test_images = None self.test_labels = None # 数据集加载路径 self.path_name = path_name # 当前库采用的维度顺序 self.input_shape = None # 加载数据集并按照交叉验证的原则划分数据集并进行相关预处理工作 def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE, img_channels=3, nb_classes=2): # 加载数据集到内存 images, labels = load_dataset(self.path_name) train_images, valid_images, train_labels, valid_labels = train_test_split(images, labels, test_size=0.3, random_state=random.randint(0, 100)) _, test_images, _, test_labels = train_test_split(images, labels, test_size=0.5, random_state=random.randint(0, 100)) # 当前的维度顺序如果为'th',则输入图片数据时的顺序为:channels,rows,cols,否则:rows,cols,channels # 这部分代码就是根据keras库要求的维度顺序重组训练数据集 if K.image_dim_ordering() == 'th': train_images = train_images.reshape(train_images.shape[0], img_channels, img_rows, img_cols) valid_images = valid_images.reshape(valid_images.shape[0], img_channels, img_rows, img_cols) test_images = test_images.reshape(test_images.shape[0], img_channels, img_rows, img_cols) self.input_shape = (img_channels, img_rows, img_cols) else: train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels) valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels) test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels) 
self.input_shape = (img_rows, img_cols, img_channels) # 输出训练集、验证集、测试集的数量 print(train_images.shape[0], 'train samples') print(valid_images.shape[0], 'valid samples') print(test_images.shape[0], 'test samples') # 我们的模型使用categorical_crossentropy作为损失函数,因此需要根据类别数量nb_classes将 # 类别标签进行one-hot编码使其向量化,在这里我们的类别只有两种,经过转化后标签数据变为二维 train_labels = np_utils.to_categorical(train_labels, nb_classes) valid_labels = np_utils.to_categorical(valid_labels, nb_classes) test_labels = np_utils.to_categorical(test_labels, nb_classes) # 像素数据浮点化以便归一化 train_images = train_images.astype('float32') valid_images = valid_images.astype('float32') test_images = test_images.astype('float32') # 将其归一化,图像的各像素值归一化到0~1区间 train_images /= 255 valid_images /= 255 test_images /= 255 self.train_images = train_images self.valid_images = valid_images self.test_images = test_images self.train_labels = train_labels self.valid_labels = valid_labels self.test_labels = test_labels # CNN网络模型类 class Model: def __init__(self): self.model = None def build_model(self, dataset, nb_classes=2): # 构建一个空的网络模型,它是一个线性堆叠模型,各神经网络层会被顺序添加,专业名称为序贯模型或线性堆叠模型 self.model = Sequential() # 以下代码将顺序添加CNN网络需要的各层,一个add就是一个网络层 self.model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=dataset.input_shape)) # 1 2维卷积层 self.model.add(Activation('relu')) # 2 激活函数层 self.model.add(Convolution2D(32, 3, 3)) # 3 2维卷积层 self.model.add(Activation('relu')) # 4 激活函数层 self.model.add(MaxPooling2D(pool_size=(2, 2))) # 5 池化层 self.model.add(Dropout(0.25)) # 6 Dropout层 self.model.add(Convolution2D(64, 3, 3, border_mode='same')) # 7 2维卷积层 self.model.add(Activation('relu')) # 8 激活函数层 self.model.add(Convolution2D(64, 3, 3)) # 9 2维卷积层 self.model.add(Activation('relu')) # 10 激活函数层 self.model.add(MaxPooling2D(pool_size=(2, 2))) # 11 池化层 self.model.add(Dropout(0.25)) # 12 Dropout层 self.model.add(Flatten()) # 13 Flatten层 self.model.add(Dense(512)) # 14 Dense层,又被称作全连接层 self.model.add(Activation('relu')) # 15 激活函数层 self.model.add(Dropout(0.5)) # 16 Dropout层 
self.model.add(Dense(nb_classes)) # 17 Dense层 self.model.add(Activation('softmax')) # 18 分类层,输出最终结果 # 输出模型概况 self.model.summary() def train(self, dataset, batch_size=20, nb_epoch=10, data_augmentation=True): sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) # 采用SGD+momentum的优化器进行训练,首先生成一个优化器对象 self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) # 完成实际的模型配置工作 # 不使用数据提升,所谓的提升就是从我们提供的训练数据中利用旋转、翻转、加噪声等方法创造新的 # 训练数据,有意识的提升训练数据规模,增加模型训练量 if not data_augmentation: self.model.fit(dataset.train_images, dataset.train_labels, batch_size=batch_size, nb_epoch=nb_epoch, validation_data=(dataset.valid_images, dataset.valid_labels), shuffle=True) # 使用实时数据提升 else: # 定义数据生成器用于数据提升,其返回一个生成器对象datagen,datagen每被调用一 # 次其生成一组数据(顺序生成),节省内存,其实就是python的数据生成器 datagen = ImageDataGenerator(featurewise_center=False, # 是否使输入数据去中心化(均值为0), samplewise_center=False, # 是否使输入数据的每个样本均值为0 featurewise_std_normalization=False, # 是否数据标准化(输入数据除以数据集的标准差) samplewise_std_normalization=False, # 是否将每个样本数据除以自身的标准差 zca_whitening=False, # 是否对输入数据施以ZCA白化 rotation_range=20, # 数据提升时图片随机转动的角度(范围为0~180) width_shift_range=0.2, # 数据提升时图片水平偏移的幅度(单位为图片宽度的占比,0~1之间的浮点数) height_shift_range=0.2, # 同上,只不过这里是垂直 horizontal_flip=True, # 是否进行随机水平翻转 vertical_flip=False) # 是否进行随机垂直翻转 # 计算整个训练样本集的数量以用于特征值归一化、ZCA白化等处理 datagen.fit(dataset.train_images) # 利用生成器开始训练模型 self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels, batch_size=batch_size), samples_per_epoch=dataset.train_images.shape[0], nb_epoch=nb_epoch, validation_data=(dataset.valid_images, dataset.valid_labels)) MODEL_PATH = 'D:\model\me.face.model.h5' def save_model(self, file_path=MODEL_PATH): self.model.save(file_path) def load_model(self, file_path=MODEL_PATH): self.model = load_model(file_path) def evaluate(self, dataset): score = self.model.evaluate(dataset.test_images, dataset.test_labels, verbose=1) print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100)) # 识别人脸 def face_predict(self, 
image): # 依然是根据后端系统确定维度顺序 if K.image_dim_ordering() == 'th' and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE): image = resize_image(image) # 尺寸必须与训练集一致都应该是IMAGE_SIZE x IMAGE_SIZE image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE)) # 与模型训练不同,这次只是针对1张图片进行预测 elif K.image_dim_ordering() == 'tf' and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3): image = resize_image(image) image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3)) # 浮点并归一化 image = image.astype('float32') image /= 255 # 给出输入属于各个类别的概率,我们是二值类别,则该函数会给出输入图像属于0和1的概率各为多少 result = self.model.predict_proba(image) print('result:', result) # 给出类别预测:0或者1 result = self.model.predict_classes_customerize(image) # 返回类别预测结果 return result[0] if __name__ == '__main__': dataset = Dataset('D:/data') dataset.load() model = Model() model.build_model(dataset) model.train(dataset) model.save_model(file_path='D:\model\me.face.model.h5') #评估模型 model = Model() model.load_model(file_path='D:\model\me.face.model.h5') model.evaluate(dataset)
gpl-3.0
georgekis/salary
main/api/v1/user.py
1
1982
# coding: utf-8 from __future__ import absolute_import from google.appengine.ext import ndb from flask.ext import restful import flask from api import helpers import auth import model import util from main import api @api.resource('/api/v1/users/', endpoint='api.users') class UsersAPI(restful.Resource): @auth.admin_required def get(self): user_keys = util.param('user_keys', list) if user_keys: user_db_keys = [ndb.Key(urlsafe=k) for k in user_keys] user_dbs = ndb.get_multi(user_db_keys) return helpers.make_response(user_dbs, model.User.FIELDS) user_dbs, user_cursor = model.User.get_dbs() return helpers.make_response(user_dbs, model.User.FIELDS, user_cursor) @auth.admin_required def delete(self): user_keys = util.param('user_keys', list) if not user_keys: helpers.make_not_found_exception('User(s) %s not found' % user_keys) user_db_keys = [ndb.Key(urlsafe=k) for k in user_keys] delete_user_dbs(user_db_keys) return flask.jsonify({ 'result': user_keys, 'status': 'success', }) @api.resource('/api/v1/user/<string:user_key>/', endpoint='api.user') class UserAPI(restful.Resource): @auth.admin_required def get(self, user_key): user_db = ndb.Key(urlsafe=user_key).get() if not user_db: helpers.make_not_found_exception('User %s not found' % user_key) return helpers.make_response(user_db, model.User.FIELDS) @auth.admin_required def delete(self, user_key): user_db = ndb.Key(urlsafe=user_key).get() if not user_db: helpers.make_not_found_exception('User %s not found' % user_key) user_db.key.delete() return helpers.make_response(user_db, model.User.FIELDS) ############################################################################### # Helpers ############################################################################### @ndb.transactional(xg=True) def delete_user_dbs(user_db_keys): ndb.delete_multi(user_db_keys)
mit
markgw/pimlico
src/python/pimlico/cli/clean.py
1
2994
# This file is part of Pimlico # Copyright (C) 2020 Mark Granroth-Wilding # Licensed under the GNU LGPL v3.0 - https://www.gnu.org/licenses/lgpl-3.0.en.html from __future__ import print_function from builtins import input import os import shutil from pimlico.cli.subcommands import PimlicoCLISubcommand class CleanCmd(PimlicoCLISubcommand): """ Cleans up module output directories that have got left behind. Often, when developing a pipeline incrementally, you try out some modules, but then remove them, or rename them to something else. The directory in the Pimlico output store that was created to contain their metadata, status and output data is then left behind and no longer associated with any module. Run this command to check all storage locations for such directories. If it finds any, it prompts you to confirm before deleting them. (If there are things in the list that don't look like they were left behind by the sort of things mentioned above, don't delete them! I don't want you to lose your precious output data if I've made a mistake in this command.) Note that the operation of this command is specific to the loaded pipeline variant. If you have multiple variants, make sure to select the one you want to clean with the general `--variant` option. """ command_name = "clean" command_help = "Remove all module directories that do not correspond to a module in the pipeline " \ "in all storage locations. This is useful when modules have been renamed or removed and output " \ "directories have got left behind. 
Note that it is specific to the selected variant" command_desc = "Remove all module output directories that do not correspond to a module in the pipeline" def run_command(self, pipeline, opts): to_clean = set() for store_name, root_dir in pipeline.storage_locations: if os.path.exists(root_dir): for dirname in os.listdir(root_dir): if os.path.isdir(os.path.join(root_dir, dirname)): # Check whether this dir name corresponds to a module in the pipeline if dirname not in pipeline: # No module called by this name: this dir probably shouldn't be here to_clean.add(os.path.join(root_dir, dirname)) if len(to_clean) == 0: print("Found no directories to clean") else: print("Directories that do not seem to correspond to pipeline modules:") print("\n".join(" - %s" % path for path in to_clean)) print() answer = input("Do you want to remove these directories? [y/N]: ") if answer.lower() == "y": for path in to_clean: shutil.rmtree(path) print("All unnecessary data directories cleaned up") else: print("Cancelled")
gpl-3.0
mattrobenolt/django-sudo
tests/signals.py
3
1087
from .base import BaseTestCase from sudo.signals import grant, revoke from sudo.utils import has_sudo_privileges, grant_sudo_privileges from django.contrib.auth.models import User from django.contrib.auth.signals import user_logged_in, user_logged_out class SignalsTestCase(BaseTestCase): def test_grant(self): self.login() grant(User, self.request) self.assertTrue(has_sudo_privileges(self.request)) def test_revoke(self): self.login() grant(User, self.request) revoke(User, self.request) self.assertFalse(has_sudo_privileges(self.request)) def test_user_logged_in(self): self.login() user_logged_in.send_robust(sender=User, request=self.request) self.assertTrue(has_sudo_privileges(self.request)) def test_user_logged_out(self): self.login() grant_sudo_privileges(self.request) self.assertTrue(has_sudo_privileges(self.request)) user_logged_out.send_robust(sender=User, request=self.request) self.assertFalse(has_sudo_privileges(self.request))
bsd-3-clause
suchow/psiTurk
psiturk/command_line.py
2
3143
''' This module supports commandline functionality '''
import argparse
import sys
import os
from psiturk.version import version_number
from psiturk.psiturk_org_services import ExperimentExchangeServices


def process():
    ''' Figure out how we were invoked and dispatch to the right entry point. '''
    invoked_as = os.path.basename(sys.argv[0])
    if invoked_as == "psiturk":
        launch_shell()
    elif invoked_as == "psiturk-server":
        launch_server()
    elif invoked_as == "psiturk-shell":
        launch_shell()
    elif invoked_as == "psiturk-setup-example":
        setup_example()
    elif invoked_as == "psiturk-install":
        install_from_exchange()


def install_from_exchange():
    ''' Install an experiment from the psiturk.org experiment exchange. '''
    parser = argparse.ArgumentParser(
        description='Download experiment from the psiturk.org experiment '
                    'exchange (http://psiturk.org/ee).'
    )
    parser.add_argument(
        'exp_id', metavar='exp_id', type=str,
        help='the id number of the experiment in the exchange'
    )
    args = parser.parse_args()
    exp_exch = ExperimentExchangeServices()
    exp_exch.download_experiment(args.exp_id)


def setup_example():
    ''' Create the default example project (stroop) in the current directory. '''
    parser = argparse.ArgumentParser(
        description='Creates a simple default project (stroop) in the current '
                    'directory with the necessary psiTurk files.'
    )
    # Optional flags
    parser.add_argument(
        '-v', '--version', help='Print version number.', action="store_true"
    )
    args = parser.parse_args()
    # If the version was requested, just print it and quit.
    # FIX: the original used the py2-only statement form ``print x``; the
    # parenthesised single-argument call below prints identically on py2
    # and is also valid py3.
    if args.version:
        print(version_number)
    else:
        import psiturk.setup_example as se
        se.setup_example()


def launch_server():
    ''' Launch the psiTurk experiment webserver. '''
    parser = argparse.ArgumentParser(
        description='Launch psiTurk experiment webserver process on the '
                    'host/port defined in config.txt.'
    )
    # Optional flags
    parser.add_argument(
        '-v', '--version', help='Print version number.', action="store_true"
    )
    args = parser.parse_args()
    # If the version was requested, just print it and quit
    if args.version:
        print(version_number)
    else:
        import psiturk.experiment_server as es
        es.launch()


def launch_shell():
    ''' Launch the psiTurk interactive shell. '''
    parser = argparse.ArgumentParser(
        description='Launch the psiTurk interactive shell.'
    )
    # Optional flags
    parser.add_argument(
        '-v', '--version', help='Print version number.', action="store_true"
    )
    parser.add_argument(
        '-c', '--cabinmode', help='Launch psiturk in cabin (offline) mode',
        action="store_true"
    )
    parser.add_argument(
        '-s', '--script', help='Run commands from a script file'
    )
    args = parser.parse_args()
    # If the version was requested, just print it and quit
    if args.version:
        print(version_number)
    else:
        import psiturk.psiturk_shell as ps
        if args.script:
            ps.run(cabinmode=args.cabinmode, script=args.script)
        else:
            ps.run(cabinmode=args.cabinmode)
mit
joshzarrabi/e-mission-server
emission/tests/storageTests/TestPlaceQueries.py
1
1537
# Standard imports import unittest import datetime as pydt import logging import uuid import json # Our imports import emission.storage.decorations.place_queries as esdp import emission.core.get_database as edb class TestTripQueries(unittest.TestCase): def setUp(self): self.testUserId = uuid.uuid4() edb.get_place_db().remove() def testCreateNew(self): new_place = esdp.create_new_place(self.testUserId) self.assertIsNotNone(new_place.get_id()) self.assertEqual(new_place.user_id, self.testUserId) def testSavePlace(self): new_place = esdp.create_new_place(self.testUserId) new_place.enter_ts = 5 esdp.save_place(new_place) self.assertEqual(edb.get_place_db().find({"enter_ts": 5}).count(), 1) self.assertEqual(edb.get_place_db().find_one({"enter_ts": 5})["_id"], new_place.get_id()) self.assertEqual(edb.get_place_db().find_one({"enter_ts": 5})["user_id"], self.testUserId) def testGetLastPlace(self): self.testSavePlace() # The place saved in the previous step has no exit_ts set, so it is the # last place new_place = esdp.get_last_place(self.testUserId) new_place.exit_ts = 6 esdp.save_place(new_place) # Now that I have set the exit_ts and saved it, there is no last place new_place = esdp.get_last_place(self.testUserId) self.assertIsNone(new_place) if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) unittest.main()
bsd-3-clause
amchoukir/YouCompleteMe
python/ycm/client/command_request.py
17
5464
#!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.

import vim
from ycm.client.base_request import BaseRequest, BuildRequestData, ServerError
from ycm import vimsupport
from ycmd.utils import ToUtf8IfNeeded


def _EnsureBackwardsCompatibility( arguments ):
  # The old 'GoToDefinitionElseDeclaration' command was folded into 'GoTo'
  if arguments and arguments[ 0 ] == 'GoToDefinitionElseDeclaration':
    arguments[ 0 ] = 'GoTo'
  return arguments


class CommandRequest( BaseRequest ):
  """Runs a completer command on the server and reacts to its response."""

  def __init__( self, arguments, completer_target = None ):
    super( CommandRequest, self ).__init__()
    self._arguments = _EnsureBackwardsCompatibility( arguments )
    self._completer_target = ( completer_target if completer_target
                               else 'filetype_default' )
    # Command family decides which response handler runs afterwards
    self._is_goto_command = (
      self._arguments and self._arguments[ 0 ].startswith( 'GoTo' ) )
    self._is_fixit_command = (
      self._arguments and self._arguments[ 0 ].startswith( 'FixIt' ) )
    self._response = None


  def Start( self ):
    request_data = BuildRequestData()
    request_data.update( {
      'completer_target': self._completer_target,
      'command_arguments': self._arguments
    } )
    try:
      self._response = self.PostDataToHandler( request_data,
                                               'run_completer_command' )
    except ServerError as e:
      vimsupport.PostMultiLineNotice( e )


  def Response( self ):
    return self._response


  def RunPostCommandActionsIfNeeded( self ):
    if not self.Done() or not self._response:
      return
    if self._is_goto_command:
      self._HandleGotoResponse()
    elif self._is_fixit_command:
      self._HandleFixitResponse()
    elif 'message' in self._response:
      self._HandleMessageResponse()


  def _HandleGotoResponse( self ):
    if isinstance( self._response, list ):
      # Several locations: fill the quickfix list and show it
      defs = [ _BuildQfListItem( x ) for x in self._response ]
      vim.eval( 'setqflist( %s )' % repr( defs ) )
      vim.eval( 'youcompleteme#OpenGoToList()' )
    else:
      vimsupport.JumpToLocation( self._response[ 'filepath' ],
                                 self._response[ 'line_num' ],
                                 self._response[ 'column_num' ] )


  def _HandleFixitResponse( self ):
    if not len( self._response[ 'fixits' ] ):
      vimsupport.EchoText( "No fixits found for current line" )
    else:
      fixit = self._response[ 'fixits' ][ 0 ]
      # We track how much earlier chunks shift later ones, so fixes must be
      # applied in ascending order of insertion point.
      # NOTE(review): the key compares "line,column" as *strings*, so e.g.
      # line 10 sorts before line 2 -- confirm whether numeric ordering was
      # intended upstream before changing it.
      fixit[ 'chunks' ].sort( key = lambda chunk: (
        str(chunk[ 'range' ][ 'start' ][ 'line_num' ]) +
        ',' +
        str(chunk[ 'range' ][ 'start' ][ 'column_num' ]) ) )

      # Negative means "no line processed yet" (can't equal a real line)
      last_line = -1
      # Counter of applied changes, reported to the user at the end
      num_fixed = 0
      line_delta = 0
      for chunk in fixit[ 'chunks' ]:
        if chunk[ 'range' ][ 'start' ][ 'line_num' ] != last_line:
          # New line: column deltas from previous chunks no longer apply
          last_line = chunk[ 'range' ][ 'end' ][ 'line_num' ]
          char_delta = 0

        ( new_line_delta, new_char_delta ) = vimsupport.ReplaceChunk(
          chunk[ 'range' ][ 'start' ],
          chunk[ 'range' ][ 'end' ],
          chunk[ 'replacement_text' ],
          line_delta, char_delta )
        line_delta += new_line_delta
        char_delta += new_char_delta
        num_fixed += 1

      vimsupport.EchoTextVimWidth( "FixIt applied "
                                   + str( num_fixed )
                                   + " changes" )


  def _HandleMessageResponse( self ):
    vimsupport.EchoText( self._response[ 'message' ] )


def SendCommandRequest( arguments, completer ):
  request = CommandRequest( arguments, completer )
  # This is a blocking call.
  request.Start()
  request.RunPostCommandActionsIfNeeded()
  return request.Response()


def _BuildQfListItem( goto_data_item ):
  # Translate a server GoTo entry into a Vim quickfix dictionary
  qf_item = {}
  if 'filepath' in goto_data_item:
    qf_item[ 'filename' ] = ToUtf8IfNeeded( goto_data_item[ 'filepath' ] )
  if 'description' in goto_data_item:
    qf_item[ 'text' ] = ToUtf8IfNeeded( goto_data_item[ 'description' ] )
  if 'line_num' in goto_data_item:
    qf_item[ 'lnum' ] = goto_data_item[ 'line_num' ]
  if 'column_num' in goto_data_item:
    # Vim columns are 0-based here
    qf_item[ 'col' ] = goto_data_item[ 'column_num' ] - 1
  return qf_item
gpl-3.0
jaor/bigmler
bigmler/analyze/dispatcher.py
1
5237
# -*- coding: utf-8 -*-
#
# Copyright 2014-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""BigMLer analyze main processing

   Functions to process the analyze options

"""


import sys
import os

import bigmler.processing.args as a
import bigmler.utils as u

from bigmler.analyze.k_fold_cv import (create_kfold_cv,
                                       create_features_analysis,
                                       create_nodes_analysis,
                                       create_candidates_analysis)
from bigmler.dispatcher import SESSIONS_LOG, clear_log_files
from bigmler.command import get_stored_command, command_handling

COMMAND_LOG = ".bigmler_analyze"
DIRS_LOG = ".bigmler_analyze_dir_stack"
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]


def analyze_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer analyze.

    Parses the command line (or restores a stored command when resuming),
    prepares logging and output directories, validates the mutually
    exclusive analysis flags and dispatches to the selected analysis
    (cross-validation, features, node threshold or random fields).

    :param args: raw command-line arguments (defaults to ``sys.argv[1:]``)
    """
    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = command.parser.parse_args(command.args)
    resume = command_args.resume
    if resume:
        # Restore the previously stored command and its session file
        command_args, session_file, _ = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        session_file = os.path.join(command_args.output_dir, SESSIONS_LOG)
        # If logging is required, open the file for logging
        log = None
        if command_args.log_file:
            u.check_dir(command_args.log_file)
            log = command_args.log_file
            # If --clear_logs the log files are cleared
            if command_args.clear_logs:
                clear_log_files([log])
        if command_args.model_fields:
            model_fields = command_args.model_fields.split(',')
            command_args.model_fields_ = [model_field.strip()
                                          for model_field in model_fields]
        else:
            # FIX: was `{}`; the populated branch builds a list, so the empty
            # case should be a list too for a consistent attribute type
            command_args.model_fields_ = []
        u.sys_log_message("%s\n" % os.path.abspath(command_args.output_dir),
                          log_file=DIRS_LOG)
        session_file = os.path.join(command_args.output_dir, SESSIONS_LOG)

    # Creates the corresponding api instance
    # FIX: `a.get_api_instance` was called twice back-to-back; one call is
    # enough (the second simply overwrote the first).
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    a.transform_dataset_options(command_args, api)

    # --maximize flag will be deprecated. Use --optimize flag.
    if command_args.maximize is not None and command_args.optimize is None:
        command_args.optimize = command_args.maximize

    incompatible_flags = [command_args.cv, command_args.features,
                          command_args.nodes, command_args.random_fields]
    if sum([int(bool(flag)) for flag in incompatible_flags]) > 1:
        sys.exit("The following flags cannot be used together:\n --features"
                 "\n --cross-validation\n --nodes\n --random-fields")
    if (command_args.dataset is None and command_args.datasets is None and
            command_args.dataset_file is None):
        sys.exit("The analyze command needs an existing dataset ID. Please, "
                 "use the --dataset flag.")
    if not any(incompatible_flags):
        sys.exit("You need to specify the type of analysis: features, node "
                 "threshold, cross validation or random fields.")

    # k-fold cross-validation
    if command_args.cv and command_args.dataset is not None:
        create_kfold_cv(command_args, api, command, resume=resume)
    # features analysis
    elif command_args.features:
        create_features_analysis(command_args, api, command, resume=resume)
    # node threshold analysis
    elif command_args.nodes:
        create_nodes_analysis(command_args, api, command, resume=resume)
    # random fields analysis
    elif command_args.random_fields:
        create_candidates_analysis(command_args, api, command, resume=resume)
    else:
        sys.exit("You must choose one of the available analysis: --features,"
                 " --nodes, --random-fields or --cross-validation. Add"
                 " your prefered option to"
                 " the command line or type\n    bigmler analyze --help\n"
                 " to see all the available options.")
apache-2.0
viswimmer1/PythonGenerator
data/python_files/33845889/setup_project.py
2
10429
import glob import optparse import os import random import re import shutil import sys import pip try: from pip.exceptions import InstallationError except ImportError: print ("You are using an older version of pip. Please upgrade pip to " "0.7+ (which ships with virtualenv 1.4.7+)") sys.exit(1) import pinax from pinax.core.management.base import BaseCommand, CommandError PROJECTS_DIR = os.path.join(os.path.dirname(pinax.__file__), "projects") class Command(BaseCommand): help = "Creates a new Django project" args = "[projectname]" option_list = BaseCommand.option_list + [ optparse.make_option("-l", "--list-bases", dest = "list_bases", action = "store_true", help = "lists the starter projects (bases) that are available" ), optparse.make_option("-b", "--base", dest = "base", default = "zero", help = "the starter project to use as a base (excluding _project, e.g., basic or social. see --list-projects)" ), optparse.make_option("--no-reqs", dest = "no_reqs", action = "store_true", help = "do not install requirements automatically" ), optparse.make_option("--allow-no-virtualenv", dest = "allow_no_virtualenv", action = "store_true", default = False, help = "turn off the requirement pip must run inside a virtual environment" ) ] def handle(self, *args, **options): if options["list_bases"]: self.base_list() sys.exit(0) if not args: # note: --help prints full path to pinax-admin self.print_help("pinax-admin", "setup_project") sys.exit(0) self.setup_project(args[0], options["base"], options) def base_list(self): sys.path.append(PROJECTS_DIR) for project in self.project_list(): print project.replace("_project", "") __about__ = getattr(__import__(project), "__about__", "") for line in __about__.strip().splitlines(): print " %s" % line print sys.path.pop() def project_list(self): projects = [] for e in os.listdir(PROJECTS_DIR): if os.path.isdir(os.path.join(PROJECTS_DIR, e)): projects.append(e) return projects def setup_project(self, destination, base, options): user_project_name 
= os.path.basename(destination) if not re.search(r"^\w+$", user_project_name): sys.stderr.write("Error: %r is not a valid app name. Please use only numbers, letters and underscores.\n" % (user_project_name)) sys.exit(1) if os.path.exists(destination): raise CommandError("Destination path already exists [%s]" % destination) try: # check to see if the project_name copies an existing module name __import__(user_project_name) except ImportError: # The module does not exist so we let Pinax create it as a project pass else: # The module exists so we raise a CommandError and exit raise CommandError( "'%s' conflicts with the name of an existing Python " "package/module and cannot be used as a project name. Please " "try another name." % user_project_name ) # check the base value (we could later be much smarter about it and # allow repos and such) if base in [p.replace("_project", "") for p in self.project_list()]: project_name = "%s_project" % base source = os.path.join(PROJECTS_DIR, project_name) else: if not os.path.exists(base): raise CommandError( "Project template does not exist the given " "path: %s" % base ) else: project_name = os.path.basename(base) installer = ProjectInstaller(source, destination, project_name, user_project_name) installer.copy() installer.fix_settings() installer.fix_deploy(project_name, user_project_name) print "Created project %s" % user_project_name if not options["no_reqs"]: print "Installing project requirements..." try: installer.install_reqs(not options["allow_no_virtualenv"]) except InstallationError: print ("Installation of requirements failed. The project %s " "has been created though.") % user_project_name else: print print ("Skipping requirement installation. 
Run pip install --no-deps " "-r requirements/project.txt inside the project directory.") class ProjectInstaller(object): """ Provides the methods to install a project at a given destination """ def __init__(self, source_dir, project_dir, project_name, user_project_name): self.source_dir = source_dir self.project_dir = project_dir self.project_name = project_name self.user_project_name = user_project_name def generate_secret_key(self): chars = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)" return "".join([random.choice(chars) for i in xrange(50)]) def copy(self): copytree(self.source_dir, self.project_dir, excluded_patterns=[ ".svn", ".pyc", "dev.db" ] ) def fix_settings(self): # @@@ settings refactor settings_filename = os.path.join(self.project_dir, "settings.py") data = open(settings_filename, "rb").read() data = re.compile(r"SECRET_KEY\s*=.*$", re.M).sub( 'SECRET_KEY = "%s"' % self.generate_secret_key(), data ) data = re.compile(r"ROOT_URLCONF\s*=.*$", re.M).sub( 'ROOT_URLCONF = "%s"' % "%s.urls" % self.user_project_name, data, ) data = data.replace(self.project_name, self.user_project_name) open(settings_filename, "wb").write(data) def fix_deploy(self, base, project_name): for deploy_file in glob.glob(os.path.join(self.project_dir, "deploy/") + "*"): df = open(deploy_file, "rb") deploy_settings = df.read() df.close() deploy_settings = deploy_settings.replace(base, project_name) df = open(deploy_file, "wb") df.write(deploy_settings) df.close() def install_reqs(self, require_virtualenv=True): # @@@ move to using Python pip APIs and not relying on the OS if sys.platform == "win32": PIP_CMD = "pip.exe" else: PIP_CMD = "pip" pip_cmd = resolve_command(PIP_CMD) requirements_file = os.path.join(self.project_dir, "requirements", "project.txt") environ = {} if require_virtualenv: environ["PIP_REQUIRE_VIRTUALENV"] = "true" pip.call_subprocess([ pip_cmd, "install", "--requirement", requirements_file, ], show_stdout=True, extra_environ=environ) def copytree(src, dst, 
symlinks=False, excluded_patterns=None): """ Modified copytree from Python 2.6 (backported to run on 2.4) """ try: WindowsError except NameError: WindowsError = None if excluded_patterns is None: excluded_patterns = [] names = os.listdir(src) os.makedirs(dst) errors = [] for name in names: ignore = False for pattern in excluded_patterns: if pattern in os.path.join(src, name): ignore = True if ignore: continue srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if symlinks and os.path.islink(srcname): linkto = os.readlink(srcname) os.symlink(linkto, dstname) elif os.path.isdir(srcname): copytree(srcname, dstname, symlinks) else: shutil.copy2(srcname, dstname) except (IOError, os.error), why: errors.append((srcname, dstname, str(why))) except shutil.Error, err: errors.extend(err.args[0]) try: shutil.copystat(src, dst) except OSError, why: if not WindowsError is None and isinstance(why, WindowsError): pass else: errors.extend((src, dst, str(why))) if errors: raise shutil.Error, errors # needed for ProjectInstaller.install_reqs def resolve_command(cmd, path=None, pathext=None): """ Searches the PATH for the given executable and returns the normalized path """ # save the path searched for for later fallback searched_for_path = path if path is None: path = os.environ.get("PATH", []).split(os.pathsep) if isinstance(path, basestring): path = [path] # check if there are funny path extensions for executables, e.g. 
Windows if pathext is None: pathext = os.environ.get("PATHEXT", ".COM;.EXE;.BAT;.CMD").split(os.pathsep) # don"t use extensions if the command ends with one of them for ext in pathext: if cmd.endswith(ext): pathext = [""] break # check if we find the command on PATH for _dir in path: f = os.path.join(_dir, cmd) for ext in pathext: # try without extension first if os.path.isfile(f): return os.path.realpath(f) # then including the extension fext = f + ext if os.path.isfile(fext): return os.path.realpath(fext) # last resort: just try the searched for path if searched_for_path: cmd = os.path.join(os.path.realpath(searched_for_path), cmd) if not os.path.exists(cmd): print "ERROR: this script requires %s." % cmd print "Please verify it exists because it couldn't be found." sys.exit(3) return os.path.realpath(cmd)
gpl-2.0
srivassumit/servo
tests/wpt/web-platform-tests/content-security-policy/support/report.py
197
1162
import time import json import re def main(request, response): op = request.GET.first("op"); key = request.GET.first("reportID") if op == "take": timeout = float(request.GET.first("timeout")) t0 = time.time() while time.time() - t0 < timeout: time.sleep(0.5) value = request.server.stash.take(key=key) if value is not None: return [("Content-Type", "application/json")], value return [("Content-Type", "application/json")], json.dumps({'error': 'No such report.' , 'guid' : key}) if op == "cookies": cval = request.server.stash.take(key=re.sub('^...', 'ccc', key)) if cval is None: cval = "\"None\"" return [("Content-Type", "application/json")], "{ \"reportCookies\" : " + cval + "}" if hasattr(request, 'Cookies'): request.server.stash.put(key=re.sub('^...', 'ccc', key), value=request.Cookies) report = request.body report.rstrip() request.server.stash.take(key=key) request.server.stash.put(key=key, value=report) return [("Content-Type", "text/plain")], "Recorded report " + report
mpl-2.0
yephper/django
django/views/decorators/debug.py
2
2705
import functools


def sensitive_variables(*variables):
    """
    Mark local variables of the decorated function as sensitive, so the
    error-reporting machinery can hide their values when an unhandled
    exception is logged.

    Two forms are accepted:

    * with explicit variable names::

        @sensitive_variables('user', 'password', 'credit_card')
        def my_function(user):
            password = user.pass_word
            credit_card = user.credit_card_number
            ...

    * with no names, meaning every local variable is sensitive::

        @sensitive_variables()
        def my_function()
            ...
    """
    def decorator(func):
        @functools.wraps(func)
        def sensitive_variables_wrapper(*func_args, **func_kwargs):
            # The wrapper's name is load-bearing: the debug report filter
            # identifies this frame by function name, so do not rename it.
            # The attribute is (re)written on every call.
            sensitive_variables_wrapper.sensitive_variables = (
                variables if variables else '__ALL__'
            )
            return func(*func_args, **func_kwargs)
        return sensitive_variables_wrapper
    return decorator


def sensitive_post_parameters(*parameters):
    """
    Mark POST parameters of the decorated view as sensitive, so the
    error-reporting machinery can hide their values when an unhandled
    exception is logged.

    Two forms are accepted:

    * with explicit parameter names::

        @sensitive_post_parameters('password', 'credit_card')
        def my_view(request):
            pw = request.POST['password']
            cc = request.POST['credit_card']
            ...

    * with no names, meaning every POST parameter is sensitive::

        @sensitive_post_parameters()
        def my_view(request)
            ...
    """
    def decorator(view):
        @functools.wraps(view)
        def sensitive_post_parameters_wrapper(request, *args, **kwargs):
            # Imported lazily so this module does not require Django at
            # import time; resolution at call time is identical.
            from django.http import HttpRequest
            # This wrapper's name is also matched by the debug report filter.
            assert isinstance(request, HttpRequest), (
                "sensitive_post_parameters didn't receive an HttpRequest. "
                "If you are decorating a classmethod, be sure to use "
                "@method_decorator."
            )
            request.sensitive_post_parameters = (
                parameters if parameters else '__ALL__'
            )
            return view(request, *args, **kwargs)
        return sensitive_post_parameters_wrapper
    return decorator
bsd-3-clause
LonamiWebs/Telethon
telethon/client/chats.py
1
51586
import asyncio import inspect import itertools import string import typing from .. import helpers, utils, hints, errors from ..requestiter import RequestIter from ..tl import types, functions, custom if typing.TYPE_CHECKING: from .telegramclient import TelegramClient _MAX_PARTICIPANTS_CHUNK_SIZE = 200 _MAX_ADMIN_LOG_CHUNK_SIZE = 100 _MAX_PROFILE_PHOTO_CHUNK_SIZE = 100 class _ChatAction: _str_mapping = { 'typing': types.SendMessageTypingAction(), 'contact': types.SendMessageChooseContactAction(), 'game': types.SendMessageGamePlayAction(), 'location': types.SendMessageGeoLocationAction(), 'record-audio': types.SendMessageRecordAudioAction(), 'record-voice': types.SendMessageRecordAudioAction(), # alias 'record-round': types.SendMessageRecordRoundAction(), 'record-video': types.SendMessageRecordVideoAction(), 'audio': types.SendMessageUploadAudioAction(1), 'voice': types.SendMessageUploadAudioAction(1), # alias 'song': types.SendMessageUploadAudioAction(1), # alias 'round': types.SendMessageUploadRoundAction(1), 'video': types.SendMessageUploadVideoAction(1), 'photo': types.SendMessageUploadPhotoAction(1), 'document': types.SendMessageUploadDocumentAction(1), 'file': types.SendMessageUploadDocumentAction(1), # alias 'cancel': types.SendMessageCancelAction() } def __init__(self, client, chat, action, *, delay, auto_cancel): self._client = client self._chat = chat self._action = action self._delay = delay self._auto_cancel = auto_cancel self._request = None self._task = None self._running = False async def __aenter__(self): self._chat = await self._client.get_input_entity(self._chat) # Since `self._action` is passed by reference we can avoid # recreating the request all the time and still modify # `self._action.progress` directly in `progress`. 
self._request = functions.messages.SetTypingRequest( self._chat, self._action) self._running = True self._task = self._client.loop.create_task(self._update()) return self async def __aexit__(self, *args): self._running = False if self._task: self._task.cancel() try: await self._task except asyncio.CancelledError: pass self._task = None __enter__ = helpers._sync_enter __exit__ = helpers._sync_exit async def _update(self): try: while self._running: await self._client(self._request) await asyncio.sleep(self._delay) except ConnectionError: pass except asyncio.CancelledError: if self._auto_cancel: await self._client(functions.messages.SetTypingRequest( self._chat, types.SendMessageCancelAction())) def progress(self, current, total): if hasattr(self._action, 'progress'): self._action.progress = 100 * round(current / total) class _ParticipantsIter(RequestIter): async def _init(self, entity, filter, search, aggressive): if isinstance(filter, type): if filter in (types.ChannelParticipantsBanned, types.ChannelParticipantsKicked, types.ChannelParticipantsSearch, types.ChannelParticipantsContacts): # These require a `q` parameter (support types for convenience) filter = filter('') else: filter = filter() entity = await self.client.get_input_entity(entity) ty = helpers._entity_type(entity) if search and (filter or ty != helpers._EntityType.CHANNEL): # We need to 'search' ourselves unless we have a PeerChannel search = search.casefold() self.filter_entity = lambda ent: ( search in utils.get_display_name(ent).casefold() or search in (getattr(ent, 'username', None) or '').casefold() ) else: self.filter_entity = lambda ent: True # Only used for channels, but we should always set the attribute self.requests = [] if ty == helpers._EntityType.CHANNEL: self.total = (await self.client( functions.channels.GetFullChannelRequest(entity) )).full_chat.participants_count if self.limit <= 0: raise StopAsyncIteration self.seen = set() if aggressive and not filter: 
self.requests.extend(functions.channels.GetParticipantsRequest( channel=entity, filter=types.ChannelParticipantsSearch(x), offset=0, limit=_MAX_PARTICIPANTS_CHUNK_SIZE, hash=0 ) for x in (search or string.ascii_lowercase)) else: self.requests.append(functions.channels.GetParticipantsRequest( channel=entity, filter=filter or types.ChannelParticipantsSearch(search), offset=0, limit=_MAX_PARTICIPANTS_CHUNK_SIZE, hash=0 )) elif ty == helpers._EntityType.CHAT: full = await self.client( functions.messages.GetFullChatRequest(entity.chat_id)) if not isinstance( full.full_chat.participants, types.ChatParticipants): # ChatParticipantsForbidden won't have ``.participants`` self.total = 0 raise StopAsyncIteration self.total = len(full.full_chat.participants.participants) users = {user.id: user for user in full.users} for participant in full.full_chat.participants.participants: if isinstance(participant, types.ChannelParticipantBanned): user_id = participant.peer.user_id else: user_id = participant.user_id user = users[user_id] if not self.filter_entity(user): continue user = users[user_id] user.participant = participant self.buffer.append(user) return True else: self.total = 1 if self.limit != 0: user = await self.client.get_entity(entity) if self.filter_entity(user): user.participant = None self.buffer.append(user) return True async def _load_next_chunk(self): if not self.requests: return True # Only care about the limit for the first request # (small amount of people, won't be aggressive). # # Most people won't care about getting exactly 12,345 # members so it doesn't really matter not to be 100% # precise with being out of the offset/limit here. 
self.requests[0].limit = min( self.limit - self.requests[0].offset, _MAX_PARTICIPANTS_CHUNK_SIZE) if self.requests[0].offset > self.limit: return True results = await self.client(self.requests) for i in reversed(range(len(self.requests))): participants = results[i] if not participants.users: self.requests.pop(i) continue self.requests[i].offset += len(participants.participants) users = {user.id: user for user in participants.users} for participant in participants.participants: if isinstance(participant, types.ChannelParticipantBanned): user_id = participant.peer.user_id else: user_id = participant.user_id user = users[user_id] if not self.filter_entity(user) or user.id in self.seen: continue self.seen.add(user_id) user = users[user_id] user.participant = participant self.buffer.append(user) class _AdminLogIter(RequestIter): async def _init( self, entity, admins, search, min_id, max_id, join, leave, invite, restrict, unrestrict, ban, unban, promote, demote, info, settings, pinned, edit, delete, group_call ): if any((join, leave, invite, restrict, unrestrict, ban, unban, promote, demote, info, settings, pinned, edit, delete, group_call)): events_filter = types.ChannelAdminLogEventsFilter( join=join, leave=leave, invite=invite, ban=restrict, unban=unrestrict, kick=ban, unkick=unban, promote=promote, demote=demote, info=info, settings=settings, pinned=pinned, edit=edit, delete=delete, group_call=group_call ) else: events_filter = None self.entity = await self.client.get_input_entity(entity) admin_list = [] if admins: if not utils.is_list_like(admins): admins = (admins,) for admin in admins: admin_list.append(await self.client.get_input_entity(admin)) self.request = functions.channels.GetAdminLogRequest( self.entity, q=search or '', min_id=min_id, max_id=max_id, limit=0, events_filter=events_filter, admins=admin_list or None ) async def _load_next_chunk(self): self.request.limit = min(self.left, _MAX_ADMIN_LOG_CHUNK_SIZE) r = await self.client(self.request) entities = 
{utils.get_peer_id(x): x for x in itertools.chain(r.users, r.chats)} self.request.max_id = min((e.id for e in r.events), default=0) for ev in r.events: if isinstance(ev.action, types.ChannelAdminLogEventActionEditMessage): ev.action.prev_message._finish_init( self.client, entities, self.entity) ev.action.new_message._finish_init( self.client, entities, self.entity) elif isinstance(ev.action, types.ChannelAdminLogEventActionDeleteMessage): ev.action.message._finish_init( self.client, entities, self.entity) self.buffer.append(custom.AdminLogEvent(ev, entities)) if len(r.events) < self.request.limit: return True class _ProfilePhotoIter(RequestIter): async def _init( self, entity, offset, max_id ): entity = await self.client.get_input_entity(entity) ty = helpers._entity_type(entity) if ty == helpers._EntityType.USER: self.request = functions.photos.GetUserPhotosRequest( entity, offset=offset, max_id=max_id, limit=1 ) else: self.request = functions.messages.SearchRequest( peer=entity, q='', filter=types.InputMessagesFilterChatPhotos(), min_date=None, max_date=None, offset_id=0, add_offset=offset, limit=1, max_id=max_id, min_id=0, hash=0 ) if self.limit == 0: self.request.limit = 1 result = await self.client(self.request) if isinstance(result, types.photos.Photos): self.total = len(result.photos) elif isinstance(result, types.messages.Messages): self.total = len(result.messages) else: # Luckily both photosSlice and messages have a count for total self.total = getattr(result, 'count', None) async def _load_next_chunk(self): self.request.limit = min(self.left, _MAX_PROFILE_PHOTO_CHUNK_SIZE) result = await self.client(self.request) if isinstance(result, types.photos.Photos): self.buffer = result.photos self.left = len(self.buffer) self.total = len(self.buffer) elif isinstance(result, types.messages.Messages): self.buffer = [x.action.photo for x in result.messages if isinstance(x.action, types.MessageActionChatEditPhoto)] self.left = len(self.buffer) self.total = 
len(self.buffer) elif isinstance(result, types.photos.PhotosSlice): self.buffer = result.photos self.total = result.count if len(self.buffer) < self.request.limit: self.left = len(self.buffer) else: self.request.offset += len(result.photos) else: # Some broadcast channels have a photo that this request doesn't # retrieve for whatever random reason the Telegram server feels. # # This means the `total` count may be wrong but there's not much # that can be done around it (perhaps there are too many photos # and this is only a partial result so it's not possible to just # use the len of the result). self.total = getattr(result, 'count', None) # Unconditionally fetch the full channel to obtain this photo and # yield it with the rest (unless it's a duplicate). seen_id = None if isinstance(result, types.messages.ChannelMessages): channel = await self.client(functions.channels.GetFullChannelRequest(self.request.peer)) photo = channel.full_chat.chat_photo if isinstance(photo, types.Photo): self.buffer.append(photo) seen_id = photo.id self.buffer.extend( x.action.photo for x in result.messages if isinstance(x.action, types.MessageActionChatEditPhoto) and x.action.photo.id != seen_id ) if len(result.messages) < self.request.limit: self.left = len(self.buffer) elif result.messages: self.request.add_offset = 0 self.request.offset_id = result.messages[-1].id class ChatMethods: # region Public methods def iter_participants( self: 'TelegramClient', entity: 'hints.EntityLike', limit: float = None, *, search: str = '', filter: 'types.TypeChannelParticipantsFilter' = None, aggressive: bool = False) -> _ParticipantsIter: """ Iterator over the participants belonging to the specified chat. The order is unspecified. Arguments entity (`entity`): The entity from which to retrieve the participants list. limit (`int`): Limits amount of participants fetched. search (`str`, optional): Look for participants with this string in name/username. 
If ``aggressive is True``, the symbols from this string will be used. filter (:tl:`ChannelParticipantsFilter`, optional): The filter to be used, if you want e.g. only admins Note that you might not have permissions for some filter. This has no effect for normal chats or users. .. note:: The filter :tl:`ChannelParticipantsBanned` will return *restricted* users. If you want *banned* users you should use :tl:`ChannelParticipantsKicked` instead. aggressive (`bool`, optional): Aggressively looks for all participants in the chat. This is useful for channels since 20 July 2018, Telegram added a server-side limit where only the first 200 members can be retrieved. With this flag set, more than 200 will be often be retrieved. This has no effect if a ``filter`` is given. Yields The :tl:`User` objects returned by :tl:`GetParticipantsRequest` with an additional ``.participant`` attribute which is the matched :tl:`ChannelParticipant` type for channels/megagroups or :tl:`ChatParticipants` for normal chats. Example .. code-block:: python # Show all user IDs in a chat async for user in client.iter_participants(chat): print(user.id) # Search by name async for user in client.iter_participants(chat, search='name'): print(user.username) # Filter by admins from telethon.tl.types import ChannelParticipantsAdmins async for user in client.iter_participants(chat, filter=ChannelParticipantsAdmins): print(user.first_name) """ return _ParticipantsIter( self, limit, entity=entity, filter=filter, search=search, aggressive=aggressive ) async def get_participants( self: 'TelegramClient', *args, **kwargs) -> 'hints.TotalList': """ Same as `iter_participants()`, but returns a `TotalList <telethon.helpers.TotalList>` instead. Example .. 
code-block:: python users = await client.get_participants(chat) print(users[0].first_name) for user in users: if user.username is not None: print(user.username) """ return await self.iter_participants(*args, **kwargs).collect() get_participants.__signature__ = inspect.signature(iter_participants) def iter_admin_log( self: 'TelegramClient', entity: 'hints.EntityLike', limit: float = None, *, max_id: int = 0, min_id: int = 0, search: str = None, admins: 'hints.EntitiesLike' = None, join: bool = None, leave: bool = None, invite: bool = None, restrict: bool = None, unrestrict: bool = None, ban: bool = None, unban: bool = None, promote: bool = None, demote: bool = None, info: bool = None, settings: bool = None, pinned: bool = None, edit: bool = None, delete: bool = None, group_call: bool = None) -> _AdminLogIter: """ Iterator over the admin log for the specified channel. The default order is from the most recent event to to the oldest. Note that you must be an administrator of it to use this method. If none of the filters are present (i.e. they all are `None`), *all* event types will be returned. If at least one of them is `True`, only those that are true will be returned. Arguments entity (`entity`): The channel entity from which to get its admin log. limit (`int` | `None`, optional): Number of events to be retrieved. The limit may also be `None`, which would eventually return the whole history. max_id (`int`): All the events with a higher (newer) ID or equal to this will be excluded. min_id (`int`): All the events with a lower (older) ID or equal to this will be excluded. search (`str`): The string to be used as a search query. admins (`entity` | `list`): If present, the events will be filtered by these admins (or single admin) and only those caused by them will be returned. join (`bool`): If `True`, events for when a user joined will be returned. leave (`bool`): If `True`, events for when a user leaves will be returned. 
invite (`bool`): If `True`, events for when a user joins through an invite link will be returned. restrict (`bool`): If `True`, events with partial restrictions will be returned. This is what the API calls "ban". unrestrict (`bool`): If `True`, events removing restrictions will be returned. This is what the API calls "unban". ban (`bool`): If `True`, events applying or removing all restrictions will be returned. This is what the API calls "kick" (restricting all permissions removed is a ban, which kicks the user). unban (`bool`): If `True`, events removing all restrictions will be returned. This is what the API calls "unkick". promote (`bool`): If `True`, events with admin promotions will be returned. demote (`bool`): If `True`, events with admin demotions will be returned. info (`bool`): If `True`, events changing the group info will be returned. settings (`bool`): If `True`, events changing the group settings will be returned. pinned (`bool`): If `True`, events of new pinned messages will be returned. edit (`bool`): If `True`, events of message edits will be returned. delete (`bool`): If `True`, events of message deletions will be returned. group_call (`bool`): If `True`, events related to group calls will be returned. Yields Instances of `AdminLogEvent <telethon.tl.custom.adminlogevent.AdminLogEvent>`. Example .. code-block:: python async for event in client.iter_admin_log(channel): if event.changed_title: print('The title changed from', event.old, 'to', event.new) """ return _AdminLogIter( self, limit, entity=entity, admins=admins, search=search, min_id=min_id, max_id=max_id, join=join, leave=leave, invite=invite, restrict=restrict, unrestrict=unrestrict, ban=ban, unban=unban, promote=promote, demote=demote, info=info, settings=settings, pinned=pinned, edit=edit, delete=delete, group_call=group_call ) async def get_admin_log( self: 'TelegramClient', *args, **kwargs) -> 'hints.TotalList': """ Same as `iter_admin_log()`, but returns a ``list`` instead. Example .. 
code-block:: python # Get a list of deleted message events which said "heck" events = await client.get_admin_log(channel, search='heck', delete=True) # Print the old message before it was deleted print(events[0].old) """ return await self.iter_admin_log(*args, **kwargs).collect() get_admin_log.__signature__ = inspect.signature(iter_admin_log) def iter_profile_photos( self: 'TelegramClient', entity: 'hints.EntityLike', limit: int = None, *, offset: int = 0, max_id: int = 0) -> _ProfilePhotoIter: """ Iterator over a user's profile photos or a chat's photos. The order is from the most recent photo to the oldest. Arguments entity (`entity`): The entity from which to get the profile or chat photos. limit (`int` | `None`, optional): Number of photos to be retrieved. The limit may also be `None`, which would eventually all the photos that are still available. offset (`int`): How many photos should be skipped before returning the first one. max_id (`int`): The maximum ID allowed when fetching photos. Yields Instances of :tl:`Photo`. Example .. code-block:: python # Download all the profile photos of some user async for photo in client.iter_profile_photos(user): await client.download_media(photo) """ return _ProfilePhotoIter( self, limit, entity=entity, offset=offset, max_id=max_id ) async def get_profile_photos( self: 'TelegramClient', *args, **kwargs) -> 'hints.TotalList': """ Same as `iter_profile_photos()`, but returns a `TotalList <telethon.helpers.TotalList>` instead. Example .. 
code-block:: python # Get the photos of a channel photos = await client.get_profile_photos(channel) # Download the oldest photo await client.download_media(photos[-1]) """ return await self.iter_profile_photos(*args, **kwargs).collect() get_profile_photos.__signature__ = inspect.signature(iter_profile_photos) def action( self: 'TelegramClient', entity: 'hints.EntityLike', action: 'typing.Union[str, types.TypeSendMessageAction]', *, delay: float = 4, auto_cancel: bool = True) -> 'typing.Union[_ChatAction, typing.Coroutine]': """ Returns a context-manager object to represent a "chat action". Chat actions indicate things like "user is typing", "user is uploading a photo", etc. If the action is ``'cancel'``, you should just ``await`` the result, since it makes no sense to use a context-manager for it. See the example below for intended usage. Arguments entity (`entity`): The entity where the action should be showed in. action (`str` | :tl:`SendMessageAction`): The action to show. You can either pass a instance of :tl:`SendMessageAction` or better, a string used while: * ``'typing'``: typing a text message. * ``'contact'``: choosing a contact. * ``'game'``: playing a game. * ``'location'``: choosing a geo location. * ``'record-audio'``: recording a voice note. You may use ``'record-voice'`` as alias. * ``'record-round'``: recording a round video. * ``'record-video'``: recording a normal video. * ``'audio'``: sending an audio file (voice note or song). You may use ``'voice'`` and ``'song'`` as aliases. * ``'round'``: uploading a round video. * ``'video'``: uploading a video file. * ``'photo'``: uploading a photo. * ``'document'``: uploading a document file. You may use ``'file'`` as alias. * ``'cancel'``: cancel any pending action in this chat. Invalid strings will raise a ``ValueError``. delay (`int` | `float`): The delay, in seconds, to wait between sending actions. 
For example, if the delay is 5 and it takes 7 seconds to do something, three requests will be made at 0s, 5s, and 7s to cancel the action. auto_cancel (`bool`): Whether the action should be cancelled once the context manager exists or not. The default is `True`, since you don't want progress to be shown when it has already completed. Returns Either a context-manager object or a coroutine. Example .. code-block:: python # Type for 2 seconds, then send a message async with client.action(chat, 'typing'): await asyncio.sleep(2) await client.send_message(chat, 'Hello world! I type slow ^^') # Cancel any previous action await client.action(chat, 'cancel') # Upload a document, showing its progress (most clients ignore this) async with client.action(chat, 'document') as action: await client.send_file(chat, zip_file, progress_callback=action.progress) """ if isinstance(action, str): try: action = _ChatAction._str_mapping[action.lower()] except KeyError: raise ValueError( 'No such action "{}"'.format(action)) from None elif not isinstance(action, types.TLObject) or action.SUBCLASS_OF_ID != 0x20b2cc21: # 0x20b2cc21 = crc32(b'SendMessageAction') if isinstance(action, type): raise ValueError('You must pass an instance, not the class') else: raise ValueError('Cannot use {} as action'.format(action)) if isinstance(action, types.SendMessageCancelAction): # ``SetTypingRequest.resolve`` will get input peer of ``entity``. 
return self(functions.messages.SetTypingRequest( entity, types.SendMessageCancelAction())) return _ChatAction( self, entity, action, delay=delay, auto_cancel=auto_cancel) async def edit_admin( self: 'TelegramClient', entity: 'hints.EntityLike', user: 'hints.EntityLike', *, change_info: bool = None, post_messages: bool = None, edit_messages: bool = None, delete_messages: bool = None, ban_users: bool = None, invite_users: bool = None, pin_messages: bool = None, add_admins: bool = None, manage_call: bool = None, anonymous: bool = None, is_admin: bool = None, title: str = None) -> types.Updates: """ Edits admin permissions for someone in a chat. Raises an error if a wrong combination of rights are given (e.g. you don't have enough permissions to grant one). Unless otherwise stated, permissions will work in channels and megagroups. Arguments entity (`entity`): The channel, megagroup or chat where the promotion should happen. user (`entity`): The user to be promoted. change_info (`bool`, optional): Whether the user will be able to change info. post_messages (`bool`, optional): Whether the user will be able to post in the channel. This will only work in broadcast channels. edit_messages (`bool`, optional): Whether the user will be able to edit messages in the channel. This will only work in broadcast channels. delete_messages (`bool`, optional): Whether the user will be able to delete messages. ban_users (`bool`, optional): Whether the user will be able to ban users. invite_users (`bool`, optional): Whether the user will be able to invite users. Needs some testing. pin_messages (`bool`, optional): Whether the user will be able to pin messages. add_admins (`bool`, optional): Whether the user will be able to add admins. manage_call (`bool`, optional): Whether the user will be able to manage group calls. anonymous (`bool`, optional): Whether the user will remain anonymous when sending messages. The sender of the anonymous messages becomes the group itself. .. 
note:: Users may be able to identify the anonymous admin by its custom title, so additional care is needed when using both ``anonymous`` and custom titles. For example, if multiple anonymous admins share the same title, users won't be able to distinguish them. is_admin (`bool`, optional): Whether the user will be an admin in the chat. This will only work in small group chats. Whether the user will be an admin in the chat. This is the only permission available in small group chats, and when used in megagroups, all non-explicitly set permissions will have this value. Essentially, only passing ``is_admin=True`` will grant all permissions, but you can still disable those you need. title (`str`, optional): The custom title (also known as "rank") to show for this admin. This text will be shown instead of the "admin" badge. This will only work in channels and megagroups. When left unspecified or empty, the default localized "admin" badge will be shown. Returns The resulting :tl:`Updates` object. Example .. code-block:: python # Allowing `user` to pin messages in `chat` await client.edit_admin(chat, user, pin_messages=True) # Granting all permissions except for `add_admins` await client.edit_admin(chat, user, is_admin=True, add_admins=False) """ entity = await self.get_input_entity(entity) user = await self.get_input_entity(user) ty = helpers._entity_type(user) if ty != helpers._EntityType.USER: raise ValueError('You must pass a user entity') perm_names = ( 'change_info', 'post_messages', 'edit_messages', 'delete_messages', 'ban_users', 'invite_users', 'pin_messages', 'add_admins', 'anonymous', 'manage_call', ) ty = helpers._entity_type(entity) if ty == helpers._EntityType.CHANNEL: # If we try to set these permissions in a megagroup, we # would get a RIGHT_FORBIDDEN. 
However, it makes sense # that an admin can post messages, so we want to avoid the error if post_messages or edit_messages: # TODO get rid of this once sessions cache this information if entity.channel_id not in self._megagroup_cache: full_entity = await self.get_entity(entity) self._megagroup_cache[entity.channel_id] = full_entity.megagroup if self._megagroup_cache[entity.channel_id]: post_messages = None edit_messages = None perms = locals() return await self(functions.channels.EditAdminRequest(entity, user, types.ChatAdminRights(**{ # A permission is its explicit (not-None) value or `is_admin`. # This essentially makes `is_admin` be the default value. name: perms[name] if perms[name] is not None else is_admin for name in perm_names }), rank=title or '')) elif ty == helpers._EntityType.CHAT: # If the user passed any permission in a small # group chat, they must be a full admin to have it. if is_admin is None: is_admin = any(locals()[x] for x in perm_names) return await self(functions.messages.EditChatAdminRequest( entity, user, is_admin=is_admin)) else: raise ValueError( 'You can only edit permissions in groups and channels') async def edit_permissions( self: 'TelegramClient', entity: 'hints.EntityLike', user: 'typing.Optional[hints.EntityLike]' = None, until_date: 'hints.DateLike' = None, *, view_messages: bool = True, send_messages: bool = True, send_media: bool = True, send_stickers: bool = True, send_gifs: bool = True, send_games: bool = True, send_inline: bool = True, embed_link_previews: bool = True, send_polls: bool = True, change_info: bool = True, invite_users: bool = True, pin_messages: bool = True) -> types.Updates: """ Edits user restrictions in a chat. Set an argument to `False` to apply a restriction (i.e. remove the permission), or omit them to use the default `True` (i.e. don't apply a restriction). Raises an error if a wrong combination of rights are given (e.g. you don't have enough permissions to revoke one). 
By default, each boolean argument is `True`, meaning that it is true that the user has access to the default permission and may be able to make use of it. If you set an argument to `False`, then a restriction is applied regardless of the default permissions. It is important to note that `True` does *not* mean grant, only "don't restrict", and this is where the default permissions come in. A user may have not been revoked the ``pin_messages`` permission (it is `True`) but they won't be able to use it if the default permissions don't allow it either. Arguments entity (`entity`): The channel or megagroup where the restriction should happen. user (`entity`, optional): If specified, the permission will be changed for the specific user. If left as `None`, the default chat permissions will be updated. until_date (`DateLike`, optional): When the user will be unbanned. If the due date or duration is longer than 366 days or shorter than 30 seconds, the ban will be forever. Defaults to ``0`` (ban forever). view_messages (`bool`, optional): Whether the user is able to view messages or not. Forbidding someone from viewing messages equals to banning them. This will only work if ``user`` is set. send_messages (`bool`, optional): Whether the user is able to send messages or not. send_media (`bool`, optional): Whether the user is able to send media or not. send_stickers (`bool`, optional): Whether the user is able to send stickers or not. send_gifs (`bool`, optional): Whether the user is able to send animated gifs or not. send_games (`bool`, optional): Whether the user is able to send games or not. send_inline (`bool`, optional): Whether the user is able to use inline bots or not. embed_link_previews (`bool`, optional): Whether the user is able to enable the link preview in the messages they send. Note that the user will still be able to send messages with links if this permission is removed, but these links won't display a link preview. 
send_polls (`bool`, optional): Whether the user is able to send polls or not. change_info (`bool`, optional): Whether the user is able to change info or not. invite_users (`bool`, optional): Whether the user is able to invite other users or not. pin_messages (`bool`, optional): Whether the user is able to pin messages or not. Returns The resulting :tl:`Updates` object. Example .. code-block:: python from datetime import timedelta # Banning `user` from `chat` for 1 minute await client.edit_permissions(chat, user, timedelta(minutes=1), view_messages=False) # Banning `user` from `chat` forever await client.edit_permissions(chat, user, view_messages=False) # Kicking someone (ban + un-ban) await client.edit_permissions(chat, user, view_messages=False) await client.edit_permissions(chat, user) """ entity = await self.get_input_entity(entity) ty = helpers._entity_type(entity) if ty != helpers._EntityType.CHANNEL: raise ValueError('You must pass either a channel or a supergroup') rights = types.ChatBannedRights( until_date=until_date, view_messages=not view_messages, send_messages=not send_messages, send_media=not send_media, send_stickers=not send_stickers, send_gifs=not send_gifs, send_games=not send_games, send_inline=not send_inline, embed_links=not embed_link_previews, send_polls=not send_polls, change_info=not change_info, invite_users=not invite_users, pin_messages=not pin_messages ) if user is None: return await self(functions.messages.EditChatDefaultBannedRightsRequest( peer=entity, banned_rights=rights )) user = await self.get_input_entity(user) ty = helpers._entity_type(user) if ty != helpers._EntityType.USER: raise ValueError('You must pass a user entity') if isinstance(user, types.InputPeerSelf): raise ValueError('You cannot restrict yourself') return await self(functions.channels.EditBannedRequest( channel=entity, participant=user, banned_rights=rights )) async def kick_participant( self: 'TelegramClient', entity: 'hints.EntityLike', user: 
'typing.Optional[hints.EntityLike]' ): """ Kicks a user from a chat. Kicking yourself (``'me'``) will result in leaving the chat. .. note:: Attempting to kick someone who was banned will remove their restrictions (and thus unbanning them), since kicking is just ban + unban. Arguments entity (`entity`): The channel or chat where the user should be kicked from. user (`entity`, optional): The user to kick. Returns Returns the service `Message <telethon.tl.custom.message.Message>` produced about a user being kicked, if any. Example .. code-block:: python # Kick some user from some chat, and deleting the service message msg = await client.kick_participant(chat, user) await msg.delete() # Leaving chat await client.kick_participant(chat, 'me') """ entity = await self.get_input_entity(entity) user = await self.get_input_entity(user) if helpers._entity_type(user) != helpers._EntityType.USER: raise ValueError('You must pass a user entity') ty = helpers._entity_type(entity) if ty == helpers._EntityType.CHAT: resp = await self(functions.messages.DeleteChatUserRequest(entity.chat_id, user)) elif ty == helpers._EntityType.CHANNEL: if isinstance(user, types.InputPeerSelf): # Despite no longer being in the channel, the account still # seems to get the service message. 
resp = await self(functions.channels.LeaveChannelRequest(entity)) else: resp = await self(functions.channels.EditBannedRequest( channel=entity, participant=user, banned_rights=types.ChatBannedRights( until_date=None, view_messages=True) )) await asyncio.sleep(0.5) await self(functions.channels.EditBannedRequest( channel=entity, participant=user, banned_rights=types.ChatBannedRights(until_date=None) )) else: raise ValueError('You must pass either a channel or a chat') return self._get_response_message(None, resp, entity) async def get_permissions( self: 'TelegramClient', entity: 'hints.EntityLike', user: 'hints.EntityLike' = None ) -> 'typing.Optional[custom.ParticipantPermissions]': """ Fetches the permissions of a user in a specific chat or channel or get Default Restricted Rights of Chat or Channel. .. note:: This request has to fetch the entire chat for small group chats, which can get somewhat expensive, so use of a cache is advised. Arguments entity (`entity`): The channel or chat the user is participant of. user (`entity`, optional): Target user. Returns A `ParticipantPermissions <telethon.tl.custom.participantpermissions.ParticipantPermissions>` instance. Refer to its documentation to see what properties are available. Example .. 
code-block:: python permissions = await client.get_permissions(chat, user) if permissions.is_admin: # do something # Get Banned Permissions of Chat await client.get_permissions(chat) """ entity = await self.get_entity(entity) if not user: if isinstance(entity, types.Channel): FullChat = await self(functions.channels.GetFullChannelRequest(entity)) elif isinstance(entity, types.Chat): FullChat = await self(functions.messages.GetFullChatRequest(entity)) else: return return FullChat.chats[0].default_banned_rights entity = await self.get_input_entity(entity) user = await self.get_input_entity(user) if helpers._entity_type(user) != helpers._EntityType.USER: raise ValueError('You must pass a user entity') if helpers._entity_type(entity) == helpers._EntityType.CHANNEL: participant = await self(functions.channels.GetParticipantRequest( entity, user )) return custom.ParticipantPermissions(participant.participant, False) elif helpers._entity_type(entity) == helpers._EntityType.CHAT: chat = await self(functions.messages.GetFullChatRequest( entity )) if isinstance(user, types.InputPeerSelf): user = await self.get_me(input_peer=True) for participant in chat.full_chat.participants.participants: if participant.user_id == user.user_id: return custom.ParticipantPermissions(participant, True) raise errors.UserNotParticipantError(None) raise ValueError('You must pass either a channel or a chat') async def get_stats( self: 'TelegramClient', entity: 'hints.EntityLike', message: 'typing.Union[int, types.Message]' = None, ): """ Retrieves statistics from the given megagroup or broadcast channel. Note that some restrictions apply before being able to fetch statistics, in particular the channel must have enough members (for megagroups, this requires `at least 500 members`_). Arguments entity (`entity`): The channel from which to get statistics. message (`int` | ``Message``, optional): The message ID from which to get statistics, if your goal is to obtain the statistics of a single message. 
Raises If the given entity is not a channel (broadcast or megagroup), a `TypeError` is raised. If there are not enough members (poorly named) errors such as ``telethon.errors.ChatAdminRequiredError`` will appear. Returns If both ``entity`` and ``message`` were provided, returns :tl:`MessageStats`. Otherwise, either :tl:`BroadcastStats` or :tl:`MegagroupStats`, depending on whether the input belonged to a broadcast channel or megagroup. Example .. code-block:: python # Some megagroup or channel username or ID to fetch channel = -100123 stats = await client.get_stats(channel) print('Stats from', stats.period.min_date, 'to', stats.period.max_date, ':') print(stats.stringify()) .. _`at least 500 members`: https://telegram.org/blog/profile-videos-people-nearby-and-more """ entity = await self.get_input_entity(entity) if helpers._entity_type(entity) != helpers._EntityType.CHANNEL: raise TypeError('You must pass a channel entity') message = utils.get_message_id(message) if message is not None: try: req = functions.stats.GetMessageStatsRequest(entity, message) return await self(req) except errors.StatsMigrateError as e: dc = e.dc else: # Don't bother fetching the Channel entity (costs a request), instead # try to guess and if it fails we know it's the other one (best case # no extra request, worst just one). try: req = functions.stats.GetBroadcastStatsRequest(entity) return await self(req) except errors.StatsMigrateError as e: dc = e.dc except errors.BroadcastRequiredError: req = functions.stats.GetMegagroupStatsRequest(entity) try: return await self(req) except errors.StatsMigrateError as e: dc = e.dc sender = await self._borrow_exported_sender(dc) try: # req will be resolved to use the right types inside by now return await sender.send(req) finally: await self._return_exported_sender(sender) # endregion
mit
pnedunuri/scipy
scipy/special/_ellip_harm.py
80
5743
from __future__ import division, print_function, absolute_import

import threading
import numpy as np

from ._ufuncs import _ellip_harm
from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm

# the functions _ellipsoid, _ellipsoid_norm use global variables, the lock
# protects them if the function is called from multiple threads simultaneously
_ellip_lock = threading.Lock()


def ellip_harm(h2, k2, n, p, s, signm=1, signn=1):
    r"""
    Ellipsoidal harmonic functions E^p_n(l)

    These are also known as Lame functions of the first kind, and are
    solutions to the Lame equation:

    .. math:: (s^2 - h^2)(s^2 - k^2)E''(s) + s(2s^2 - h^2 - k^2)E'(s) + (a - q s^2)E(s) = 0

    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
    returned) corresponding to the solutions.

    Parameters
    ----------
    h2 : float
        ``h**2``
    k2 : float
        ``k**2``; should be larger than ``h**2``
    n : int
        Degree
    s : float
        Coordinate
    p : int
        Order, can range between [1,2n+1]
    signm : {1, -1}, optional
        Sign of prefactor of functions. Can be +/-1. See Notes.
    signn : {1, -1}, optional
        Sign of prefactor of functions. Can be +/-1. See Notes.

    Returns
    -------
    E : float
        the harmonic :math:`E^p_n(s)`

    See Also
    --------
    ellip_harm_2, ellip_normal

    Notes
    -----
    The geometric interpretation of the ellipsoidal functions is
    explained in [2]_, [3]_, [4]_. The `signm` and `signn` arguments control the
    sign of prefactors for functions according to their type::

        K : +1
        L : signm
        M : signn
        N : signm*signn

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Digital Library of Mathematical Functions 29.12
       http://dlmf.nist.gov/29.12
    .. [2] Bardhan and Knepley, "Computational science and
       re-discovery: open-source implementations of
       ellipsoidal harmonics for problems in potential theory",
       Comput. Sci. Disc. 5, 014006 (2012)
       doi:10.1088/1749-4699/5/1/014006
    .. [3] David J.and Dechambre P, "Computation of Ellipsoidal
       Gravity Field Harmonics for small solar system bodies"
       pp. 30-36, 2000
    .. [4] George Dassios, "Ellipsoidal Harmonics: Theory and Applications"
       pp. 418, 2012

    Examples
    --------
    >>> from scipy.special import ellip_harm
    >>> w = ellip_harm(5,8,1,1,2.5)
    >>> w
    2.5

    Check that the functions indeed are solutions to the Lame equation:

    >>> from scipy.interpolate import UnivariateSpline
    >>> def eigenvalue(f, df, ddf):
    ...     r = ((s**2 - h**2)*(s**2 - k**2)*ddf + s*(2*s**2 - h**2 - k**2)*df - n*(n+1)*s**2*f)/f
    ...     return -r.mean(), r.std()
    >>> s = np.linspace(0.1, 10, 200)
    >>> k, h, n, p = 8.0, 2.2, 3, 2
    >>> E = ellip_harm(h**2, k**2, n, p, s)
    >>> E_spl = UnivariateSpline(s, E)
    >>> a, a_err = eigenvalue(E_spl(s), E_spl(s,1), E_spl(s,2))
    >>> a, a_err
    (583.44366156701483, 6.4580890640310646e-11)

    """
    return _ellip_harm(h2, k2, n, p, s, signm, signn)


# np.vectorize does not work on Cython functions on Numpy < 1.8,
# so a wrapper is needed
def _ellip_harm_2_vec(h2, k2, n, p, s):
    return _ellipsoid(h2, k2, n, p, s)

_ellip_harm_2_vec = np.vectorize(_ellip_harm_2_vec, otypes='d')


def ellip_harm_2(h2, k2, n, p, s):
    r"""
    Ellipsoidal harmonic functions F^p_n(l)

    These are also known as Lame functions of the second kind, and are
    solutions to the Lame equation:

    .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0

    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
    returned) corresponding to the solutions.

    Parameters
    ----------
    h2 : float
        ``h**2``
    k2 : float
        ``k**2``; should be larger than ``h**2``
    n : int
        Degree.
    p : int
        Order, can range between [1,2n+1].
    s : float
        Coordinate

    Returns
    -------
    F : float
        The harmonic :math:`F^p_n(s)`

    Notes
    -----
    Lame functions of the second kind are related to the functions of the first
    kind:

    .. math::

       F^p_n(s)=(2n + 1)E^p_n(s)\int_{0}^{1/s}\frac{du}{(E^p_n(1/u))^2\sqrt{(1-u^2k^2)(1-u^2h^2)}}

    .. versionadded:: 0.15.0

    See Also
    --------
    ellip_harm, ellip_normal

    Examples
    --------
    >>> from scipy.special import ellip_harm_2
    >>> w = ellip_harm_2(5,8,2,1,10)
    >>> w
    0.00108056853382

    """
    # The lock serializes access to the global state used by _ellipsoid;
    # errstate suppresses spurious warnings from the underlying quadrature.
    with _ellip_lock:
        with np.errstate(all='ignore'):
            return _ellip_harm_2_vec(h2, k2, n, p, s)


def _ellip_normal_vec(h2, k2, n, p):
    return _ellipsoid_norm(h2, k2, n, p)

_ellip_normal_vec = np.vectorize(_ellip_normal_vec, otypes='d')


def ellip_normal(h2, k2, n, p):
    r"""
    Ellipsoidal harmonic normalization constants gamma^p_n

    The normalization constant is defined as

    .. math::

       \gamma^p_n=8\int_{0}^{h}dx\int_{h}^{k}dy\frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}{\sqrt{(k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)}}

    Parameters
    ----------
    h2 : float
        ``h**2``
    k2 : float
        ``k**2``; should be larger than ``h**2``
    n : int
        Degree.
    p : int
        Order, can range between [1,2n+1].

    Returns
    -------
    gamma : float
        The normalization constant :math:`\gamma^p_n`

    See Also
    --------
    ellip_harm, ellip_harm_2

    Notes
    -----
    .. versionadded:: 0.15.0

    Examples
    --------
    >>> from scipy.special import ellip_normal
    >>> w = ellip_normal(5,8,3,7)
    >>> w
    1723.38796997

    """
    # The lock serializes access to the global state used by _ellipsoid_norm;
    # errstate suppresses spurious warnings from the underlying quadrature.
    with _ellip_lock:
        with np.errstate(all='ignore'):
            return _ellip_normal_vec(h2, k2, n, p)
bsd-3-clause
amwelch/a10sdk-python
a10sdk/core/A10_file/file_bw_list_oper.py
2
2195
from a10sdk.common.A10BaseClass import A10BaseClass


class FileList(A10BaseClass):

    """This class does not support CRUD Operations please use parent.

    :param url: {"type": "string", "format": "string"}
    :param file: {"type": "string", "format": "string"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "file-list"
        self.DeviceProxy = ""
        self.url = ""
        self.A10WW_file = ""
        # Any keyword argument overrides the matching default attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)


class Oper(A10BaseClass):

    """This class does not support CRUD Operations please use parent.

    :param file_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"url": {"type": "string", "format": "string"}, "optional": true, "file": {"type": "string", "format": "string"}}}]}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "oper"
        self.DeviceProxy = ""
        self.file_list = []
        # Any keyword argument overrides the matching default attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)


class BwList(A10BaseClass):

    """Class Description::

    Operational Status for the object bw-list.

    Class bw-list supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`

    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/file/bw-list/oper`.

    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "bw-list"
        self.a10_url = "/axapi/v3/file/bw-list/oper"
        self.DeviceProxy = ""
        self.oper = {}
        # Any keyword argument overrides the matching default attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
apache-2.0
cstavr/synnefo
snf-webproject/synnefo/webproject/context_processors.py
2
2345
from django.utils.safestring import mark_safe
from django.conf import settings


def cloudbar(request):
    """
    Django context processor that applies all cloudbar settings in response
    context plus a ready to use pre rendered script html tag containing valid
    javascript code for cloudbar to display.

    To use it add ``synnefo.webproject.context_processors.cloudbar`` in your
    project's ``TEMPLATE_CONTEXT_PROCESSORS setting`` (snf-webproject
    already does).

    Then in your base html template::

        <html>
        ....
        <head>
        ...
        {% if CLOUDBAR_ACTIVE %}
            {{ CLOUDBAR_CODE }}
        {% endif %}
        </head>
        <body>
        ....
        </body>
        </html>
    """
    # Optional settings fall back to sane defaults; the URL/location
    # settings are mandatory and raise AttributeError when missing.
    active = getattr(settings, 'CLOUDBAR_ACTIVE', True)
    location = getattr(settings, 'CLOUDBAR_LOCATION')
    cookie_name = getattr(settings, 'CLOUDBAR_COOKIE_NAME',
                          'okeanos_account')
    services_url = getattr(settings, 'CLOUDBAR_SERVICES_URL')
    menu_url = getattr(settings, 'CLOUDBAR_MENU_URL')
    height = getattr(settings, 'CLOUDBAR_HEIGHT', '35')
    bg_color = getattr(settings, 'CLOUDBAR_BACKGROUND_COLOR', '#000000')

    template = """
<script type="text/javascript">
var CLOUDBAR_LOCATION = "%(location)s";
var CLOUDBAR_COOKIE_NAME = "%(cookie_name)s";
var GET_SERVICES_URL = "%(services_url)s";
var GET_MENU_URL = "%(menu_url)s";
var CLOUDBAR_HEIGHT = '%(height)s';

$(document).ready(function(){
    $.getScript(CLOUDBAR_LOCATION + 'cloudbar.js');
});
</script>
<style>
    body {
        border-top: %(height)spx solid %(bg_color)s;
    }
    body .cloudbar {
        height: %(height)spx;
    }
</style>
"""

    # mark_safe: the script tag must be emitted verbatim, not HTML-escaped.
    code = mark_safe(template % {
        'location': location,
        'cookie_name': cookie_name,
        'services_url': services_url,
        'menu_url': menu_url,
        'height': str(height),
        'bg_color': bg_color,
    })

    return {
        'CLOUDBAR_ACTIVE': active,
        'CLOUDBAR_LOCATION': location,
        'CLOUDBAR_COOKIE_NAME': cookie_name,
        'CLOUDBAR_SERVICES_URL': services_url,
        'CLOUDBAR_MENU_URL': menu_url,
        'CLOUDBAR_CODE': code,
    }
gpl-3.0
auduny/home-assistant
homeassistant/components/asterisk_cdr/mailbox.py
7
2015
"""Support for the Asterisk CDR interface.""" import logging import hashlib import datetime from homeassistant.core import callback from homeassistant.components.asterisk_mbox import SIGNAL_CDR_UPDATE from homeassistant.components.asterisk_mbox import DOMAIN as ASTERISK_DOMAIN from homeassistant.components.mailbox import Mailbox from homeassistant.helpers.dispatcher import async_dispatcher_connect _LOGGER = logging.getLogger(__name__) MAILBOX_NAME = 'asterisk_cdr' async def async_get_handler(hass, config, discovery_info=None): """Set up the Asterix CDR platform.""" return AsteriskCDR(hass, MAILBOX_NAME) class AsteriskCDR(Mailbox): """Asterisk VM Call Data Record mailbox.""" def __init__(self, hass, name): """Initialize Asterisk CDR.""" super().__init__(hass, name) self.cdr = [] async_dispatcher_connect( self.hass, SIGNAL_CDR_UPDATE, self._update_callback) @callback def _update_callback(self, msg): """Update the message count in HA, if needed.""" self._build_message() self.async_update() def _build_message(self): """Build message structure.""" cdr = [] for entry in self.hass.data[ASTERISK_DOMAIN].cdr: timestamp = datetime.datetime.strptime( entry['time'], "%Y-%m-%d %H:%M:%S").timestamp() info = { 'origtime': timestamp, 'callerid': entry['callerid'], 'duration': entry['duration'], } sha = hashlib.sha256(str(entry).encode('utf-8')).hexdigest() msg = "Destination: {}\nApplication: {}\n Context: {}".format( entry['dest'], entry['application'], entry['context']) cdr.append({'info': info, 'sha': sha, 'text': msg}) self.cdr = cdr async def async_get_messages(self): """Return a list of the current messages.""" if not self.cdr: self._build_message() return self.cdr
apache-2.0
ax003d/openerp
openerp/tests/test_translate.py
460
1941
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import unittest

from openerp.tools.translate import quote, unquote


class TranslationToolsTestCase(unittest.TestCase):
    """Tests for the PO-file string quoting helpers in tools.translate."""

    def test_quote_unquote(self):
        """quote()/unquote() must round-trip arbitrary strings."""

        def check_roundtrip(s):
            # quote() may split long strings into adjacent quoted chunks
            # separated by '"\n"'; rejoin them before unquoting.
            quoted = quote(s)
            unquoted = unquote("".join(quoted.split('"\n"')))
            # assertEqual, not the deprecated assertEquals alias.
            self.assertEqual(s, unquoted)

        check_roundtrip("""test \nall kinds\n \n o\r \\\\ nope\n\n" """)

        # The ones with 1+ backslashes directly followed by
        # a newline or literal N can fail... we would need a
        # state-machine parser to handle these, but this would
        # be much slower so it's better to avoid them at the moment
        self.assertRaises(AssertionError, quote,
                          """test \nall kinds\n\no\r \\\\nope\n\n" """)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
hradec/gaffer
python/GafferArnoldTest/ArnoldLightTest.py
8
5096
########################################################################## # # Copyright (c) 2016, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
##########################################################################

# `unittest` was missing from the imports even though the __main__ guard
# below calls unittest.main() — running the file as a script raised a
# NameError.
import unittest

import arnold
import imath

import IECore
import IECoreScene
import IECoreArnold

import GafferSceneTest
import GafferArnold


class ArnoldLightTest( GafferSceneTest.SceneTestCase ) :

	def testUsesShaders( self ) :

		# A loaded light must expose itself as a single-shader network
		# of type "ai:light".
		l = GafferArnold.ArnoldLight()
		l.loadShader( "point_light" )

		n = l["out"].attributes( "/light" )["ai:light"]
		self.assertTrue( isinstance( n, IECoreScene.ShaderNetwork ) )
		self.assertEqual( len( n ), 1 )
		self.assertTrue( isinstance( n.outputShader(), IECoreScene.Shader ) )
		self.assertEqual( n.outputShader().type, "ai:light" )
		self.assertEqual( n.outputShader().name, "point_light" )

	def testLoadAllLightsWithoutWarnings( self ) :

		# Enumerate every light node registered with Arnold.
		lightNames = []
		with IECoreArnold.UniverseBlock( writable = False ) :
			it = arnold.AiUniverseGetNodeEntryIterator( arnold.AI_NODE_LIGHT )
			while not arnold.AiNodeEntryIteratorFinished( it ) :
				nodeEntry = arnold.AiNodeEntryIteratorGetNext( it )
				lightNames.append( arnold.AiNodeEntryGetName( nodeEntry ) )

		self.longMessage = True

		# Loading each one must not emit any messages.
		for lightName in lightNames :
			with IECore.CapturingMessageHandler() as mh :
				l = GafferArnold.ArnoldLight()
				l.loadShader( lightName )
				self.assertEqual( [ m.message for m in mh.messages ], [], "Error loading %s" % lightName )

	def testShaderInputs( self ) :

		s = GafferArnold.ArnoldShader( "sky" )
		s.loadShader( "physical_sky" )
		s["parameters"]["intensity"].setValue( 2 )

		# Test setting up a matte closure connected to "shader"
		# Note that this doesn't currently render correctly, but SolidAngle assures me that they are fixing
		# it and is the preferred way
		s2 = GafferArnold.ArnoldShader( "matte" )
		s2.loadShader( "matte" )
		s2["parameters"]["color"].setValue( imath.Color4f( 0, 1, 0, 0.5 ) )

		l = GafferArnold.ArnoldLight()
		l.loadShader( "skydome_light" )
		l["parameters"]["color"].setInput( s["out"] )
		l["parameters"]["shader"].setInput( s2["out"] )

		network = l["out"].attributes( "/light" )["ai:light"]
		self.assertEqual( len( network ), 3 )
		self.assertEqual( network.getShader( "sky" ).name, "physical_sky" )
		self.assertEqual( network.getShader( "sky" ).parameters["intensity"].value, 2 )
		self.assertEqual( network.getShader( "matte" ).name, "matte" )
		self.assertEqual( network.getShader( "matte" ).parameters["color"].value, imath.Color4f( 0, 1, 0, 0.5 ) )
		self.assertEqual(
			network.inputConnections( network.getOutput().shader ),
			[
				network.Connection( ( "sky", "" ), ( network.getOutput().shader, "color" ) ),
				network.Connection( ( "matte", "" ), ( network.getOutput().shader, "shader" ) ),
			]
		)

		# Changing an upstream parameter must be reflected in a fresh
		# evaluation of the light's shader network.
		s["parameters"]["intensity"].setValue( 4 )
		network = l["out"].attributes( "/light" )["ai:light"]
		self.assertEqual( network.getShader( "sky" ).parameters["intensity"].value, 4 )

	def testOSLShaderInputs( self ) :

		l = GafferArnold.ArnoldLight()
		l.loadShader( "skydome_light" )

		c = GafferSceneTest.TestShader( "mockOSL" )
		c["type"].setValue( "osl:shader" )
		l["parameters"]["color"].setInput( c["out"] )

		network = l["out"].attributes( "/light" )["ai:light"]
		self.assertEqual(
			network.inputConnections( network.getOutput().shader ),
			[
				network.Connection( ( "mockOSL", "" ), ( network.getOutput().shader, "color" ) )
			]
		)

if __name__ == "__main__":
	unittest.main()
bsd-3-clause
flyfei/python-for-android
python-build/python-libs/xmpppy/xmpp/debug.py
207
14069
## debug.py ## ## Copyright (C) 2003 Jacob Lundqvist ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published ## by the Free Software Foundation; either version 2, or (at your option) ## any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. _version_ = '1.4.0' """\ Generic debug class Other modules can always define extra debug flags for local usage, as long as they make sure they append them to debug_flags Also its always a good thing to prefix local flags with something, to reduce risk of coliding flags. Nothing breaks if two flags would be identical, but it might activate unintended debugging. flags can be numeric, but that makes analysing harder, on creation its not obvious what is activated, and when flag_show is given, output isnt really meaningfull. This Debug class can either be initialized and used on app level, or used independantly by the individual classes. 
For samples of usage, see samples subdir in distro source, and selftest in this code """ import sys import traceback import time import os import types if os.environ.has_key('TERM'): colors_enabled=True else: colors_enabled=False color_none = chr(27) + "[0m" color_black = chr(27) + "[30m" color_red = chr(27) + "[31m" color_green = chr(27) + "[32m" color_brown = chr(27) + "[33m" color_blue = chr(27) + "[34m" color_magenta = chr(27) + "[35m" color_cyan = chr(27) + "[36m" color_light_gray = chr(27) + "[37m" color_dark_gray = chr(27) + "[30;1m" color_bright_red = chr(27) + "[31;1m" color_bright_green = chr(27) + "[32;1m" color_yellow = chr(27) + "[33;1m" color_bright_blue = chr(27) + "[34;1m" color_purple = chr(27) + "[35;1m" color_bright_cyan = chr(27) + "[36;1m" color_white = chr(27) + "[37;1m" """ Define your flags in yor modules like this: from debug import * DBG_INIT = 'init' ; debug_flags.append( DBG_INIT ) DBG_CONNECTION = 'connection' ; debug_flags.append( DBG_CONNECTION ) The reason for having a double statement wis so we can validate params and catch all undefined debug flags This gives us control over all used flags, and makes it easier to allow global debugging in your code, just do something like foo = Debug( debug_flags ) group flags, that is a flag in it self containing multiple flags should be defined without the debug_flags.append() sequence, since the parts are already in the list, also they must of course be defined after the flags they depend on ;) example: DBG_MULTI = [ DBG_INIT, DBG_CONNECTION ] NoDebug ------- To speed code up, typically for product releases or such use this class instead if you globaly want to disable debugging """ class NoDebug: def __init__( self, *args, **kwargs ): self.debug_flags = [] def show( self, *args, **kwargs): pass def Show( self, *args, **kwargs): pass def is_active( self, flag ): pass colors={} def active_set( self, active_flags = None ): return 0 LINE_FEED = '\n' class Debug: def __init__( self, # # active_flags 
are those that will trigger output # active_flags = None, # # Log file should be file object or file namne # log_file = sys.stderr, # # prefix and sufix can either be set globaly or per call. # personally I use this to color code debug statements # with prefix = chr(27) + '[34m' # sufix = chr(27) + '[37;1m\n' # prefix = 'DEBUG: ', sufix = '\n', # # If you want unix style timestamps, # 0 disables timestamps # 1 before prefix, good when prefix is a string # 2 after prefix, good when prefix is a color # time_stamp = 0, # # flag_show should normaly be of, but can be turned on to get a # good view of what flags are actually used for calls, # if it is not None, it should be a string # flags for current call will be displayed # with flag_show as separator # recomended values vould be '-' or ':', but any string goes # flag_show = None, # # If you dont want to validate flags on each call to # show(), set this to 0 # validate_flags = 1, # # If you dont want the welcome message, set to 0 # default is to show welcome if any flags are active welcome = -1 ): self.debug_flags = [] if welcome == -1: if active_flags and len(active_flags): welcome = 1 else: welcome = 0 self._remove_dupe_flags() if log_file: if type( log_file ) is type(''): try: self._fh = open(log_file,'w') except: print 'ERROR: can open %s for writing' sys.exit(0) else: ## assume its a stream type object self._fh = log_file else: self._fh = sys.stdout if time_stamp not in (0,1,2): msg2 = '%s' % time_stamp raise 'Invalid time_stamp param', msg2 self.prefix = prefix self.sufix = sufix self.time_stamp = time_stamp self.flag_show = None # must be initialised after possible welcome self.validate_flags = validate_flags self.active_set( active_flags ) if welcome: self.show('') caller = sys._getframe(1) # used to get name of caller try: mod_name= ":%s" % caller.f_locals['__name__'] except: mod_name = "" self.show('Debug created for %s%s' % (caller.f_code.co_filename, mod_name )) self.show(' flags defined: %s' % ','.join( 
self.active )) if type(flag_show) in (type(''), type(None)): self.flag_show = flag_show else: msg2 = '%s' % type(flag_show ) raise 'Invalid type for flag_show!', msg2 def show( self, msg, flag = None, prefix = None, sufix = None, lf = 0 ): """ flag can be of folowing types: None - this msg will always be shown if any debugging is on flag - will be shown if flag is active (flag1,flag2,,,) - will be shown if any of the given flags are active if prefix / sufix are not given, default ones from init will be used lf = -1 means strip linefeed if pressent lf = 1 means add linefeed if not pressent """ if self.validate_flags: self._validate_flag( flag ) if not self.is_active(flag): return if prefix: pre = prefix else: pre = self.prefix if sufix: suf = sufix else: suf = self.sufix if self.time_stamp == 2: output = '%s%s ' % ( pre, time.strftime('%b %d %H:%M:%S', time.localtime(time.time() )), ) elif self.time_stamp == 1: output = '%s %s' % ( time.strftime('%b %d %H:%M:%S', time.localtime(time.time() )), pre, ) else: output = pre if self.flag_show: if flag: output = '%s%s%s' % ( output, flag, self.flag_show ) else: # this call uses the global default, # dont print "None", just show the separator output = '%s %s' % ( output, self.flag_show ) output = '%s%s%s' % ( output, msg, suf ) if lf: # strip/add lf if needed last_char = output[-1] if lf == 1 and last_char != LINE_FEED: output = output + LINE_FEED elif lf == -1 and last_char == LINE_FEED: output = output[:-1] try: self._fh.write( output ) except: # unicode strikes again ;) s=u'' for i in range(len(output)): if ord(output[i]) < 128: c = output[i] else: c = '?' s=s+c self._fh.write( '%s%s%s' % ( pre, s, suf )) self._fh.flush() def is_active( self, flag ): 'If given flag(s) should generate output.' 
# try to abort early to quicken code if not self.active: return 0 if not flag or flag in self.active: return 1 else: # check for multi flag type: if type( flag ) in ( type(()), type([]) ): for s in flag: if s in self.active: return 1 return 0 def active_set( self, active_flags = None ): "returns 1 if any flags where actually set, otherwise 0." r = 0 ok_flags = [] if not active_flags: #no debuging at all self.active = [] elif type( active_flags ) in ( types.TupleType, types.ListType ): flags = self._as_one_list( active_flags ) for t in flags: if t not in self.debug_flags: sys.stderr.write('Invalid debugflag given: %s\n' % t ) ok_flags.append( t ) self.active = ok_flags r = 1 else: # assume comma string try: flags = active_flags.split(',') except: self.show( '***' ) self.show( '*** Invalid debug param given: %s' % active_flags ) self.show( '*** please correct your param!' ) self.show( '*** due to this, full debuging is enabled' ) self.active = self.debug_flags for f in flags: s = f.strip() ok_flags.append( s ) self.active = ok_flags self._remove_dupe_flags() return r def active_get( self ): "returns currently active flags." return self.active def _as_one_list( self, items ): """ init param might contain nested lists, typically from group flags. This code organises lst and remves dupes """ if type( items ) <> type( [] ) and type( items ) <> type( () ): return [ items ] r = [] for l in items: if type( l ) == type([]): lst2 = self._as_one_list( l ) for l2 in lst2: self._append_unique_str(r, l2 ) elif l == None: continue else: self._append_unique_str(r, l ) return r def _append_unique_str( self, lst, item ): """filter out any dupes.""" if type(item) <> type(''): msg2 = '%s' % item raise 'Invalid item type (should be string)',msg2 if item not in lst: lst.append( item ) return lst def _validate_flag( self, flags ): 'verify that flag is defined.' 
if flags: for f in self._as_one_list( flags ): if not f in self.debug_flags: msg2 = '%s' % f raise 'Invalid debugflag given', msg2 def _remove_dupe_flags( self ): """ if multiple instances of Debug is used in same app, some flags might be created multiple time, filter out dupes """ unique_flags = [] for f in self.debug_flags: if f not in unique_flags: unique_flags.append(f) self.debug_flags = unique_flags colors={} def Show(self, flag, msg, prefix=''): msg=msg.replace('\r','\\r').replace('\n','\\n').replace('><','>\n <') if not colors_enabled: pass elif self.colors.has_key(prefix): msg=self.colors[prefix]+msg+color_none else: msg=color_none+msg if not colors_enabled: prefixcolor='' elif self.colors.has_key(flag): prefixcolor=self.colors[flag] else: prefixcolor=color_none if prefix=='error': _exception = sys.exc_info() if _exception[0]: msg=msg+'\n'+''.join(traceback.format_exception(_exception[0], _exception[1], _exception[2])).rstrip() prefix= self.prefix+prefixcolor+(flag+' '*12)[:12]+' '+(prefix+' '*6)[:6] self.show(msg, flag, prefix) def is_active( self, flag ): if not self.active: return 0 if not flag or flag in self.active and DBG_ALWAYS not in self.active or flag not in self.active and DBG_ALWAYS in self.active : return 1 return 0 DBG_ALWAYS='always' ##Uncomment this to effectively disable all debugging and all debugging overhead. #Debug=NoDebug
apache-2.0
shsingh/ansible
lib/ansible/module_utils/univention_umc.py
118
8767
# -*- coding: UTF-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch> # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """Univention Corporate Server (UCS) access module. Provides the following functions for working with an UCS server. 
- ldap_search(filter, base=None, attr=None) Search the LDAP via Univention's LDAP wrapper (ULDAP) - config_registry() Return the UCR registry object - base_dn() Return the configured Base DN according to the UCR - uldap() Return a handle to the ULDAP LDAP wrapper - umc_module_for_add(module, container_dn, superordinate=None) Return a UMC module for creating a new object of the given type - umc_module_for_edit(module, object_dn, superordinate=None) Return a UMC module for editing an existing object of the given type Any other module is not part of the "official" API and may change at any time. """ import re __all__ = [ 'ldap_search', 'config_registry', 'base_dn', 'uldap', 'umc_module_for_add', 'umc_module_for_edit', ] _singletons = {} def ldap_module(): import ldap as orig_ldap return orig_ldap def _singleton(name, constructor): if name in _singletons: return _singletons[name] _singletons[name] = constructor() return _singletons[name] def config_registry(): def construct(): import univention.config_registry ucr = univention.config_registry.ConfigRegistry() ucr.load() return ucr return _singleton('config_registry', construct) def base_dn(): return config_registry()['ldap/base'] def uldap(): "Return a configured univention uldap object" def construct(): try: secret_file = open('/etc/ldap.secret', 'r') bind_dn = 'cn=admin,{0}'.format(base_dn()) except IOError: # pragma: no cover secret_file = open('/etc/machine.secret', 'r') bind_dn = config_registry()["ldap/hostdn"] pwd_line = secret_file.readline() pwd = re.sub('\n', '', pwd_line) import univention.admin.uldap return univention.admin.uldap.access( host=config_registry()['ldap/master'], base=base_dn(), binddn=bind_dn, bindpw=pwd, start_tls=1, ) return _singleton('uldap', construct) def config(): def construct(): import univention.admin.config return univention.admin.config.config() return _singleton('config', construct) def init_modules(): def construct(): import univention.admin.modules 
univention.admin.modules.update() return True return _singleton('modules_initialized', construct) def position_base_dn(): def construct(): import univention.admin.uldap return univention.admin.uldap.position(base_dn()) return _singleton('position_base_dn', construct) def ldap_dn_tree_parent(dn, count=1): dn_array = dn.split(',') dn_array[0:count] = [] return ','.join(dn_array) def ldap_search(filter, base=None, attr=None): """Replaces uldaps search and uses a generator. !! Arguments are not the same.""" if base is None: base = base_dn() msgid = uldap().lo.lo.search( base, ldap_module().SCOPE_SUBTREE, filterstr=filter, attrlist=attr ) # I used to have a try: finally: here but there seems to be a bug in python # which swallows the KeyboardInterrupt # The abandon now doesn't make too much sense while True: result_type, result_data = uldap().lo.lo.result(msgid, all=0) if not result_data: break if result_type is ldap_module().RES_SEARCH_RESULT: # pragma: no cover break else: if result_type is ldap_module().RES_SEARCH_ENTRY: for res in result_data: yield res uldap().lo.lo.abandon(msgid) def module_by_name(module_name_): """Returns an initialized UMC module, identified by the given name. The module is a module specification according to the udm commandline. Example values are: * users/user * shares/share * groups/group If the module does not exist, a KeyError is raised. The modules are cached, so they won't be re-initialized in subsequent calls. """ def construct(): import univention.admin.modules init_modules() module = univention.admin.modules.get(module_name_) univention.admin.modules.init(uldap(), position_base_dn(), module) return module return _singleton('module/%s' % module_name_, construct) def get_umc_admin_objects(): """Convenience accessor for getting univention.admin.objects. This implements delayed importing, so the univention.* modules are not loaded until this function is called. 
""" import univention.admin return univention.admin.objects def umc_module_for_add(module, container_dn, superordinate=None): """Returns an UMC module object prepared for creating a new entry. The module is a module specification according to the udm commandline. Example values are: * users/user * shares/share * groups/group The container_dn MUST be the dn of the container (not of the object to be created itself!). """ mod = module_by_name(module) position = position_base_dn() position.setDn(container_dn) # config, ldap objects from common module obj = mod.object(config(), uldap(), position, superordinate=superordinate) obj.open() return obj def umc_module_for_edit(module, object_dn, superordinate=None): """Returns an UMC module object prepared for editing an existing entry. The module is a module specification according to the udm commandline. Example values are: * users/user * shares/share * groups/group The object_dn MUST be the dn of the object itself, not the container! """ mod = module_by_name(module) objects = get_umc_admin_objects() position = position_base_dn() position.setDn(ldap_dn_tree_parent(object_dn)) obj = objects.get( mod, config(), uldap(), position=position, superordinate=superordinate, dn=object_dn ) obj.open() return obj def create_containers_and_parents(container_dn): """Create a container and if needed the parents containers""" import univention.admin.uexceptions as uexcp if not container_dn.startswith("cn="): raise AssertionError() try: parent = ldap_dn_tree_parent(container_dn) obj = umc_module_for_add( 'container/cn', parent ) obj['name'] = container_dn.split(',')[0].split('=')[1] obj['description'] = "container created by import" except uexcp.ldapError: create_containers_and_parents(parent) obj = umc_module_for_add( 'container/cn', parent ) obj['name'] = container_dn.split(',')[0].split('=')[1] obj['description'] = "container created by import"
gpl-3.0
beppec56/core
sfx2/qa/python/check_sidebar_registry.py
1
3132
# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*- # # This file is part of the LibreOffice project. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # import unittest import unohelper import os from org.libreoffice.unotest import UnoInProcess import uno class CheckSidebarRegistry(unittest.TestCase): @classmethod def setUpClass(cls): cls._uno = UnoInProcess() cls._uno.setUp() cls._xDoc = cls._uno.openEmptyDoc( url = "private:factory/scalc", bHidden = False, bReadOnly = False) @classmethod def tearDownClass(cls): cls._uno.tearDown() def test_sidebar_registry(self): # assert(result) after whole processing to list defective nodes at once result = True #open registry node in Sidebar.xcu config_provider = self.createUnoService("com.sun.star.configuration.ConfigurationProvider") param = uno.createUnoStruct('com.sun.star.beans.PropertyValue') param.Name = "nodepath" # Deck names consitency param.Value = "org.openoffice.Office.UI.Sidebar/Content/DeckList" sidebar_decks_settings = config_provider.createInstanceWithArguments("com.sun.star.configuration.ConfigurationAccess", (param, )) for nodeName in sidebar_decks_settings: node = sidebar_decks_settings[nodeName] if (node.Id != nodeName): print("\nNon-consistent sidebar.xcu Deck registry names", nodeName, node.Id) result = False # panel names consitency param.Value = "org.openoffice.Office.UI.Sidebar/Content/PanelList" sidebar_panels_settings = config_provider.createInstanceWithArguments("com.sun.star.configuration.ConfigurationAccess", (param, )) for nodeName in sidebar_panels_settings: node = sidebar_panels_settings[nodeName] if (node.Id != nodeName): print("\nNon-consistent sidebar.xcu Panel registry names", nodeName, node.Id) result = False # is panel bound to an existing Deck ? 
FoundDeckId = False for deckNodeName in sidebar_decks_settings: deck_node = sidebar_decks_settings[deckNodeName] if (node.DeckId == deck_node.Id): FoundDeckId = True if not FoundDeckId: print("\nNon existing DeckId for the panel ",node.Id) result = False # trigger the overall result. details of each error have already be printed assert(result) def createUnoService(self, serviceName): sm = uno.getComponentContext().ServiceManager return sm.createInstanceWithContext(serviceName, uno.getComponentContext()) if __name__ == "__main__": unittest.main() # vim: set shiftwidth=4 softtabstop=4 expandtab:
gpl-3.0
Arthurvdmerwe/AS2805_Python_Implementation
Shared/MAC.py
1
3460
from operator import * from Crypto.Cipher import DES3 from Crypto.Cipher import DES DES_IV = '\0\0\0\0\0\0\0\0' DES_PAD = [chr(0x80), chr(0), chr(0), chr(0), chr(0), chr(0), chr(0), chr(0)] DES_PAD_HEX = '8000000000000000' KENC = '\0\0\0\1' KMAC = '\0\0\0\2' DO87 = '870901' DO8E = '8E08' DO97 = '9701' DO99 = '99029000' def ToBinary(string): """convert hex string to binary characters""" output = '' x = 0 while x < len(string): output += chr(int(string[x:x + 2], 16)) x += 2 return output def DES3MAC(message, key, ssc): "iso 9797-1 Algorithm 3 (Full DES3)" tdes = DES3.new(key, DES3.MODE_ECB, DES_IV) if ssc: mac = tdes.encrypt(ToBinary(ssc)) else: mac = DES_IV message += PADBlock('') for y in range(len(message) / 8): current = message[y * 8:(y * 8) + 8] left = '' right = '' for x in range(len(mac)): left += '%02x' % ord(mac[x]) right += '%02x' % ord(current[x]) machex = '%016x' % xor(int(left, 16), int(right, 16)) mac = tdes.encrypt(ToBinary(machex)) # iso 9797-1 says we should do the next two steps for "Output Transform 3" # but they're obviously redundant for DES3 with only one key, so I don't bother! 
# mac= tdes.decrypt(mac) # mac= tdes.encrypt(mac) return mac def PADBlock(block): "add DES padding to data block" # call with null string to return an 8 byte padding block # call with an unknown sized block to return the block padded to a multiple of 8 bytes for x in range(8 - (len(block) % 8)): block += DES_PAD[x] return block def DESMAC(message, key, ssc): "iso 9797-1 Algorithm 3 (Retail MAC)" # DES for all blocks # DES3 for last block tdesa = DES.new(key[0:8], DES.MODE_ECB, DES_IV) tdesb = DES.new(key[8:16], DES.MODE_ECB, DES_IV) if ssc: mac = tdesa.encrypt(ToBinary(ssc)) else: mac = DES_IV message += PADBlock('') for y in range(len(message) / 8): current = message[y * 8:(y * 8) + 8] left = right = '' for x in range(len(mac)): left += '%02x' % ord(mac[x]) right += '%02x' % ord(current[x]) machex = '%016x' % xor(int(left, 16), int(right, 16)) # print machex mac = tdesa.encrypt(ToBinary(machex)) # print mac mac = tdesb.decrypt(mac) return tdesa.encrypt(mac) def ToHex(data): "convert binary data to hex printable" string = '' for x in range(len(data)): string += '%02x' % ord(data[x]) return string def HexPrint(data): return ToHex(data) def MACVerify(message, key): mess = ToBinary(message[:len(message) - 16]) mac = DESMAC(mess, key, '') if not mac == ToBinary(message[len(message) - 16:]): print 'MAC Error!' print 'Expected MAC: ', message[len(message) - 16:] print 'Actual MAC: ', HexPrint(mac) return False return True Message = "<STX>000000 td7W0 <FS>9VDD9002 <FS>11<FS>0056<FS>4902370000002348=121210111234123<FS>00006000<FS>00000200<FS>4F50E157E8D544B1<FS><FS><FS>VA5.00.07WV02.70.10 V04.00.19 0 0T 00 000 00000002K0047000000005K005500000000000000000000000000000000000000<FS><FS><ETX>" ResultMac = "^EB5E 8B9A" Message_Block = HexPrint(Message) result = DESMAC(Message_Block, 'F92260FA70A180E1B30D9E95DAD6B823', '') print result print ToHex(result).upper()
gpl-3.0
trondhindenes/ansible
lib/ansible/modules/cloud/cloudstack/cs_snapshot_policy.py
80
10636
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2016, René Moser <mail@renemoser.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cs_snapshot_policy short_description: Manages volume snapshot policies on Apache CloudStack based clouds. description: - Create, update and delete volume snapshot policies. version_added: '2.2' author: "René Moser (@resmo)" options: volume: description: - Name of the volume. - Either C(volume) or C(vm) is required. volume_type: description: - Type of the volume. choices: - DATADISK - ROOT version_added: "2.3" vm: description: - Name of the instance to select the volume from. - Use C(volume_type) if VM has a DATADISK and ROOT volume. - In case of C(volume_type=DATADISK), additionally use C(device_id) if VM has more than one DATADISK volume. - Either C(volume) or C(vm) is required. version_added: "2.3" device_id: description: - ID of the device on a VM the volume is attached to. - This will only be considered if VM has multiple DATADISK volumes. version_added: "2.3" vpc: description: - Name of the vpc the instance is deployed in. version_added: "2.3" interval_type: description: - Interval of the snapshot. default: daily choices: [ hourly, daily, weekly, monthly ] aliases: [ interval ] max_snaps: description: - Max number of snapshots. default: 8 aliases: [ max ] schedule: description: - Time the snapshot is scheduled. Required if C(state=present). - 'Format for C(interval_type=HOURLY): C(MM)' - 'Format for C(interval_type=DAILY): C(MM:HH)' - 'Format for C(interval_type=WEEKLY): C(MM:HH:DD (1-7))' - 'Format for C(interval_type=MONTHLY): C(MM:HH:DD (1-28))' time_zone: description: - Specifies a timezone for this command. 
default: UTC aliases: [ timezone ] state: description: - State of the snapshot policy. default: present choices: [ present, absent ] domain: description: - Domain the volume is related to. account: description: - Account the volume is related to. project: description: - Name of the project the volume is related to. extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' - name: ensure a snapshot policy daily at 1h00 UTC local_action: module: cs_snapshot_policy volume: ROOT-478 schedule: '00:1' max_snaps: 3 - name: ensure a snapshot policy daily at 1h00 UTC on the second DATADISK of VM web-01 local_action: module: cs_snapshot_policy vm: web-01 volume_type: DATADISK device_id: 2 schedule: '00:1' max_snaps: 3 - name: ensure a snapshot policy hourly at minute 5 UTC local_action: module: cs_snapshot_policy volume: ROOT-478 schedule: '5' interval_type: hourly max_snaps: 1 - name: ensure a snapshot policy weekly on Sunday at 05h00, TZ Europe/Zurich local_action: module: cs_snapshot_policy volume: ROOT-478 schedule: '00:5:1' interval_type: weekly max_snaps: 1 time_zone: 'Europe/Zurich' - name: ensure a snapshot policy is absent local_action: module: cs_snapshot_policy volume: ROOT-478 interval_type: hourly state: absent ''' RETURN = ''' --- id: description: UUID of the snapshot policy. returned: success type: string sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f interval_type: description: interval type of the snapshot policy. returned: success type: string sample: daily schedule: description: schedule of the snapshot policy. returned: success type: string sample: max_snaps: description: maximum number of snapshots retained. returned: success type: int sample: 10 time_zone: description: the time zone of the snapshot policy. returned: success type: string sample: Etc/UTC volume: description: the volume of the snapshot policy. returned: success type: string sample: Etc/UTC zone: description: Name of zone the volume is related to. 
returned: success type: string sample: ch-gva-2 project: description: Name of project the volume is related to. returned: success type: string sample: Production account: description: Account the volume is related to. returned: success type: string sample: example account domain: description: Domain the volume is related to. returned: success type: string sample: example domain ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.cloudstack import ( AnsibleCloudStack, cs_argument_spec, cs_required_together ) class AnsibleCloudStackSnapshotPolicy(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackSnapshotPolicy, self).__init__(module) self.returns = { 'schedule': 'schedule', 'timezone': 'time_zone', 'maxsnaps': 'max_snaps', } self.interval_types = { 'hourly': 0, 'daily': 1, 'weekly': 2, 'monthly': 3, } self.volume = None def get_interval_type(self): interval_type = self.module.params.get('interval_type') return self.interval_types[interval_type] def get_volume(self, key=None): if self.volume: return self._get_by_key(key, self.volume) args = { 'name': self.module.params.get('volume'), 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), 'projectid': self.get_project(key='id'), 'virtualmachineid': self.get_vm(key='id', filter_zone=False), 'type': self.module.params.get('volume_type'), } volumes = self.query_api('listVolumes', **args) if volumes: if volumes['count'] > 1: device_id = self.module.params.get('device_id') if not device_id: self.module.fail_json(msg="Found more then 1 volume: combine params 'vm', 'volume_type', 'device_id' and/or 'volume' to select the volume") else: for v in volumes['volume']: if v.get('deviceid') == device_id: self.volume = v return self._get_by_key(key, self.volume) self.module.fail_json(msg="No volume found with device id %s" % device_id) self.volume = volumes['volume'][0] return self._get_by_key(key, self.volume) return None def get_snapshot_policy(self): args 
= { 'volumeid': self.get_volume(key='id') } policies = self.query_api('listSnapshotPolicies', **args) if policies: for policy in policies['snapshotpolicy']: if policy['intervaltype'] == self.get_interval_type(): return policy return None def present_snapshot_policy(self): required_params = [ 'schedule', ] self.module.fail_on_missing_params(required_params=required_params) policy = self.get_snapshot_policy() args = { 'id': policy.get('id') if policy else None, 'intervaltype': self.module.params.get('interval_type'), 'schedule': self.module.params.get('schedule'), 'maxsnaps': self.module.params.get('max_snaps'), 'timezone': self.module.params.get('time_zone'), 'volumeid': self.get_volume(key='id') } if not policy or (policy and self.has_changed(policy, args, only_keys=['schedule', 'maxsnaps', 'timezone'])): self.result['changed'] = True if not self.module.check_mode: res = self.query_api('createSnapshotPolicy', **args) policy = res['snapshotpolicy'] return policy def absent_snapshot_policy(self): policy = self.get_snapshot_policy() if policy: self.result['changed'] = True args = { 'id': policy['id'] } if not self.module.check_mode: self.query_api('deleteSnapshotPolicies', **args) return policy def get_result(self, policy): super(AnsibleCloudStackSnapshotPolicy, self).get_result(policy) if policy and 'intervaltype' in policy: for key, value in self.interval_types.items(): if value == policy['intervaltype']: self.result['interval_type'] = key break volume = self.get_volume() if volume: volume_results = { 'volume': volume.get('name'), 'zone': volume.get('zonename'), 'project': volume.get('project'), 'account': volume.get('account'), 'domain': volume.get('domain'), } self.result.update(volume_results) return self.result def main(): argument_spec = cs_argument_spec() argument_spec.update(dict( volume=dict(), volume_type=dict(choices=['DATADISK', 'ROOT']), vm=dict(), device_id=dict(type='int'), vpc=dict(), interval_type=dict(default='daily', choices=['hourly', 'daily', 
'weekly', 'monthly'], aliases=['interval']), schedule=dict(), time_zone=dict(default='UTC', aliases=['timezone']), max_snaps=dict(type='int', default=8, aliases=['max']), state=dict(choices=['present', 'absent'], default='present'), domain=dict(), account=dict(), project=dict(), )) module = AnsibleModule( argument_spec=argument_spec, required_together=cs_required_together(), required_one_of=( ['vm', 'volume'], ), supports_check_mode=True ) acs_snapshot_policy = AnsibleCloudStackSnapshotPolicy(module) state = module.params.get('state') if state in ['absent']: policy = acs_snapshot_policy.absent_snapshot_policy() else: policy = acs_snapshot_policy.present_snapshot_policy() result = acs_snapshot_policy.get_result(policy) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
vivocoin/vivo
qa/rpc-tests/rest.py
37
15318
#!/usr/bin/env python2 # Copyright (c) 2014-2015 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test REST interface # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from struct import * from io import BytesIO from codecs import encode import binascii try: import http.client as httplib except ImportError: import httplib try: import urllib.parse as urlparse except ImportError: import urlparse def deser_uint256(f): r = 0 for i in range(8): t = unpack(b"<I", f.read(4))[0] r += t << (i * 32) return r #allows simple http get calls def http_get_call(host, port, path, response_object = 0): conn = httplib.HTTPConnection(host, port) conn.request('GET', path) if response_object: return conn.getresponse() return conn.getresponse().read().decode('utf-8') #allows simple http post calls with a request body def http_post_call(host, port, path, requestdata = '', response_object = 0): conn = httplib.HTTPConnection(host, port) conn.request('POST', path, requestdata) if response_object: return conn.getresponse() return conn.getresponse().read() class RESTTest (BitcoinTestFramework): FORMAT_SEPARATOR = "." def setup_chain(self): print("Initializing test directory "+self.options.tmpdir) initialize_chain_clean(self.options.tmpdir, 3) def setup_network(self, split=False): self.nodes = start_nodes(3, self.options.tmpdir) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) self.is_network_split=False self.sync_all() def run_test(self): url = urlparse.urlparse(self.nodes[0].url) print "Mining blocks..." 
self.nodes[0].generate(1) self.sync_all() self.nodes[2].generate(100) self.sync_all() assert_equal(self.nodes[0].getbalance(), 500) txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() self.nodes[2].generate(1) self.sync_all() bb_hash = self.nodes[0].getbestblockhash() assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1 # load the latest 0.1 tx over the REST API json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json") json_obj = json.loads(json_string) vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then) # get n of 0.1 outpoint n = 0 for vout in json_obj['vout']: if vout['value'] == 0.1: n = vout['n'] ###################################### # GETUTXOS: query a unspent outpoint # ###################################### json_request = '/checkmempool/'+txid+'-'+str(n) json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) #check chainTip response assert_equal(json_obj['chaintipHash'], bb_hash) #make sure there is one utxo assert_equal(len(json_obj['utxos']), 1) assert_equal(json_obj['utxos'][0]['value'], 0.1) ################################################ # GETUTXOS: now query a already spent outpoint # ################################################ json_request = '/checkmempool/'+vintx+'-0' json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) #check chainTip response assert_equal(json_obj['chaintipHash'], bb_hash) #make sure there is no utox in the response because this oupoint has been spent assert_equal(len(json_obj['utxos']), 0) #check bitmap assert_equal(json_obj['bitmap'], "0") ################################################## # GETUTXOS: now check both with the same request # 
################################################## json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0' json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 1) assert_equal(json_obj['bitmap'], "10") #test binary response bb_hash = self.nodes[0].getbestblockhash() binaryRequest = b'\x01\x02' binaryRequest += hex_str_to_bytes(txid) binaryRequest += pack("i", n) binaryRequest += hex_str_to_bytes(vintx) binaryRequest += pack("i", 0) bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest) output = BytesIO() output.write(bin_response) output.seek(0) chainHeight = unpack("i", output.read(4))[0] hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(65).rstrip("L") assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine assert_equal(chainHeight, 102) #chain height must be 102 ############################ # GETUTXOS: mempool checks # ############################ # do a tx and don't sync txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json") json_obj = json.loads(json_string) vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then) # get n of 0.1 outpoint n = 0 for vout in json_obj['vout']: if vout['value'] == 0.1: n = vout['n'] json_request = '/'+txid+'-'+str(n) json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 0) #there should be a outpoint because it has just added to the mempool json_request = '/checkmempool/'+txid+'-'+str(n) json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') 
json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 1) #there should be a outpoint because it has just added to the mempool #do some invalid requests json_request = '{"checkmempool' response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True) assert_equal(response.status, 500) #must be a 500 because we send a invalid json request json_request = '{"checkmempool' response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True) assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True) assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request #test limits json_request = '/checkmempool/' for x in range(0, 20): json_request += txid+'-'+str(n)+'/' json_request = json_request.rstrip("/") response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True) assert_equal(response.status, 500) #must be a 500 because we exceeding the limits json_request = '/checkmempool/' for x in range(0, 15): json_request += txid+'-'+str(n)+'/' json_request = json_request.rstrip("/") response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True) assert_equal(response.status, 200) #must be a 500 because we exceeding the limits self.nodes[0].generate(1) #generate block to not affect upcoming tests self.sync_all() ################ # /rest/block/ # ################ # check binary format response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True) assert_equal(response.status, 200) assert_greater_than(int(response.getheader('content-length')), 80) response_str = response.read() # compare with block header response_header = 
http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True) assert_equal(response_header.status, 200) assert_equal(int(response_header.getheader('content-length')), 80) response_header_str = response_header.read() assert_equal(response_str[0:80], response_header_str) # check block hex format response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True) assert_equal(response_hex.status, 200) assert_greater_than(int(response_hex.getheader('content-length')), 160) response_hex_str = response_hex.read() assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160]) # compare with hex block header response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True) assert_equal(response_header_hex.status, 200) assert_greater_than(int(response_header_hex.getheader('content-length')), 160) response_header_hex_str = response_header_hex.read() assert_equal(response_hex_str[0:160], response_header_hex_str[0:160]) assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160]) # check json format block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json') block_json_obj = json.loads(block_json_string) assert_equal(block_json_obj['hash'], bb_hash) # compare with json block header response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True) assert_equal(response_header_json.status, 200) response_header_json_str = response_header_json.read().decode('utf-8') json_obj = json.loads(response_header_json_str, parse_float=Decimal) assert_equal(len(json_obj), 1) #ensure that there is one header in the json response assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same #compare with normal RPC block response rpc_block_json = self.nodes[0].getblock(bb_hash) 
assert_equal(json_obj[0]['hash'], rpc_block_json['hash']) assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations']) assert_equal(json_obj[0]['height'], rpc_block_json['height']) assert_equal(json_obj[0]['version'], rpc_block_json['version']) assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot']) assert_equal(json_obj[0]['time'], rpc_block_json['time']) assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce']) assert_equal(json_obj[0]['bits'], rpc_block_json['bits']) assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty']) assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork']) assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash']) #see if we can get 5 headers in one response self.nodes[1].generate(5) self.sync_all() response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True) assert_equal(response_header_json.status, 200) response_header_json_str = response_header_json.read().decode('utf-8') json_obj = json.loads(response_header_json_str) assert_equal(len(json_obj), 5) #now we should have 5 header objects # do tx test tx_hash = block_json_obj['tx'][0]['txid'] json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json") json_obj = json.loads(json_string) assert_equal(json_obj['txid'], tx_hash) # check hex format response hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True) assert_equal(hex_string.status, 200) assert_greater_than(int(response.getheader('content-length')), 10) # check block tx details # let's make 3 tx and mine them on node 1 txs = [] txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) self.sync_all() # check that there are exactly 3 
transactions in the TX memory pool before generating the block json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(json_obj['size'], 3) # the size of the memory pool should be greater than 3x ~100 bytes assert_greater_than(json_obj['bytes'], 300) # check that there are our submitted transactions in the TX memory pool json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) for tx in txs: assert_equal(tx in json_obj, True) # now mine the transactions newblockhash = self.nodes[1].generate(1) self.sync_all() #check if the 3 tx show up in the new block json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) for tx in json_obj['tx']: if not 'coinbase' in tx['vin'][0]: #exclude coinbase assert_equal(tx['txid'] in txs, True) #check the same but without tx details json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) for tx in txs: assert_equal(tx in json_obj['tx'], True) #test rest bestblock bb_hash = self.nodes[0].getbestblockhash() json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json') json_obj = json.loads(json_string) assert_equal(json_obj['bestblockhash'], bb_hash) if __name__ == '__main__': RESTTest ().main ()
mit
eamuntz/Django-Tut
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/six.py
2375
11628
"""Utilities for writing code that runs on Python 2 and 3""" #Copyright (c) 2010-2011 Benjamin Peterson #Permission is hereby granted, free of charge, to any person obtaining a copy of #this software and associated documentation files (the "Software"), to deal in #the Software without restriction, including without limitation the rights to #use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of #the Software, and to permit persons to whom the Software is furnished to do so, #subject to the following conditions: #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS #FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR #COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER #IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN #CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import operator import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.2.0" # Revision 41c74fef2ded # True if we are running on Python 3. PY3 = sys.version_info[0] == 3 if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # This is a bit ugly, but it avoids running this again. delattr(tp, self.name) return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _MovedItems(types.ModuleType): """Lazy loading of moved objects""" _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("reload_module", "__builtin__", "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", 
"builtins", "izip", "zip"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) del attr moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves") def add_move(move): """Add 
an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_code = "__code__" _func_defaults = "__defaults__" _iterkeys = "keys" _itervalues = "values" _iteritems = "items" else: _meth_func = "im_func" _meth_self = "im_self" _func_code = "func_code" _func_defaults = "func_defaults" _iterkeys = "iterkeys" _itervalues = "itervalues" _iteritems = "iteritems" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator if PY3: def get_unbound_function(unbound): return unbound Iterator = object def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) else: def get_unbound_function(unbound): return unbound.im_func class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) def iterkeys(d): """Return an iterator over the keys of a dictionary.""" return iter(getattr(d, _iterkeys)()) def itervalues(d): """Return an iterator over the values of a dictionary.""" return iter(getattr(d, _itervalues)()) def iteritems(d): """Return an iterator over the (key, value) pairs of a dictionary.""" return iter(getattr(d, _iteritems)()) if PY3: def b(s): return s.encode("latin-1") def u(s): return s if sys.version_info[1] <= 1: def int2byte(i): return bytes((i,)) else: # This is about 2x faster than the implementation above on 3.2+ int2byte = operator.methodcaller("to_bytes", 1, "big") import io 
StringIO = io.StringIO BytesIO = io.BytesIO else: def b(s): return s def u(s): return unicode(s, "unicode_escape") int2byte = chr import StringIO StringIO = BytesIO = StringIO.StringIO _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") if PY3: import builtins exec_ = getattr(builtins, "exec") def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value print_ = getattr(builtins, "print") del builtins else: def exec_(code, globs=None, locs=None): """Execute code in a namespace.""" if globs is None: frame = sys._getframe(1) globs = frame.f_globals if locs is None: locs = frame.f_locals del frame elif locs is None: locs = globs exec("""exec code in globs, locs""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) def print_(*args, **kwargs): """The new-style print function.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) _add_doc(reraise, """Reraise an exception.""") def with_metaclass(meta, base=object): """Create a base class with a metaclass.""" return meta("NewBase", (base,), {})
mit
tony/kivy
kivy/uix/codeinput.py
19
7815
''' Code Input ========== .. versionadded:: 1.5.0 .. image:: images/codeinput.jpg .. note:: This widget requires ``pygments`` package to run. Install it with ``pip``. The :class:`CodeInput` provides a box of editable highlighted text like the one shown in the image. It supports all the features provided by the :class:`~kivy.uix.textinput` as well as code highlighting for `languages supported by pygments <http://pygments.org/docs/lexers/>`_ along with `KivyLexer` for :mod:`kivy.lang` highlighting. Usage example ------------- To create a CodeInput with highlighting for `KV language`:: from kivy.uix.codeinput import CodeInput from kivy.extras.highlight import KivyLexer codeinput = CodeInput(lexer=KivyLexer()) To create a CodeInput with highlighting for `Cython`:: from kivy.uix.codeinput import CodeInput from pygments.lexers import CythonLexer codeinput = CodeInput(lexer=CythonLexer()) ''' __all__ = ('CodeInput', ) from pygments import highlight from pygments import lexers from pygments import styles from pygments.formatters import BBCodeFormatter from kivy.uix.textinput import TextInput from kivy.core.text.markup import MarkupLabel as Label from kivy.cache import Cache from kivy.properties import ObjectProperty, OptionProperty from kivy.utils import get_hex_from_color, get_color_from_hex from kivy.uix.behaviors import CodeNavigationBehavior Cache_get = Cache.get Cache_append = Cache.append # TODO: color chooser for keywords/strings/... class CodeInput(CodeNavigationBehavior, TextInput): '''CodeInput class, used for displaying highlighted code. ''' lexer = ObjectProperty(None) '''This holds the selected Lexer used by pygments to highlight the code. :attr:`lexer` is an :class:`~kivy.properties.ObjectProperty` and defaults to `PythonLexer`. ''' style_name = OptionProperty( 'default', options=list(styles.get_all_styles()) ) '''Name of the pygments style to use for formatting. :attr:`style_name` is an :class:`~kivy.properties.OptionProperty` and defaults to ``'default'``. 
''' style = ObjectProperty(None) '''The pygments style object to use for formatting. When ``style_name`` is set, this will be changed to the corresponding style object. :attr:`style` is a :class:`~kivy.properties.ObjectProperty` and defaults to ``None`` ''' def __init__(self, **kwargs): stylename = kwargs.get('style_name', 'default') style = kwargs['style'] if 'style' in kwargs \ else styles.get_style_by_name(stylename) self.formatter = BBCodeFormatter(style=style) self.lexer = lexers.PythonLexer() self.text_color = '#000000' self._label_cached = Label() self.use_text_color = True super(CodeInput, self).__init__(**kwargs) self._line_options = kw = self._get_line_options() self._label_cached = Label(**kw) # use text_color as foreground color text_color = kwargs.get('foreground_color') if text_color: self.text_color = get_hex_from_color(text_color) # set foreground to white to allow text colors to show # use text_color as the default color in bbcodes self.use_text_color = False self.foreground_color = [1, 1, 1, .999] if not kwargs.get('background_color'): self.background_color = [.9, .92, .92, 1] def on_style_name(self, *args): self.style = styles.get_style_by_name(self.style_name) self.background_color = get_color_from_hex(self.style.background_color) self._trigger_refresh_text() def on_style(self, *args): self.formatter = BBCodeFormatter(style=self.style) self._trigger_update_graphics() def _create_line_label(self, text, hint=False): # Create a label from a text, using line options ntext = text.replace(u'\n', u'').replace(u'\t', u' ' * self.tab_width) if self.password and not hint: # Don't replace hint_text with * ntext = u'*' * len(ntext) ntext = self._get_bbcode(ntext) kw = self._get_line_options() cid = u'{}\0{}\0{}'.format(ntext, self.password, kw) texture = Cache_get('textinput.label', cid) if texture is None: # FIXME right now, we can't render very long line... # if we move on "VBO" version as fallback, we won't need to # do this. 
# try to find the maximum text we can handle label = Label(text=ntext, **kw) if text.find(u'\n') > 0: label.text = u'' else: label.text = ntext label.refresh() # ok, we found it. texture = label.texture Cache_append('textinput.label', cid, texture) label.text = '' return texture def _get_line_options(self): kw = super(CodeInput, self)._get_line_options() kw['markup'] = True kw['valign'] = 'top' kw['codeinput'] = repr(self.lexer) return kw def _get_text_width(self, text, tab_width, _label_cached): # Return the width of a text, according to the current line options. cid = u'{}\0{}\0{}'.format(text, self.password, self._get_line_options()) width = Cache_get('textinput.width', cid) if width is not None: return width lbl = self._create_line_label(text) width = lbl.width Cache_append('textinput.width', cid, width) return width def _get_bbcode(self, ntext): # get bbcoded text for python try: ntext[0] # replace brackets with special chars that aren't highlighted # by pygment. can't use &bl; ... 
cause & is highlighted ntext = ntext.replace(u'[', u'\x01').replace(u']', u'\x02') ntext = highlight(ntext, self.lexer, self.formatter) ntext = ntext.replace(u'\x01', u'&bl;').replace(u'\x02', u'&br;') # replace special chars with &bl; and &br; ntext = ''.join((u'[color=', str(self.text_color), u']', ntext, u'[/color]')) ntext = ntext.replace(u'\n', u'') # remove possible extra highlight options ntext = ntext.replace(u'[u]', '').replace(u'[/u]', '') return ntext except IndexError: return '' # overriden to prevent cursor position off screen def _cursor_offset(self): '''Get the cursor x offset on the current line ''' offset = 0 try: if self.cursor_col: offset = self._get_text_width( self._lines[self.cursor_row][:self.cursor_col]) return offset except: pass finally: return offset def on_lexer(self, instance, value): self._trigger_refresh_text() def on_foreground_color(self, instance, text_color): if not self.use_text_color: self.use_text_color = True return self.text_color = get_hex_from_color(text_color) self.use_text_color = False self.foreground_color = (1, 1, 1, .999) self._trigger_refresh_text() if __name__ == '__main__': from kivy.extras.highlight import KivyLexer from kivy.app import App class CodeInputTest(App): def build(self): return CodeInput(lexer=KivyLexer(), font_size=12, text=''' #:kivy 1.0 <YourWidget>: canvas: Color: rgb: .5, .5, .5 Rectangle: pos: self.pos size: self.size''') CodeInputTest().run()
mit
team-synerjesus-experience/feel-frog
FeelFrog/backend/testcodegeneratorcode2.py
1
2586
import random #set up reusable variables. #value table VT = [[True,0], [True,1], [False,1], [True,0], [True,1], [True,1], [False,1], [True,0], [True,0], [False,1]] #set up starting mood. L = [[]] L[0] = [0,0,0,0,0,0,0,0,0,0,3] m = 3 e = False #start of data generation for j in range(1, 1000): #runs 999 times, which, added to L[0] and c[0], gives 1000 results a = [0,0,0,0,0,0,0,0,0,0] oldm = m #keeps a note of the initial state of m if e == True: a[0] = 0 a[1] = 0 a[2] = 0 a[3] = 0 a[4] = 0 a[5] = 0 a[6] = 0 a[7] = 0 a[8] = 0 a[9] = 1 m = 3 e = False else: for i in range (0, 9): #runs over 0-8. a[i] = random.randint(0, 1) #assigns a[i] a random digit, either 0 or 1 if a[i] == 1: #checks if each a is value 1 (done) or 0 (not done) if done, continues. if VT[i][0] == True: #checks if the value of that activity is positive if (m + VT[i][1]) > 5 : #if so, checks if adding the value will exceed the upper limit (5) m = 5 #if so, sets m to the maximum (5) else: #if not m = (m + VT[i][1]) #adds the value of activity i to the mood. else: #if the value of the activity is negative, if m - (VT[i][1] < 1) : #checks to see if removing the value of a[i] would exceed the lower limit m = 1 #if so, sets m to lower limt (1) else: #otherwise, m = m - VT[i][1] #subtracts the value from the mood. a[9] = 0 e = True L.append([a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], m]) #stores the values in the list. #end of data generation. #for testing porpoises. #for k in range(0, 1000): #print L[k] #for saving to file. hopefully. #f = open('testfile2.txt', 'w') #S = str(L) #f.write(S) #if you want more data, increase the value X in line 21: for j in range(1, x) #if you want to change the effect of the activities, they are set up as: [A,B], #with A being whether the effect is positive or negative, and B being the extent (from 0 to 4)* #*you can increase this by more, but it will only serve to cancel out other activity effects, since mood caps at 1 and 5. 
#this version has flattened all the activity variables (so the effect is between +1 and -1. #it also replaces every even item with [0,0,0,0,0,0,0,0,0,1,3], so that capping isn't an issue.
mit
cgqyh/pyalgotrade-mod
testcases/doc_test.py
1
11031
# PyAlgoTrade # # Copyright 2011-2015 Gabriel Martin Becedillas Ruiz # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com> """ import os from testcases import common class DocCodeTest(common.TestCase): def testTutorial1(self): with common.CopyFiles([os.path.join("testcases", "data", "orcl-2000.csv")], "."): res = common.run_sample_script("tutorial-1.py") self.assertTrue(common.compare_head("tutorial-1.output", res.get_output_lines(True)[:3])) self.assertTrue(common.compare_tail("tutorial-1.output", res.get_output_lines(True)[-3:])) self.assertTrue(res.exit_ok()) def testTutorial2(self): with common.CopyFiles([os.path.join("testcases", "data", "orcl-2000.csv")], "."): res = common.run_sample_script("tutorial-2.py") self.assertTrue(common.compare_head("tutorial-2.output", res.get_output_lines(True)[:15])) self.assertTrue(common.compare_tail("tutorial-2.output", res.get_output_lines(True)[-3:])) self.assertTrue(res.exit_ok()) def testTutorial3(self): with common.CopyFiles([os.path.join("testcases", "data", "orcl-2000.csv")], "."): res = common.run_sample_script("tutorial-3.py") self.assertTrue(common.compare_head("tutorial-3.output", res.get_output_lines(True)[:30])) self.assertTrue(common.compare_tail("tutorial-3.output", res.get_output_lines(True)[-3:])) self.assertTrue(res.exit_ok()) def testTutorial4(self): with common.CopyFiles([os.path.join("testcases", "data", "orcl-2000.csv")], "."): res = 
common.run_sample_script("tutorial-4.py") self.assertTrue(common.compare_head("tutorial-4.output", res.get_output_lines(True))) self.assertTrue(res.exit_ok()) def testCSVFeed(self): with common.CopyFiles([os.path.join("samples", "data", "quandl_gold_2.csv")], "."): code = """import sys sys.path.append('samples') import csvfeed_1 """ res = common.run_python_code(code) self.assertTrue(common.compare_head("csvfeed_1.output", res.get_output_lines()[0:10])) self.assertTrue(common.compare_tail("csvfeed_1.output", res.get_output_lines()[-10:-1])) self.assertTrue(res.exit_ok()) class CompInvTestCase(common.TestCase): def testCompInv_1(self): files = [os.path.join("samples", "data", src) for src in ["aeti-2011-yahoofinance.csv", "egan-2011-yahoofinance.csv", "simo-2011-yahoofinance.csv", "glng-2011-yahoofinance.csv"]] with common.CopyFiles(files, "."): res = common.run_sample_script("compinv-1.py") # Skip the first two lines that have debug messages from the # broker. self.assertTrue(common.compare_head("compinv-1.output", res.get_output_lines(True)[2:])) self.assertTrue(res.exit_ok()) class StratAnalyzerTestCase(common.TestCase): def testSampleStrategyAnalyzer(self): with common.CopyFiles([os.path.join("testcases", "data", "orcl-2000.csv")], "."): res = common.run_sample_script("sample-strategy-analyzer.py") self.assertTrue(common.compare_head("sample-strategy-analyzer.output", res.get_output_lines(True))) self.assertTrue(res.exit_ok()) class TechnicalTestCase(common.TestCase): def testTechnical_1(self): res = common.run_sample_script("technical-1.py") self.assertTrue(common.compare_head("technical-1.output", res.get_output_lines(True))) self.assertTrue(res.exit_ok()) class SampleStratTestCase(common.TestCase): def testErnieChanGldVsGdx(self): files = [] for year in range(2006, 2013): for symbol in ["gld", "gdx"]: fileName = "%s-%d-yahoofinance.csv" % (symbol, year) files.append(os.path.join("samples", "data", fileName)) with common.CopyFiles(files, "."): code = """import 
sys sys.path.append('samples') import statarb_erniechan statarb_erniechan.main(False) """ res = common.run_python_code(code) obtained = res.get_output_lines()[-2] expected = common.tail_file("statarb_erniechan.output", 1)[0] self.assertEquals(expected, obtained, "Got this lines %s instead" % (res.get_output_lines())) # self.assertTrue(common.compare_tail("statarb_erniechan.output", res.get_output_lines()[-2:-1])) self.assertTrue(res.exit_ok()) def testVWAPMomentum(self): files = [] for year in range(2011, 2013): for symbol in ["aapl"]: fileName = "%s-%d-yahoofinance.csv" % (symbol, year) files.append(os.path.join("samples", "data", fileName)) with common.CopyFiles(files, "."): code = """import sys sys.path.append('samples') import vwap_momentum vwap_momentum.main(False) """ res = common.run_python_code(code) self.assertTrue(common.compare_tail("vwap_momentum.output", res.get_output_lines()[-2:-1])) self.assertTrue(res.exit_ok()) def testSMACrossOver(self): files = [] for year in range(2011, 2013): for symbol in ["aapl"]: fileName = "%s-%d-yahoofinance.csv" % (symbol, year) files.append(os.path.join("samples", "data", fileName)) with common.CopyFiles(files, "."): code = """import sys sys.path.append('samples') import sma_crossover_sample sma_crossover_sample.main(False) """ res = common.run_python_code(code) self.assertTrue(common.compare_tail("sma_crossover.output", res.get_output_lines()[-2:-1])) self.assertTrue(res.exit_ok()) def testRSI2(self): files = [] for year in range(2009, 2013): for symbol in ["DIA"]: fileName = "%s-%d-yahoofinance.csv" % (symbol, year) files.append(os.path.join("samples", "data", fileName)) with common.CopyFiles(files, "."): code = """import sys sys.path.append('samples') import rsi2_sample rsi2_sample.main(False) """ res = common.run_python_code(code) self.assertTrue(common.compare_tail("rsi2_sample.output", res.get_output_lines()[-2:-1])) self.assertTrue(res.exit_ok()) def testBBands(self): files = [] for year in range(2011, 2013): for 
symbol in ["yhoo"]: fileName = "%s-%d-yahoofinance.csv" % (symbol, year) files.append(os.path.join("samples", "data", fileName)) with common.CopyFiles(files, "."): code = """import sys sys.path.append('samples') import bbands bbands.main(False) """ res = common.run_python_code(code) self.assertTrue(common.compare_tail("bbands.output", res.get_output_lines()[-2:-1])) self.assertTrue(res.exit_ok()) def testEventStudy(self): files = [] for year in range(2008, 2010): for symbol in ["AA", "AES", "AIG"]: fileName = "%s-%d-yahoofinance.csv" % (symbol, year) files.append(os.path.join("samples", "data", fileName)) with common.CopyFiles(files, "."): code = """import sys sys.path.append('samples') import eventstudy eventstudy.main(False) """ res = common.run_python_code(code) self.assertTrue(common.compare_tail("eventstudy.output", res.get_output_lines()[-2:-1])) self.assertTrue(res.exit_ok()) def testQuandl(self): files = [] for year in range(2006, 2013): for symbol in ["GORO"]: fileName = "WIKI-%s-%d-quandl.csv" % (symbol, year) files.append(os.path.join("samples", "data", fileName)) files.append(os.path.join("samples", "data", "quandl_gold_2.csv")) with common.CopyFiles(files, "."): code = """import sys sys.path.append('samples') import quandl_sample quandl_sample.main(False) """ res = common.run_python_code(code) self.assertTrue(common.compare_head("quandl_sample.output", res.get_output_lines()[0:10])) self.assertTrue(common.compare_tail("quandl_sample.output", res.get_output_lines()[-10:-1])) self.assertTrue(res.exit_ok()) def testMarketTiming(self): common.init_temp_path() files = [] instruments = ["VTI", "VEU", "IEF", "VNQ", "DBC", "SPY"] for year in range(2007, 2013+1): for symbol in instruments: fileName = "%s-%d-yahoofinance.csv" % (symbol, year) files.append(os.path.join("samples", "data", fileName)) with common.CopyFiles(files, "data"): code = """import sys sys.path.append('samples') import market_timing market_timing.main(False) """ res = 
common.run_python_code(code) self.assertTrue(common.compare_tail("market_timing.output", res.get_output_lines()[-10:-1])) self.assertTrue(res.exit_ok()) class BitcoinChartsTestCase(common.TestCase): def testExample1(self): with common.CopyFiles([os.path.join("testcases", "data", "bitstampUSD-2.csv")], "bitstampUSD.csv"): code = """import sys sys.path.append('samples') import bccharts_example_1 bccharts_example_1.main() """ res = common.run_python_code(code) lines = common.get_file_lines("30min-bitstampUSD.csv") self.assertTrue(common.compare_head("30min-bitstampUSD-2.csv", lines[0:10], "testcases/data")) self.assertTrue(common.compare_tail("30min-bitstampUSD-2.csv", lines[-10:], "testcases/data")) os.remove("30min-bitstampUSD.csv") self.assertTrue(res.exit_ok()) def testExample2(self): with common.CopyFiles([os.path.join("testcases", "data", "30min-bitstampUSD-2.csv")], "30min-bitstampUSD.csv"): code = """import sys sys.path.append('samples') import bccharts_example_2 bccharts_example_2.main(False) """ res = common.run_python_code(code) self.assertTrue( common.compare_head("bccharts_example_2.output", res.get_output_lines()[0:10], "testcases/data") ) self.assertTrue( common.compare_tail("bccharts_example_2.output", res.get_output_lines()[-10:-1], "testcases/data") ) self.assertTrue(res.exit_ok())
apache-2.0
brian-l/django-1.4.10
tests/regressiontests/templates/filters.py
33
30453
# coding: utf-8 """ Tests for template filters (as opposed to template tags). The tests are hidden inside a function so that things like timestamps and timezones are only evaluated at the moment of execution and will therefore be consistent. """ from datetime import date, datetime, timedelta from django.utils.tzinfo import LocalTimezone, FixedOffset from django.utils.safestring import mark_safe # These two classes are used to test auto-escaping of __unicode__ output. class UnsafeClass: def __unicode__(self): return u'you & me' class SafeClass: def __unicode__(self): return mark_safe(u'you &gt; me') # RESULT SYNTAX -- # 'template_name': ('template contents', 'context dict', # 'expected string output' or Exception class) def get_filter_tests(): now = datetime.now() now_tz = datetime.now(LocalTimezone(now)) now_tz_i = datetime.now(FixedOffset((3 * 60) + 15)) # imaginary time zone today = date.today() return { # Default compare with datetime.now() 'filter-timesince01' : ('{{ a|timesince }}', {'a': datetime.now() + timedelta(minutes=-1, seconds = -10)}, '1 minute'), 'filter-timesince02' : ('{{ a|timesince }}', {'a': datetime.now() - timedelta(days=1, minutes = 1)}, '1 day'), 'filter-timesince03' : ('{{ a|timesince }}', {'a': datetime.now() - timedelta(hours=1, minutes=25, seconds = 10)}, '1 hour, 25 minutes'), # Compare to a given parameter 'filter-timesince04' : ('{{ a|timesince:b }}', {'a':now - timedelta(days=2), 'b':now - timedelta(days=1)}, '1 day'), 'filter-timesince05' : ('{{ a|timesince:b }}', {'a':now - timedelta(days=2, minutes=1), 'b':now - timedelta(days=2)}, '1 minute'), # Check that timezone is respected 'filter-timesince06' : ('{{ a|timesince:b }}', {'a':now_tz - timedelta(hours=8), 'b':now_tz}, '8 hours'), # Regression for #7443 'filter-timesince07': ('{{ earlier|timesince }}', { 'earlier': now - timedelta(days=7) }, '1 week'), 'filter-timesince08': ('{{ earlier|timesince:now }}', { 'now': now, 'earlier': now - timedelta(days=7) }, '1 week'), 
'filter-timesince09': ('{{ later|timesince }}', { 'later': now + timedelta(days=7) }, '0 minutes'), 'filter-timesince10': ('{{ later|timesince:now }}', { 'now': now, 'later': now + timedelta(days=7) }, '0 minutes'), # Ensures that differing timezones are calculated correctly 'filter-timesince11' : ('{{ a|timesince }}', {'a': now}, '0 minutes'), 'filter-timesince12' : ('{{ a|timesince }}', {'a': now_tz}, '0 minutes'), 'filter-timesince13' : ('{{ a|timesince }}', {'a': now_tz_i}, '0 minutes'), 'filter-timesince14' : ('{{ a|timesince:b }}', {'a': now_tz, 'b': now_tz_i}, '0 minutes'), 'filter-timesince15' : ('{{ a|timesince:b }}', {'a': now, 'b': now_tz_i}, ''), 'filter-timesince16' : ('{{ a|timesince:b }}', {'a': now_tz_i, 'b': now}, ''), # Regression for #9065 (two date objects). 'filter-timesince17' : ('{{ a|timesince:b }}', {'a': today, 'b': today}, '0 minutes'), 'filter-timesince18' : ('{{ a|timesince:b }}', {'a': today, 'b': today + timedelta(hours=24)}, '1 day'), # Default compare with datetime.now() 'filter-timeuntil01' : ('{{ a|timeuntil }}', {'a':datetime.now() + timedelta(minutes=2, seconds = 10)}, '2 minutes'), 'filter-timeuntil02' : ('{{ a|timeuntil }}', {'a':(datetime.now() + timedelta(days=1, seconds = 10))}, '1 day'), 'filter-timeuntil03' : ('{{ a|timeuntil }}', {'a':(datetime.now() + timedelta(hours=8, minutes=10, seconds = 10))}, '8 hours, 10 minutes'), # Compare to a given parameter 'filter-timeuntil04' : ('{{ a|timeuntil:b }}', {'a':now - timedelta(days=1), 'b':now - timedelta(days=2)}, '1 day'), 'filter-timeuntil05' : ('{{ a|timeuntil:b }}', {'a':now - timedelta(days=2), 'b':now - timedelta(days=2, minutes=1)}, '1 minute'), # Regression for #7443 'filter-timeuntil06': ('{{ earlier|timeuntil }}', { 'earlier': now - timedelta(days=7) }, '0 minutes'), 'filter-timeuntil07': ('{{ earlier|timeuntil:now }}', { 'now': now, 'earlier': now - timedelta(days=7) }, '0 minutes'), 'filter-timeuntil08': ('{{ later|timeuntil }}', { 'later': now + timedelta(days=7, 
hours=1) }, '1 week'), 'filter-timeuntil09': ('{{ later|timeuntil:now }}', { 'now': now, 'later': now + timedelta(days=7) }, '1 week'), # Ensures that differing timezones are calculated correctly 'filter-timeuntil10' : ('{{ a|timeuntil }}', {'a': now_tz_i}, '0 minutes'), 'filter-timeuntil11' : ('{{ a|timeuntil:b }}', {'a': now_tz_i, 'b': now_tz}, '0 minutes'), # Regression for #9065 (two date objects). 'filter-timeuntil12' : ('{{ a|timeuntil:b }}', {'a': today, 'b': today}, '0 minutes'), 'filter-timeuntil13' : ('{{ a|timeuntil:b }}', {'a': today, 'b': today - timedelta(hours=24)}, '1 day'), 'filter-addslash01': ("{% autoescape off %}{{ a|addslashes }} {{ b|addslashes }}{% endautoescape %}", {"a": "<a>'", "b": mark_safe("<a>'")}, ur"<a>\' <a>\'"), 'filter-addslash02': ("{{ a|addslashes }} {{ b|addslashes }}", {"a": "<a>'", "b": mark_safe("<a>'")}, ur"&lt;a&gt;\&#39; <a>\'"), 'filter-capfirst01': ("{% autoescape off %}{{ a|capfirst }} {{ b|capfirst }}{% endautoescape %}", {"a": "fred>", "b": mark_safe("fred&gt;")}, u"Fred> Fred&gt;"), 'filter-capfirst02': ("{{ a|capfirst }} {{ b|capfirst }}", {"a": "fred>", "b": mark_safe("fred&gt;")}, u"Fred&gt; Fred&gt;"), # Note that applying fix_ampsersands in autoescape mode leads to # double escaping. 'filter-fix_ampersands01': ("{% autoescape off %}{{ a|fix_ampersands }} {{ b|fix_ampersands }}{% endautoescape %}", {"a": "a&b", "b": mark_safe("a&b")}, u"a&amp;b a&amp;b"), 'filter-fix_ampersands02': ("{{ a|fix_ampersands }} {{ b|fix_ampersands }}", {"a": "a&b", "b": mark_safe("a&b")}, u"a&amp;amp;b a&amp;b"), 'filter-floatformat01': ("{% autoescape off %}{{ a|floatformat }} {{ b|floatformat }}{% endautoescape %}", {"a": "1.42", "b": mark_safe("1.42")}, u"1.4 1.4"), 'filter-floatformat02': ("{{ a|floatformat }} {{ b|floatformat }}", {"a": "1.42", "b": mark_safe("1.42")}, u"1.4 1.4"), # The contents of "linenumbers" is escaped according to the current # autoescape setting. 
'filter-linenumbers01': ("{{ a|linenumbers }} {{ b|linenumbers }}", {"a": "one\n<two>\nthree", "b": mark_safe("one\n&lt;two&gt;\nthree")}, u"1. one\n2. &lt;two&gt;\n3. three 1. one\n2. &lt;two&gt;\n3. three"), 'filter-linenumbers02': ("{% autoescape off %}{{ a|linenumbers }} {{ b|linenumbers }}{% endautoescape %}", {"a": "one\n<two>\nthree", "b": mark_safe("one\n&lt;two&gt;\nthree")}, u"1. one\n2. <two>\n3. three 1. one\n2. &lt;two&gt;\n3. three"), 'filter-lower01': ("{% autoescape off %}{{ a|lower }} {{ b|lower }}{% endautoescape %}", {"a": "Apple & banana", "b": mark_safe("Apple &amp; banana")}, u"apple & banana apple &amp; banana"), 'filter-lower02': ("{{ a|lower }} {{ b|lower }}", {"a": "Apple & banana", "b": mark_safe("Apple &amp; banana")}, u"apple &amp; banana apple &amp; banana"), # The make_list filter can destroy existing escaping, so the results are # escaped. 'filter-make_list01': ("{% autoescape off %}{{ a|make_list }}{% endautoescape %}", {"a": mark_safe("&")}, u"[u'&']"), 'filter-make_list02': ("{{ a|make_list }}", {"a": mark_safe("&")}, u"[u&#39;&amp;&#39;]"), 'filter-make_list03': ('{% autoescape off %}{{ a|make_list|stringformat:"s"|safe }}{% endautoescape %}', {"a": mark_safe("&")}, u"[u'&']"), 'filter-make_list04': ('{{ a|make_list|stringformat:"s"|safe }}', {"a": mark_safe("&")}, u"[u'&']"), # Running slugify on a pre-escaped string leads to odd behavior, # but the result is still safe. 'filter-slugify01': ("{% autoescape off %}{{ a|slugify }} {{ b|slugify }}{% endautoescape %}", {"a": "a & b", "b": mark_safe("a &amp; b")}, u"a-b a-amp-b"), 'filter-slugify02': ("{{ a|slugify }} {{ b|slugify }}", {"a": "a & b", "b": mark_safe("a &amp; b")}, u"a-b a-amp-b"), # Notice that escaping is applied *after* any filters, so the string # formatting here only needs to deal with pre-escaped characters. 'filter-stringformat01': ('{% autoescape off %}.{{ a|stringformat:"5s" }}. 
.{{ b|stringformat:"5s" }}.{% endautoescape %}', {"a": "a<b", "b": mark_safe("a<b")}, u". a<b. . a<b."), 'filter-stringformat02': ('.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.', {"a": "a<b", "b": mark_safe("a<b")}, u". a&lt;b. . a<b."), # Test the title filter 'filter-title1' : ('{{ a|title }}', {'a' : 'JOE\'S CRAB SHACK'}, u'Joe&#39;s Crab Shack'), 'filter-title2' : ('{{ a|title }}', {'a' : '555 WEST 53RD STREET'}, u'555 West 53rd Street'), 'filter-truncatewords01': ('{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}', {"a": "alpha & bravo", "b": mark_safe("alpha &amp; bravo")}, u"alpha & ... alpha &amp; ..."), 'filter-truncatewords02': ('{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}', {"a": "alpha & bravo", "b": mark_safe("alpha &amp; bravo")}, u"alpha &amp; ... alpha &amp; ..."), 'filter-truncatechars01': ('{{ a|truncatechars:5 }}', {'a': "Testing, testing"}, u"Te..."), 'filter-truncatechars02': ('{{ a|truncatechars:7 }}', {'a': "Testing"}, u"Testing"), # The "upper" filter messes up entities (which are case-sensitive), # so it's not safe for non-escaping purposes. 
'filter-upper01': ('{% autoescape off %}{{ a|upper }} {{ b|upper }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a &amp; b")}, u"A & B A &AMP; B"), 'filter-upper02': ('{{ a|upper }} {{ b|upper }}', {"a": "a & b", "b": mark_safe("a &amp; b")}, u"A &amp; B A &amp;AMP; B"), 'filter-urlize01': ('{% autoescape off %}{{ a|urlize }} {{ b|urlize }}{% endautoescape %}', {"a": "http://example.com/?x=&y=", "b": mark_safe("http://example.com?x=&amp;y=")}, u'<a href="http://example.com/?x=&y=" rel="nofollow">http://example.com/?x=&y=</a> <a href="http://example.com?x=&amp;y=" rel="nofollow">http://example.com?x=&amp;y=</a>'), 'filter-urlize02': ('{{ a|urlize }} {{ b|urlize }}', {"a": "http://example.com/?x=&y=", "b": mark_safe("http://example.com?x=&amp;y=")}, u'<a href="http://example.com/?x=&amp;y=" rel="nofollow">http://example.com/?x=&amp;y=</a> <a href="http://example.com?x=&amp;y=" rel="nofollow">http://example.com?x=&amp;y=</a>'), 'filter-urlize03': ('{% autoescape off %}{{ a|urlize }}{% endautoescape %}', {"a": mark_safe("a &amp; b")}, 'a &amp; b'), 'filter-urlize04': ('{{ a|urlize }}', {"a": mark_safe("a &amp; b")}, 'a &amp; b'), # This will lead to a nonsense result, but at least it won't be # exploitable for XSS purposes when auto-escaping is on. 
'filter-urlize05': ('{% autoescape off %}{{ a|urlize }}{% endautoescape %}', {"a": "<script>alert('foo')</script>"}, "<script>alert('foo')</script>"), 'filter-urlize06': ('{{ a|urlize }}', {"a": "<script>alert('foo')</script>"}, '&lt;script&gt;alert(&#39;foo&#39;)&lt;/script&gt;'), # mailto: testing for urlize 'filter-urlize07': ('{{ a|urlize }}', {"a": "Email me at me@example.com"}, 'Email me at <a href="mailto:me@example.com">me@example.com</a>'), 'filter-urlize08': ('{{ a|urlize }}', {"a": "Email me at <me@example.com>"}, 'Email me at &lt;<a href="mailto:me@example.com">me@example.com</a>&gt;'), 'filter-urlizetrunc01': ('{% autoescape off %}{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}{% endautoescape %}', {"a": '"Unsafe" http://example.com/x=&y=', "b": mark_safe('&quot;Safe&quot; http://example.com?x=&amp;y=')}, u'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> &quot;Safe&quot; <a href="http://example.com?x=&amp;y=" rel="nofollow">http:...</a>'), 'filter-urlizetrunc02': ('{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}', {"a": '"Unsafe" http://example.com/x=&y=', "b": mark_safe('&quot;Safe&quot; http://example.com?x=&amp;y=')}, u'&quot;Unsafe&quot; <a href="http://example.com/x=&amp;y=" rel="nofollow">http:...</a> &quot;Safe&quot; <a href="http://example.com?x=&amp;y=" rel="nofollow">http:...</a>'), 'filter-wordcount01': ('{% autoescape off %}{{ a|wordcount }} {{ b|wordcount }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a &amp; b")}, "3 3"), 'filter-wordcount02': ('{{ a|wordcount }} {{ b|wordcount }}', {"a": "a & b", "b": mark_safe("a &amp; b")}, "3 3"), 'filter-wordwrap01': ('{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a & b")}, u"a &\nb a &\nb"), 'filter-wordwrap02': ('{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}', {"a": "a & b", "b": mark_safe("a & b")}, u"a &amp;\nb a &\nb"), 'filter-ljust01': ('{% autoescape off %}.{{ a|ljust:"5" }}. 
.{{ b|ljust:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, u".a&b . .a&b ."), 'filter-ljust02': ('.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, u".a&amp;b . .a&b ."), 'filter-rjust01': ('{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, u". a&b. . a&b."), 'filter-rjust02': ('.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, u". a&amp;b. . a&b."), 'filter-center01': ('{% autoescape off %}.{{ a|center:"5" }}. .{{ b|center:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, u". a&b . . a&b ."), 'filter-center02': ('.{{ a|center:"5" }}. .{{ b|center:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, u". a&amp;b . . a&b ."), 'filter-cut01': ('{% autoescape off %}{{ a|cut:"x" }} {{ b|cut:"x" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"&y &amp;y"), 'filter-cut02': ('{{ a|cut:"x" }} {{ b|cut:"x" }}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"&amp;y &amp;y"), 'filter-cut03': ('{% autoescape off %}{{ a|cut:"&" }} {{ b|cut:"&" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"xy xamp;y"), 'filter-cut04': ('{{ a|cut:"&" }} {{ b|cut:"&" }}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"xy xamp;y"), # Passing ';' to cut can break existing HTML entities, so those strings # are auto-escaped. 'filter-cut05': ('{% autoescape off %}{{ a|cut:";" }} {{ b|cut:";" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"x&y x&ampy"), 'filter-cut06': ('{{ a|cut:";" }} {{ b|cut:";" }}', {"a": "x&y", "b": mark_safe("x&amp;y")}, u"x&amp;y x&amp;ampy"), # The "escape" filter works the same whether autoescape is on or off, # but it has no effect on strings already marked as safe. 
'filter-escape01': ('{{ a|escape }} {{ b|escape }}', {"a": "x&y", "b": mark_safe("x&y")}, u"x&amp;y x&y"), 'filter-escape02': ('{% autoescape off %}{{ a|escape }} {{ b|escape }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&y")}, "x&amp;y x&y"), # It is only applied once, regardless of the number of times it # appears in a chain. 'filter-escape03': ('{% autoescape off %}{{ a|escape|escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;y"), 'filter-escape04': ('{{ a|escape|escape }}', {"a": "x&y"}, u"x&amp;y"), # Force_escape is applied immediately. It can be used to provide # double-escaping, for example. 'filter-force-escape01': ('{% autoescape off %}{{ a|force_escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;y"), 'filter-force-escape02': ('{{ a|force_escape }}', {"a": "x&y"}, u"x&amp;y"), 'filter-force-escape03': ('{% autoescape off %}{{ a|force_escape|force_escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;amp;y"), 'filter-force-escape04': ('{{ a|force_escape|force_escape }}', {"a": "x&y"}, u"x&amp;amp;y"), # Because the result of force_escape is "safe", an additional # escape filter has no effect. 'filter-force-escape05': ('{% autoescape off %}{{ a|force_escape|escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;y"), 'filter-force-escape06': ('{{ a|force_escape|escape }}', {"a": "x&y"}, u"x&amp;y"), 'filter-force-escape07': ('{% autoescape off %}{{ a|escape|force_escape }}{% endautoescape %}', {"a": "x&y"}, u"x&amp;y"), 'filter-force-escape08': ('{{ a|escape|force_escape }}', {"a": "x&y"}, u"x&amp;y"), # The contents in "linebreaks" and "linebreaksbr" are escaped # according to the current autoescape setting. 
'filter-linebreaks01': ('{{ a|linebreaks }} {{ b|linebreaks }}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, u"<p>x&amp;<br />y</p> <p>x&<br />y</p>"), 'filter-linebreaks02': ('{% autoescape off %}{{ a|linebreaks }} {{ b|linebreaks }}{% endautoescape %}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, u"<p>x&<br />y</p> <p>x&<br />y</p>"), 'filter-linebreaksbr01': ('{{ a|linebreaksbr }} {{ b|linebreaksbr }}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, u"x&amp;<br />y x&<br />y"), 'filter-linebreaksbr02': ('{% autoescape off %}{{ a|linebreaksbr }} {{ b|linebreaksbr }}{% endautoescape %}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, u"x&<br />y x&<br />y"), 'filter-safe01': ("{{ a }} -- {{ a|safe }}", {"a": u"<b>hello</b>"}, "&lt;b&gt;hello&lt;/b&gt; -- <b>hello</b>"), 'filter-safe02': ("{% autoescape off %}{{ a }} -- {{ a|safe }}{% endautoescape %}", {"a": "<b>hello</b>"}, u"<b>hello</b> -- <b>hello</b>"), 'filter-safeseq01': ('{{ a|join:", " }} -- {{ a|safeseq|join:", " }}', {"a": ["&", "<"]}, "&amp;, &lt; -- &, <"), 'filter-safeseq02': ('{% autoescape off %}{{ a|join:", " }} -- {{ a|safeseq|join:", " }}{% endautoescape %}', {"a": ["&", "<"]}, "&, < -- &, <"), 'filter-removetags01': ('{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, u"x &lt;p&gt;y&lt;/p&gt; x <p>y</p>"), 'filter-removetags02': ('{% autoescape off %}{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}{% endautoescape %}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, u"x <p>y</p> x <p>y</p>"), 'filter-striptags01': ('{{ a|striptags }} {{ b|striptags }}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x y x y"), 'filter-striptags02': ('{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x y x y"), 'filter-first01': ('{{ a|first }} {{ b|first }}', {"a": ["a&b", "x"], 
"b": [mark_safe("a&b"), "x"]}, "a&amp;b a&b"), 'filter-first02': ('{% autoescape off %}{{ a|first }} {{ b|first }}{% endautoescape %}', {"a": ["a&b", "x"], "b": [mark_safe("a&b"), "x"]}, "a&b a&b"), 'filter-last01': ('{{ a|last }} {{ b|last }}', {"a": ["x", "a&b"], "b": ["x", mark_safe("a&b")]}, "a&amp;b a&b"), 'filter-last02': ('{% autoescape off %}{{ a|last }} {{ b|last }}{% endautoescape %}', {"a": ["x", "a&b"], "b": ["x", mark_safe("a&b")]}, "a&b a&b"), 'filter-random01': ('{{ a|random }} {{ b|random }}', {"a": ["a&b", "a&b"], "b": [mark_safe("a&b"), mark_safe("a&b")]}, "a&amp;b a&b"), 'filter-random02': ('{% autoescape off %}{{ a|random }} {{ b|random }}{% endautoescape %}', {"a": ["a&b", "a&b"], "b": [mark_safe("a&b"), mark_safe("a&b")]}, "a&b a&b"), 'filter-slice01': ('{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}', {"a": "a&b", "b": mark_safe("a&b")}, "&amp;b &b"), 'filter-slice02': ('{% autoescape off %}{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, "&b &b"), 'filter-unordered_list01': ('{{ a|unordered_list }}', {"a": ["x>", [["<y", []]]]}, "\t<li>x&gt;\n\t<ul>\n\t\t<li>&lt;y</li>\n\t</ul>\n\t</li>"), 'filter-unordered_list02': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"), 'filter-unordered_list03': ('{{ a|unordered_list }}', {"a": ["x>", [[mark_safe("<y"), []]]]}, "\t<li>x&gt;\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"), 'filter-unordered_list04': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [[mark_safe("<y"), []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"), 'filter-unordered_list05': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"), # Literal string arguments to the default filter are always treated as # safe strings, regardless of the auto-escaping state. 
# # Note: we have to use {"a": ""} here, otherwise the invalid template # variable string interferes with the test result. 'filter-default01': ('{{ a|default:"x<" }}', {"a": ""}, "x<"), 'filter-default02': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": ""}, "x<"), 'filter-default03': ('{{ a|default:"x<" }}', {"a": mark_safe("x>")}, "x>"), 'filter-default04': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": mark_safe("x>")}, "x>"), 'filter-default_if_none01': ('{{ a|default:"x<" }}', {"a": None}, "x<"), 'filter-default_if_none02': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": None}, "x<"), 'filter-phone2numeric01': ('{{ a|phone2numeric }} {{ b|phone2numeric }}', {"a": "<1-800-call-me>", "b": mark_safe("<1-800-call-me>") }, "&lt;1-800-2255-63&gt; <1-800-2255-63>"), 'filter-phone2numeric02': ('{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}', {"a": "<1-800-call-me>", "b": mark_safe("<1-800-call-me>") }, "<1-800-2255-63> <1-800-2255-63>"), 'filter-phone2numeric03': ('{{ a|phone2numeric }}', {"a": "How razorback-jumping frogs can level six piqued gymnasts!"}, "469 729672225-5867464 37647 226 53835 749 747833 49662787!"), # Ensure iriencode keeps safe strings: 'filter-iriencode01': ('{{ url|iriencode }}', {'url': '?test=1&me=2'}, '?test=1&amp;me=2'), 'filter-iriencode02': ('{% autoescape off %}{{ url|iriencode }}{% endautoescape %}', {'url': '?test=1&me=2'}, '?test=1&me=2'), 'filter-iriencode03': ('{{ url|iriencode }}', {'url': mark_safe('?test=1&me=2')}, '?test=1&me=2'), 'filter-iriencode04': ('{% autoescape off %}{{ url|iriencode }}{% endautoescape %}', {'url': mark_safe('?test=1&me=2')}, '?test=1&me=2'), # urlencode 'filter-urlencode01': ('{{ url|urlencode }}', {'url': '/test&"/me?/'}, '/test%26%22/me%3F/'), 'filter-urlencode02': ('/test/{{ urlbit|urlencode:"" }}/', {'urlbit': 'escape/slash'}, '/test/escape%2Fslash/'), # Chaining a bunch of safeness-preserving 
filters should not alter # the safe status either way. 'chaining01': ('{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}', {"a": "a < b", "b": mark_safe("a < b")}, " A &lt; b . A < b "), 'chaining02': ('{% autoescape off %}{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}{% endautoescape %}', {"a": "a < b", "b": mark_safe("a < b")}, " A < b . A < b "), # Using a filter that forces a string back to unsafe: 'chaining03': ('{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}', {"a": "a < b", "b": mark_safe("a < b")}, "A &lt; .A < "), 'chaining04': ('{% autoescape off %}{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}{% endautoescape %}', {"a": "a < b", "b": mark_safe("a < b")}, "A < .A < "), # Using a filter that forces safeness does not lead to double-escaping 'chaining05': ('{{ a|escape|capfirst }}', {"a": "a < b"}, "A &lt; b"), 'chaining06': ('{% autoescape off %}{{ a|escape|capfirst }}{% endautoescape %}', {"a": "a < b"}, "A &lt; b"), # Force to safe, then back (also showing why using force_escape too # early in a chain can lead to unexpected results). 'chaining07': ('{{ a|force_escape|cut:";" }}', {"a": "a < b"}, "a &amp;lt b"), 'chaining08': ('{% autoescape off %}{{ a|force_escape|cut:";" }}{% endautoescape %}', {"a": "a < b"}, "a &lt b"), 'chaining09': ('{{ a|cut:";"|force_escape }}', {"a": "a < b"}, "a &lt; b"), 'chaining10': ('{% autoescape off %}{{ a|cut:";"|force_escape }}{% endautoescape %}', {"a": "a < b"}, "a &lt; b"), 'chaining11': ('{{ a|cut:"b"|safe }}', {"a": "a < b"}, "a < "), 'chaining12': ('{% autoescape off %}{{ a|cut:"b"|safe }}{% endautoescape %}', {"a": "a < b"}, "a < "), 'chaining13': ('{{ a|safe|force_escape }}', {"a": "a < b"}, "a &lt; b"), 'chaining14': ('{% autoescape off %}{{ a|safe|force_escape }}{% endautoescape %}', {"a": "a < b"}, "a &lt; b"), # Filters decorated with stringfilter still respect is_safe. 
'autoescape-stringfilter01': (r'{{ unsafe|capfirst }}', {'unsafe': UnsafeClass()}, 'You &amp; me'), 'autoescape-stringfilter02': (r'{% autoescape off %}{{ unsafe|capfirst }}{% endautoescape %}', {'unsafe': UnsafeClass()}, 'You & me'), 'autoescape-stringfilter03': (r'{{ safe|capfirst }}', {'safe': SafeClass()}, 'You &gt; me'), 'autoescape-stringfilter04': (r'{% autoescape off %}{{ safe|capfirst }}{% endautoescape %}', {'safe': SafeClass()}, 'You &gt; me'), 'escapejs01': (r'{{ a|escapejs }}', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}, 'testing\\u000D\\u000Ajavascript \\u0027string\\u0022 \\u003Cb\\u003Eescaping\\u003C/b\\u003E'), 'escapejs02': (r'{% autoescape off %}{{ a|escapejs }}{% endautoescape %}', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}, 'testing\\u000D\\u000Ajavascript \\u0027string\\u0022 \\u003Cb\\u003Eescaping\\u003C/b\\u003E'), # length filter. 'length01': ('{{ list|length }}', {'list': ['4', None, True, {}]}, '4'), 'length02': ('{{ list|length }}', {'list': []}, '0'), 'length03': ('{{ string|length }}', {'string': ''}, '0'), 'length04': ('{{ string|length }}', {'string': 'django'}, '6'), # Invalid uses that should fail silently. 'length05': ('{{ int|length }}', {'int': 7}, ''), 'length06': ('{{ None|length }}', {'None': None}, ''), # length_is filter. 
'length_is01': ('{% if some_list|length_is:"4" %}Four{% endif %}', {'some_list': ['4', None, True, {}]}, 'Four'), 'length_is02': ('{% if some_list|length_is:"4" %}Four{% else %}Not Four{% endif %}', {'some_list': ['4', None, True, {}, 17]}, 'Not Four'), 'length_is03': ('{% if mystring|length_is:"4" %}Four{% endif %}', {'mystring': 'word'}, 'Four'), 'length_is04': ('{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}', {'mystring': 'Python'}, 'Not Four'), 'length_is05': ('{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}', {'mystring': ''}, 'Not Four'), 'length_is06': ('{% with var|length as my_length %}{{ my_length }}{% endwith %}', {'var': 'django'}, '6'), # Boolean return value from length_is should not be coerced to a string 'length_is07': (r'{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}', {}, 'Length not 0'), 'length_is08': (r'{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}', {}, 'Length is 1'), # Invalid uses that should fail silently. 
'length_is09': ('{{ var|length_is:"fish" }}', {'var': 'django'}, ''), 'length_is10': ('{{ int|length_is:"1" }}', {'int': 7}, ''), 'length_is11': ('{{ none|length_is:"1" }}', {'none': None}, ''), 'join01': (r'{{ a|join:", " }}', {'a': ['alpha', 'beta & me']}, 'alpha, beta &amp; me'), 'join02': (r'{% autoescape off %}{{ a|join:", " }}{% endautoescape %}', {'a': ['alpha', 'beta & me']}, 'alpha, beta & me'), 'join03': (r'{{ a|join:" &amp; " }}', {'a': ['alpha', 'beta & me']}, 'alpha &amp; beta &amp; me'), 'join04': (r'{% autoescape off %}{{ a|join:" &amp; " }}{% endautoescape %}', {'a': ['alpha', 'beta & me']}, 'alpha &amp; beta & me'), # Test that joining with unsafe joiners don't result in unsafe strings (#11377) 'join05': (r'{{ a|join:var }}', {'a': ['alpha', 'beta & me'], 'var': ' & '}, 'alpha &amp; beta &amp; me'), 'join06': (r'{{ a|join:var }}', {'a': ['alpha', 'beta & me'], 'var': mark_safe(' & ')}, 'alpha & beta &amp; me'), 'join07': (r'{{ a|join:var|lower }}', {'a': ['Alpha', 'Beta & me'], 'var': ' & ' }, 'alpha &amp; beta &amp; me'), 'join08': (r'{{ a|join:var|lower }}', {'a': ['Alpha', 'Beta & me'], 'var': mark_safe(' & ')}, 'alpha & beta &amp; me'), 'date01': (r'{{ d|date:"m" }}', {'d': datetime(2008, 1, 1)}, '01'), 'date02': (r'{{ d|date }}', {'d': datetime(2008, 1, 1)}, 'Jan. 
1, 2008'), #Ticket 9520: Make sure |date doesn't blow up on non-dates 'date03': (r'{{ d|date:"m" }}', {'d': 'fail_string'}, ''), # ISO date formats 'date04': (r'{{ d|date:"o" }}', {'d': datetime(2008, 12, 29)}, '2009'), 'date05': (r'{{ d|date:"o" }}', {'d': datetime(2010, 1, 3)}, '2009'), # Timezone name 'date06': (r'{{ d|date:"e" }}', {'d': datetime(2009, 3, 12, tzinfo=FixedOffset(30))}, '+0030'), 'date07': (r'{{ d|date:"e" }}', {'d': datetime(2009, 3, 12)}, ''), # Tests for #11687 and #16676 'add01': (r'{{ i|add:"5" }}', {'i': 2000}, '2005'), 'add02': (r'{{ i|add:"napis" }}', {'i': 2000}, ''), 'add03': (r'{{ i|add:16 }}', {'i': 'not_an_int'}, ''), 'add04': (r'{{ i|add:"16" }}', {'i': 'not_an_int'}, 'not_an_int16'), 'add05': (r'{{ l1|add:l2 }}', {'l1': [1, 2], 'l2': [3, 4]}, '[1, 2, 3, 4]'), 'add06': (r'{{ t1|add:t2 }}', {'t1': (3, 4), 't2': (1, 2)}, '(3, 4, 1, 2)'), 'add07': (r'{{ d|add:t }}', {'d': date(2000, 1, 1), 't': timedelta(10)}, 'Jan. 11, 2000'), }
bsd-3-clause
Just-D/chromium-1
tools/telemetry/third_party/gsutilz/third_party/boto/tests/unit/ec2/elb/test_attribute.py
114
8314
from tests.unit import unittest from tests.compat import mock from boto.ec2.elb import ELBConnection from boto.ec2.elb import LoadBalancer from boto.ec2.elb.attributes import LbAttributes ATTRIBUTE_GET_TRUE_CZL_RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?> <DescribeLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/"> <DescribeLoadBalancerAttributesResult> <LoadBalancerAttributes> <CrossZoneLoadBalancing> <Enabled>true</Enabled> </CrossZoneLoadBalancing> </LoadBalancerAttributes> </DescribeLoadBalancerAttributesResult> <ResponseMetadata> <RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId> </ResponseMetadata> </DescribeLoadBalancerAttributesResponse> """ ATTRIBUTE_GET_FALSE_CZL_RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?> <DescribeLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/"> <DescribeLoadBalancerAttributesResult> <LoadBalancerAttributes> <CrossZoneLoadBalancing> <Enabled>false</Enabled> </CrossZoneLoadBalancing> </LoadBalancerAttributes> </DescribeLoadBalancerAttributesResult> <ResponseMetadata> <RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId> </ResponseMetadata> </DescribeLoadBalancerAttributesResponse> """ ATTRIBUTE_GET_CS_RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?> <DescribeLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/"> <DescribeLoadBalancerAttributesResult> <LoadBalancerAttributes> <ConnectionSettings> <IdleTimeout>30</IdleTimeout> </ConnectionSettings> </LoadBalancerAttributes> </DescribeLoadBalancerAttributesResult> <ResponseMetadata> <RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId> </ResponseMetadata> </DescribeLoadBalancerAttributesResponse> """ ATTRIBUTE_SET_RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?> <ModifyLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/"> <ModifyLoadBalancerAttributesResult/> 
<ResponseMetadata> <RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId> </ResponseMetadata> </ModifyLoadBalancerAttributesResponse> """ # make_request arguments for setting attributes. # Format: (API_COMMAND, API_PARAMS, API_PATH, API_METHOD) ATTRIBUTE_SET_CZL_TRUE_REQUEST = ( 'ModifyLoadBalancerAttributes', {'LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled': 'true', 'LoadBalancerName': 'test_elb'}, mock.ANY, mock.ANY) ATTRIBUTE_SET_CZL_FALSE_REQUEST = ( 'ModifyLoadBalancerAttributes', {'LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled': 'false', 'LoadBalancerName': 'test_elb'}, mock.ANY, mock.ANY) # Tests to be run on an LbAttributes # Format: # (EC2_RESPONSE_STRING, list( (string_of_attribute_to_test, value) ) ) ATTRIBUTE_TESTS = [ (ATTRIBUTE_GET_TRUE_CZL_RESPONSE, [('cross_zone_load_balancing.enabled', True)]), (ATTRIBUTE_GET_FALSE_CZL_RESPONSE, [('cross_zone_load_balancing.enabled', False)]), (ATTRIBUTE_GET_CS_RESPONSE, [('connecting_settings.idle_timeout', 30)]), ] class TestLbAttributes(unittest.TestCase): """Tests LB Attributes.""" def _setup_mock(self): """Sets up a mock elb request. 
Returns: response, elb connection and LoadBalancer """ mock_response = mock.Mock() mock_response.status = 200 elb = ELBConnection(aws_access_key_id='aws_access_key_id', aws_secret_access_key='aws_secret_access_key') elb.make_request = mock.Mock(return_value=mock_response) return mock_response, elb, LoadBalancer(elb, 'test_elb') def _verify_attributes(self, attributes, attr_tests): """Verifies an LbAttributes object.""" for attr, result in attr_tests: attr_result = attributes for sub_attr in attr.split('.'): attr_result = getattr(attr_result, sub_attr, None) self.assertEqual(attr_result, result) def test_get_all_lb_attributes(self): """Tests getting the LbAttributes from the elb.connection.""" mock_response, elb, _ = self._setup_mock() for response, attr_tests in ATTRIBUTE_TESTS: mock_response.read.return_value = response attributes = elb.get_all_lb_attributes('test_elb') self.assertTrue(isinstance(attributes, LbAttributes)) self._verify_attributes(attributes, attr_tests) def test_get_lb_attribute(self): """Tests getting a single attribute from elb.connection.""" mock_response, elb, _ = self._setup_mock() tests = [ ('crossZoneLoadBalancing', True, ATTRIBUTE_GET_TRUE_CZL_RESPONSE), ('crossZoneLoadBalancing', False, ATTRIBUTE_GET_FALSE_CZL_RESPONSE), ] for attr, value, response in tests: mock_response.read.return_value = response status = elb.get_lb_attribute('test_elb', attr) self.assertEqual(status, value) def test_modify_lb_attribute(self): """Tests setting the attributes from elb.connection.""" mock_response, elb, _ = self._setup_mock() tests = [ ('crossZoneLoadBalancing', True, ATTRIBUTE_SET_CZL_TRUE_REQUEST), ('crossZoneLoadBalancing', False, ATTRIBUTE_SET_CZL_FALSE_REQUEST), ] for attr, value, args in tests: mock_response.read.return_value = ATTRIBUTE_SET_RESPONSE result = elb.modify_lb_attribute('test_elb', attr, value) self.assertTrue(result) elb.make_request.assert_called_with(*args) def test_lb_get_attributes(self): """Tests the LbAttributes from the ELB 
object.""" mock_response, _, lb = self._setup_mock() for response, attr_tests in ATTRIBUTE_TESTS: mock_response.read.return_value = response attributes = lb.get_attributes(force=True) self.assertTrue(isinstance(attributes, LbAttributes)) self._verify_attributes(attributes, attr_tests) def test_lb_is_cross_zone_load_balancing(self): """Tests checking is_cross_zone_load_balancing.""" mock_response, _, lb = self._setup_mock() tests = [ # Format: (method, args, result, response) # Gets a true result. (lb.is_cross_zone_load_balancing, [], True, ATTRIBUTE_GET_TRUE_CZL_RESPONSE), # Returns the previous calls cached value. (lb.is_cross_zone_load_balancing, [], True, ATTRIBUTE_GET_FALSE_CZL_RESPONSE), # Gets a false result. (lb.is_cross_zone_load_balancing, [True], False, ATTRIBUTE_GET_FALSE_CZL_RESPONSE), ] for method, args, result, response in tests: mock_response.read.return_value = response self.assertEqual(method(*args), result) def test_lb_enable_cross_zone_load_balancing(self): """Tests enabling cross zone balancing from LoadBalancer.""" mock_response, elb, lb = self._setup_mock() mock_response.read.return_value = ATTRIBUTE_SET_RESPONSE self.assertTrue(lb.enable_cross_zone_load_balancing()) elb.make_request.assert_called_with(*ATTRIBUTE_SET_CZL_TRUE_REQUEST) def test_lb_disable_cross_zone_load_balancing(self): """Tests disabling cross zone balancing from LoadBalancer.""" mock_response, elb, lb = self._setup_mock() mock_response.read.return_value = ATTRIBUTE_SET_RESPONSE self.assertTrue(lb.disable_cross_zone_load_balancing()) elb.make_request.assert_called_with(*ATTRIBUTE_SET_CZL_FALSE_REQUEST) def test_lb_get_connection_settings(self): """Tests checking connectionSettings attribute""" mock_response, elb, _ = self._setup_mock() attrs = [('idle_timeout', 30), ] mock_response.read.return_value = ATTRIBUTE_GET_CS_RESPONSE attributes = elb.get_all_lb_attributes('test_elb') self.assertTrue(isinstance(attributes, LbAttributes)) for attr, value in attrs: 
self.assertEqual(getattr(attributes.connecting_settings, attr), value) if __name__ == '__main__': unittest.main()
bsd-3-clause
wadleo/appinventor-sources
appinventor/misc/emulator-support/aiStarter.py
77
5102
#!/usr/bin/python
# Local helper daemon that lets the App Inventor web client control an
# attached Android device or emulator through adb.  Serves a small JSON
# HTTP API on 127.0.0.1:8004 (one @route handler per endpoint below).

from bottle import run, route, app, request, response, template, default_app, Bottle, debug, abort
import sys
import os
import subprocess
import re

VERSION = "2.2"

app = Bottle()
default_app.push(app)

# Platform-specific install prefix for the bundled adb/emulator scripts.
platform = os.uname()[0]
if platform == 'Linux':
    PLATDIR = '/usr/google/appinventor/'
elif platform == 'Darwin':          # MacOS
    PLATDIR = '/Applications/AppInventor/'
else:                               # Need to add Windows
    sys.exit(1)


def _allow_cors():
    """Attach the CORS headers every endpoint needs (browser-based client)."""
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Headers'] = 'origin, content-type'


@route('/ping/')
def ping():
    """Liveness probe: always reports OK plus the helper version."""
    _allow_cors()
    response.headers['Content-Type'] = 'application/json'
    return '{ "status" : "OK", "version" : "%s" }' % VERSION


@route('/utest/')
def utest():
    """Report whether any (USB or emulator) device is attached."""
    _allow_cors()
    response.headers['Content-Type'] = 'application/json'
    device = checkrunning(False)
    if device:
        return '{ "status" : "OK", "device" : "%s", "version" : "%s" }' % (device, VERSION)
    else:
        return '{ "status" : "NO", "version" : "%s" }' % VERSION


@route('/start/')
def start():
    """Launch the bundled Android emulator."""
    subprocess.call(PLATDIR + "commands-for-Appinventor/run-emulator ",
                    shell=True, close_fds=True)
    _allow_cors()
    return ''


@route('/emulatorreset/')
def emulatorreset():
    """Reset the bundled Android emulator to a clean image."""
    subprocess.call(PLATDIR + "commands-for-Appinventor/reset-emulator ",
                    shell=True, close_fds=True)
    _allow_cors()
    return ''


@route('/echeck/')
def echeck():
    """Report whether an *emulator* is running."""
    _allow_cors()
    response.headers['Content-Type'] = 'application/json'
    device = checkrunning(True)
    if device:
        return '{ "status" : "OK", "device" : "%s", "version" : "%s"}' % (device, VERSION)
    else:
        return '{ "status" : "NO", "version" : "%s" }' % VERSION


@route('/ucheck/')
def ucheck():
    """Report whether a *physical* (non-emulator) device is attached."""
    _allow_cors()
    response.headers['Content-Type'] = 'application/json'
    device = checkrunning(False)
    if device:
        return '{ "status" : "OK", "device" : "%s", "version" : "%s"}' % (device, VERSION)
    else:
        return '{ "status" : "NO", "version" : "%s" }' % VERSION


@route('/reset/')
def reset():
    """Kill adb and the emulator so the next start is from scratch."""
    _allow_cors()
    response.headers['Content-Type'] = 'application/json'
    killadb()
    return '{ "status" : "OK", "version" : "%s" }' % VERSION


@route('/replstart/:device')
def replstart(device=None):
    """Start the AI2 Companion on *device* and forward tcp:8001 to it."""
    sys.stdout.write("Device = %s\n" % device)
    # SECURITY: *device* comes straight from the request URL.  Pass it as an
    # argv element (no shell) so it cannot inject shell commands the way the
    # old 'shell=True' string interpolation could.
    adb = PLATDIR + "commands-for-Appinventor/adb"
    subprocess.check_output([adb, "-s", device, "forward", "tcp:8001", "tcp:8001"],
                            close_fds=True)
    if re.match('.*emulat.*', device):
        # Only fake the menu key for the emulator
        subprocess.check_output([adb, "-s", device, "shell", "input", "keyevent", "82"],
                                close_fds=True)
    subprocess.check_output([adb, "-s", device, "shell", "am", "start",
                             "-a", "android.intent.action.VIEW",
                             "-n", "edu.mit.appinventor.aicompanion3/.Screen1",
                             "--ez", "rundirect", "true"],
                            close_fds=True)
    _allow_cors()
    return ''


def checkrunning(emulator):
    """Return the serial of the first matching attached device, else False.

    emulator=True matches only emulator serials; False matches only
    physical devices (emulator lines are skipped).
    """
    result = subprocess.check_output(PLATDIR + 'commands-for-Appinventor/adb devices',
                                     shell=True, close_fds=True)
    m = None                        # avoid NameError when no device lines exist
    for line in result.split('\n')[1:]:
        if emulator:
            # [0-9]+ (not [1-9]+): real serials such as emulator-5560 contain 0
            m = re.search('^(.*emulator-[0-9]+)\t+device.*', line)
        else:
            if re.search('^(.*emulator-[0-9]+)\t+device.*', line):
                # We are an emulator
                continue            # Skip it
            # [A-Za-z0-9.:] (not [A-z...]: that range also matched [\]^_`)
            m = re.search('^([A-Za-z0-9.:]+.*?)\t+device.*', line)
        if m:
            break
    if m:
        return m.group(1)
    return False


def killadb():
    """Time to nuke adb!"""
    subprocess.check_output(PLATDIR + "commands-for-Appinventor/adb kill-server",
                            shell=True, close_fds=True)
    sys.stdout.write("Killed adb\n")
    subprocess.check_output(PLATDIR + "commands-for-Appinventor/kill-emulator",
                            shell=True, close_fds=True)
    sys.stdout.write("Killed emulator\n")


def shutdown():
    """Best-effort cleanup at exit; failures here must never block exit."""
    try:                            # Be quiet...
        killadb()
    except Exception:               # deliberate best-effort, but no bare except
        pass


if __name__ == '__main__':
    import atexit
    atexit.register(shutdown)
    run(host='127.0.0.1', port=8004)
    ##WSGIServer(app).run()
apache-2.0
Odingod/mne-python
mne/preprocessing/stim.py
7
4735
# Authors: Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
#
# License: BSD (3-clause)

import numpy as np

from ..evoked import Evoked
from ..epochs import Epochs
from ..io import Raw
from ..event import find_events
from ..io.pick import pick_channels


def _get_window(start, end):
    """Build a ``1 - hann`` taper spanning ``abs(end - start)`` samples.

    The first/last two samples ramp via a 4-point Hann window; the middle
    is flat, so the taper zeroes the artifact interval with soft edges.
    """
    from scipy.signal import hann
    ramp = hann(4)
    flat = np.ones(np.abs(end - start) - 4)
    return 1 - np.r_[ramp[:2], flat, ramp[-2:]].T


def _check_preload(inst):
    """Raise RuntimeError unless the instance's data is loaded in memory."""
    if inst.preload is False:
        raise RuntimeError('Modifying data of Instance is only supported '
                           'when preloading is used. Use preload=True '
                           '(or string) in the constructor.')


def _fix_artifact(data, window, picks, first_samp, last_samp, mode):
    """Overwrite ``data[picks, first_samp:last_samp]`` in place.

    'linear' replaces the span with a straight line between its endpoint
    samples; 'window' multiplies the span by the precomputed taper.
    """
    from scipy.interpolate import interp1d
    if mode == 'linear':
        endpoints = np.array([first_samp, last_samp])
        interpolator = interp1d(endpoints, data[:, (first_samp, last_samp)])
        filled = interpolator(np.arange(first_samp, last_samp))
        data[picks, first_samp:last_samp] = filled
    if mode == 'window':
        data[picks, first_samp:last_samp] = \
            data[picks, first_samp:last_samp] * window[np.newaxis, :]


def fix_stim_artifact(inst, events=None, event_id=None, tmin=0., tmax=0.01,
                      mode='linear', stim_channel=None, copy=False):
    """Eliminate stimulation's artifacts from instance

    Parameters
    ----------
    inst : instance of Raw or Epochs or Evoked
        The data.
    events : array, shape (n_events, 3)
        The list of events. Required only when inst is Raw.
    event_id : int
        The id of the events generating the stimulation artifacts.
        If None, read all events. Required only when inst is Raw.
    tmin : float
        Start time of the interpolation window in seconds.
    tmax : float
        End time of the interpolation window in seconds.
    mode : 'linear' | 'window'
        Way to fill the artifacted time interval.
        'linear' does linear interpolation
        'window' applies a (1 - hanning) window.
    stim_channel : str | None
        Stim channel to use.
    copy : bool
        If True, data will be copied. Else data may be modified in place.

    Returns
    -------
    inst : instance of Raw or Evoked or Epochs
        Instance with modified data
    """
    if mode not in ('linear', 'window'):
        raise ValueError("mode has to be 'linear' or 'window' (got %s)" % mode)
    if copy:
        inst = inst.copy()

    # Convert the time window to sample offsets relative to each event.
    samp_start = int(np.ceil(inst.info['sfreq'] * tmin))
    samp_stop = int(np.ceil(inst.info['sfreq'] * tmax))
    if mode == 'window' and (samp_stop - samp_start) < 4:
        raise ValueError('Time range is too short. Use a larger interval '
                         'or set mode to "linear".')

    taper = _get_window(samp_start, samp_stop) if mode == 'window' else None

    names = inst.info['ch_names']
    picks = pick_channels(names, names)  # all channels

    if isinstance(inst, Raw):
        _check_preload(inst)
        if events is None:
            events = find_events(inst, stim_channel=stim_channel)
        if len(events) == 0:
            raise ValueError('No events are found')
        if event_id is None:
            selection = np.arange(len(events))
        else:
            selection = (events[:, 2] == event_id)
        onsets = events[selection, 0]
        data, _ = inst[:, :]
        for onset in onsets:
            # Event onsets are absolute sample indices; shift by first_samp.
            lo = int(onset) - inst.first_samp + samp_start
            hi = int(onset) - inst.first_samp + samp_stop
            _fix_artifact(data, taper, picks, lo, hi, mode)
    elif isinstance(inst, Epochs):
        _check_preload(inst)
        if inst.reject is not None:
            raise RuntimeError('Reject is already applied. Use reject=None '
                               'in the constructor.')
        epoch_offset = int(np.ceil(inst.info['sfreq'] * inst.tmin))
        lo = samp_start - epoch_offset
        hi = samp_stop - epoch_offset
        for epoch_data in inst._data:
            _fix_artifact(epoch_data, taper, picks, lo, hi, mode)
    elif isinstance(inst, Evoked):
        lo = samp_start - inst.first
        hi = samp_stop - inst.first
        _fix_artifact(inst.data, taper, picks, lo, hi, mode)
    else:
        raise TypeError('Not a Raw or Epochs or Evoked (got %s).' % type(inst))
    return inst
bsd-3-clause