repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
ericoporto/Chove-Agora
lib/requests_oauthlib/oauth1_session.py
5
16103
from __future__ import unicode_literals try: from urlparse import urlparse except ImportError: from urllib.parse import urlparse import logging from oauthlib.common import add_params_to_uri from oauthlib.common import urldecode as _urldecode from oauthlib.oauth1 import ( SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER ) import requests from . import OAuth1 import sys if sys.version > "3": unicode = str log = logging.getLogger(__name__) def urldecode(body): """Parse query or json to python dictionary""" try: return _urldecode(body) except: import json return json.loads(body) class TokenRequestDenied(ValueError): def __init__(self, message, status_code): super(TokenRequestDenied, self).__init__(message) self.status_code = status_code class TokenMissing(ValueError): def __init__(self, message, response): super(TokenRequestDenied, self).__init__(message) self.response = response class VerifierMissing(ValueError): pass class OAuth1Session(requests.Session): """Request signing and convenience methods for the oauth dance. What is the difference between OAuth1Session and OAuth1? OAuth1Session actually uses OAuth1 internally and it's purpose is to assist in the OAuth workflow through convenience methods to prepare authorization URLs and parse the various token and redirection responses. It also provide rudimentary validation of responses. An example of the OAuth workflow using a basic CLI app and Twitter. >>> # Credentials obtained during the registration. 
>>> client_key = 'client key' >>> client_secret = 'secret' >>> callback_uri = 'https://127.0.0.1/callback' >>> >>> # Endpoints found in the OAuth provider API documentation >>> request_token_url = 'https://api.twitter.com/oauth/request_token' >>> authorization_url = 'https://api.twitter.com/oauth/authorize' >>> access_token_url = 'https://api.twitter.com/oauth/access_token' >>> >>> oauth_session = OAuth1Session(client_key,client_secret=client_secret, callback_uri=callback_uri) >>> >>> # First step, fetch the request token. >>> oauth_session.fetch_request_token(request_token_url) { 'oauth_token': 'kjerht2309u', 'oauth_token_secret': 'lsdajfh923874', } >>> >>> # Second step. Follow this link and authorize >>> oauth_session.authorization_url(authorization_url) 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback' >>> >>> # Third step. Fetch the access token >>> redirect_response = raw_input('Paste the full redirect URL here.') >>> oauth_session.parse_authorization_response(redirect_response) { 'oauth_token: 'kjerht2309u', 'oauth_token_secret: 'lsdajfh923874', 'oauth_verifier: 'w34o8967345', } >>> oauth_session.fetch_access_token(access_token_url) { 'oauth_token': 'sdf0o9823sjdfsdf', 'oauth_token_secret': '2kjshdfp92i34asdasd', } >>> # Done. You can now make OAuth requests. >>> status_url = 'http://api.twitter.com/1/statuses/update.json' >>> new_status = {'status': 'hello world!'} >>> oauth_session.post(status_url, data=new_status) <Response [200]> """ def __init__(self, client_key, client_secret=None, resource_owner_key=None, resource_owner_secret=None, callback_uri=None, signature_method=SIGNATURE_HMAC, signature_type=SIGNATURE_TYPE_AUTH_HEADER, rsa_key=None, verifier=None, client_class=None, force_include_body=False, **kwargs): """Construct the OAuth 1 session. :param client_key: A client specific identifier. :param client_secret: A client specific secret used to create HMAC and plaintext signatures. 
:param resource_owner_key: A resource owner key, also referred to as request token or access token depending on when in the workflow it is used. :param resource_owner_secret: A resource owner secret obtained with either a request or access token. Often referred to as token secret. :param callback_uri: The URL the user is redirect back to after authorization. :param signature_method: Signature methods determine how the OAuth signature is created. The three options are oauthlib.oauth1.SIGNATURE_HMAC (default), oauthlib.oauth1.SIGNATURE_RSA and oauthlib.oauth1.SIGNATURE_PLAIN. :param signature_type: Signature type decides where the OAuth parameters are added. Either in the Authorization header (default) or to the URL query parameters or the request body. Defined as oauthlib.oauth1.SIGNATURE_TYPE_AUTH_HEADER, oauthlib.oauth1.SIGNATURE_TYPE_QUERY and oauthlib.oauth1.SIGNATURE_TYPE_BODY respectively. :param rsa_key: The private RSA key as a string. Can only be used with signature_method=oauthlib.oauth1.SIGNATURE_RSA. :param verifier: A verifier string to prove authorization was granted. :param client_class: A subclass of `oauthlib.oauth1.Client` to use with `requests_oauthlib.OAuth1` instead of the default :param force_include_body: Always include the request body in the signature creation. :param **kwargs: Additional keyword arguments passed to `OAuth1` """ super(OAuth1Session, self).__init__() self._client = OAuth1(client_key, client_secret=client_secret, resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret, callback_uri=callback_uri, signature_method=signature_method, signature_type=signature_type, rsa_key=rsa_key, verifier=verifier, client_class=client_class, force_include_body=force_include_body, **kwargs) self.auth = self._client @property def authorized(self): """Boolean that indicates whether this session has an OAuth token or not. 
If `self.authorized` is True, you can reasonably expect OAuth-protected requests to the resource to succeed. If `self.authorized` is False, you need the user to go through the OAuth authentication dance before OAuth-protected requests to the resource will succeed. """ if self._client.signature_method == SIGNATURE_RSA: # RSA only uses resource_owner_key return bool(self._client.resource_owner_key) else: # other methods of authentication use all three pieces return ( bool(self._client.client_secret) and bool(self._client.resource_owner_key) and bool(self._client.resource_owner_secret) ) def authorization_url(self, url, request_token=None, **kwargs): """Create an authorization URL by appending request_token and optional kwargs to url. This is the second step in the OAuth 1 workflow. The user should be redirected to this authorization URL, grant access to you, and then be redirected back to you. The redirection back can either be specified during client registration or by supplying a callback URI per request. :param url: The authorization endpoint URL. :param request_token: The previously obtained request token. :param kwargs: Optional parameters to append to the URL. :returns: The authorization URL with new parameters embedded. An example using a registered default callback URI. >>> request_token_url = 'https://api.twitter.com/oauth/request_token' >>> authorization_url = 'https://api.twitter.com/oauth/authorize' >>> oauth_session = OAuth1Session('client-key', client_secret='secret') >>> oauth_session.fetch_request_token(request_token_url) { 'oauth_token': 'sdf0o9823sjdfsdf', 'oauth_token_secret': '2kjshdfp92i34asdasd', } >>> oauth_session.authorization_url(authorization_url) 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf' >>> oauth_session.authorization_url(authorization_url, foo='bar') 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar' An example using an explicit callback URI. 
>>> request_token_url = 'https://api.twitter.com/oauth/request_token' >>> authorization_url = 'https://api.twitter.com/oauth/authorize' >>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback') >>> oauth_session.fetch_request_token(request_token_url) { 'oauth_token': 'sdf0o9823sjdfsdf', 'oauth_token_secret': '2kjshdfp92i34asdasd', } >>> oauth_session.authorization_url(authorization_url) 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback' """ kwargs['oauth_token'] = request_token or self._client.client.resource_owner_key log.debug('Adding parameters %s to url %s', kwargs, url) return add_params_to_uri(url, kwargs.items()) def fetch_request_token(self, url, realm=None): """Fetch a request token. This is the first step in the OAuth 1 workflow. A request token is obtained by making a signed post request to url. The token is then parsed from the application/x-www-form-urlencoded response and ready to be used to construct an authorization url. :param url: The request token endpoint URL. :param realm: A list of realms to request access to. :returns: The response in dict format. Note that a previously set callback_uri will be reset for your convenience, or else signature creation will be incorrect on consecutive requests. >>> request_token_url = 'https://api.twitter.com/oauth/request_token' >>> oauth_session = OAuth1Session('client-key', client_secret='secret') >>> oauth_session.fetch_request_token(request_token_url) { 'oauth_token': 'sdf0o9823sjdfsdf', 'oauth_token_secret': '2kjshdfp92i34asdasd', } """ self._client.client.realm = ' '.join(realm) if realm else None token = self._fetch_token(url) log.debug('Resetting callback_uri and realm (not needed in next phase).') self._client.client.callback_uri = None self._client.client.realm = None return token def fetch_access_token(self, url, verifier=None): """Fetch an access token. 
This is the final step in the OAuth 1 workflow. An access token is obtained using all previously obtained credentials, including the verifier from the authorization step. Note that a previously set verifier will be reset for your convenience, or else signature creation will be incorrect on consecutive requests. >>> access_token_url = 'https://api.twitter.com/oauth/access_token' >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345' >>> oauth_session = OAuth1Session('client-key', client_secret='secret') >>> oauth_session.parse_authorization_response(redirect_response) { 'oauth_token: 'kjerht2309u', 'oauth_token_secret: 'lsdajfh923874', 'oauth_verifier: 'w34o8967345', } >>> oauth_session.fetch_access_token(access_token_url) { 'oauth_token': 'sdf0o9823sjdfsdf', 'oauth_token_secret': '2kjshdfp92i34asdasd', } """ if verifier: self._client.client.verifier = verifier if not getattr(self._client.client, 'verifier', None): raise VerifierMissing('No client verifier has been set.') token = self._fetch_token(url) log.debug('Resetting verifier attribute, should not be used anymore.') self._client.client.verifier = None return token def parse_authorization_response(self, url): """Extract parameters from the post authorization redirect response URL. :param url: The full URL that resulted from the user being redirected back from the OAuth provider to you, the client. :returns: A dict of parameters extracted from the URL. 
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345' >>> oauth_session = OAuth1Session('client-key', client_secret='secret') >>> oauth_session.parse_authorization_response(redirect_response) { 'oauth_token: 'kjerht2309u', 'oauth_token_secret: 'lsdajfh923874', 'oauth_verifier: 'w34o8967345', } """ log.debug('Parsing token from query part of url %s', url) token = dict(urldecode(urlparse(url).query)) log.debug('Updating internal client token attribute.') self._populate_attributes(token) return token def _populate_attributes(self, token): if 'oauth_token' in token: self._client.client.resource_owner_key = token['oauth_token'] else: raise TokenMissing( 'Response does not contain a token: {resp}'.format(resp=token), token, ) if 'oauth_token_secret' in token: self._client.client.resource_owner_secret = ( token['oauth_token_secret']) if 'oauth_verifier' in token: self._client.client.verifier = token['oauth_verifier'] def _fetch_token(self, url): log.debug('Fetching token from %s using client %s', url, self._client.client) r = self.post(url) if r.status_code >= 400: error = "Token request failed with code %s, response was '%s'." raise TokenRequestDenied(error % (r.status_code, r.text), r.status_code) log.debug('Decoding token from response "%s"', r.text) try: token = dict(urldecode(r.text)) except ValueError as e: error = ("Unable to decode token from token response. " "This is commonly caused by an unsuccessful request where" " a non urlencoded error message is returned. " "The decoding error was %s""" % e) raise ValueError(error) log.debug('Obtained token %s', token) log.debug('Updating internal client attributes from token data.') self._populate_attributes(token) return token def rebuild_auth(self, prepared_request, response): """ When being redirected we should always strip Authorization header, since nonce may not be reused as per OAuth spec. 
""" if 'Authorization' in prepared_request.headers: # If we get redirected to a new host, we should strip out # any authentication headers. prepared_request.headers.pop('Authorization', True) prepared_request.prepare_auth(self.auth) return
apache-2.0
heke123/chromium-crosswalk
tools/perf/profile_creators/cookie_profile_extender.py
14
3339
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import os

try:
  import sqlite3  # Not present on ChromeOS DUT.
except ImportError:
  pass

import page_sets

from profile_creators import fast_navigation_profile_extender


class CookieProfileExtender(
    fast_navigation_profile_extender.FastNavigationProfileExtender):
  """This extender fills in the cookie database.

  By default, Chrome purges the cookie DB down to 3300 cookies. However, it
  won't purge cookies accessed in the last month. This means the extender
  needs to be careful not to create an artificially high number of cookies.
  """
  _COOKIE_DB_EXPECTED_SIZE = 3300

  def __init__(self, finder_options):
    # The rate limiting factors are fetching network resources and executing
    # javascript. There's not much to be done about the former, and having one
    # tab per logical core appears close to optimum for the latter.
    maximum_batch_size = multiprocessing.cpu_count()

    # Web page replay cannot handle too many requests over a duration of 4
    # minutes (maximum segment lifetime), as it may exhaust the socket pool.
    # Artificially limit the rate to no more than 5 simultaneous tab loads.
    if not finder_options.use_live_sites:
      maximum_batch_size = min(5, maximum_batch_size)

    super(CookieProfileExtender, self).__init__(
        finder_options, maximum_batch_size)

    # A list of urls that have not yet been navigated to. This list will shrink
    # over time. Each navigation will add a diminishing number of new cookies,
    # since there's a high probability that the cookie is already present.
    self._page_set = page_sets.ProfileSafeUrlsPageSet()
    # Idiom: build the url list with a comprehension instead of a manual
    # append loop.
    self._navigation_urls = [story.url for story in self._page_set.stories]

  def GetUrlIterator(self):
    """Superclass override."""
    return iter(self._navigation_urls)

  def ShouldExitAfterBatchNavigation(self):
    """Superclass override."""
    return self._IsCookieDBFull()

  def WebPageReplayArchivePath(self):
    """Superclass override."""
    return self._page_set.WprFilePathForStory(
        self._page_set.stories[0])

  def FetchWebPageReplayArchives(self):
    """Superclass override."""
    self._page_set.wpr_archive_info.DownloadArchivesIfNeeded()

  @staticmethod
  def _CookieCountInDB(db_path):
    """The number of cookies in the db at |db_path|.

    Raises sqlite3 errors (e.g. OperationalError on lock contention); the
    caller decides how to handle them.
    """
    connection = sqlite3.connect(db_path)
    # Fix: the original wrapped the query in "except: raise", which is a
    # no-op; try/finally alone already guarantees the connection is closed.
    try:
      cursor = connection.cursor()
      cursor.execute("select count(*) from cookies")
      cookie_count = cursor.fetchone()[0]
    finally:
      connection.close()
    return cookie_count

  def _IsCookieDBFull(self):
    """Chrome does not immediately flush cookies to its database. It's
    possible that this method will return a false negative."""
    cookie_db_path = os.path.join(self.profile_path, "Default", "Cookies")
    try:
      cookie_count = CookieProfileExtender._CookieCountInDB(cookie_db_path)
    except sqlite3.OperationalError:
      # There will occasionally be contention for the SQLite database. This
      # shouldn't happen often, so ignore the errors.
      return False

    return cookie_count > CookieProfileExtender._COOKIE_DB_EXPECTED_SIZE
bsd-3-clause
RafaelTorrealba/odoo
addons/report_webkit/convert.py
322
2581
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
#
##############################################################################
from openerp.tools import convert

# Keep a handle on the class we are about to replace so the override can
# still delegate to the stock implementation.
original_xml_import = convert.xml_import


class WebkitXMLImport(original_xml_import):
    # Override of xml import in order to add webkit_header tag in report tag.
    # As discussed with the R&D Team, the current XML processing API does
    # not offer enough flexibity to do it in a cleaner way.
    # The solution is not meant to be long term solution, but at least
    # allows chaining of several overrides of the _tag_report method,
    # and does not require a copy/paste of the original code.

    def _tag_report(self, cr, rec, data_node=None, mode=None):
        """Process a <report> tag, then attach the optional webkit header.

        Delegates record creation to the parent implementation and, for
        webkit reports only, resolves the ``webkit_header`` attribute and
        writes it onto the created ``ir.actions.report.xml`` record.
        """
        report_id = super(WebkitXMLImport, self)._tag_report(cr, rec, data_node)

        # Non-webkit reports need no extra handling — return early.
        if rec.get('report_type') != 'webkit':
            return report_id

        header = rec.get('webkit_header')
        if not header:
            return report_id

        # An explicit falsy marker clears the header; anything else is an
        # XML id to resolve into a database id.
        webkit_header_id = (False if header in ('False', '0', 'None')
                            else self.id_get(cr, header))
        self.pool.get('ir.actions.report.xml').write(
            cr, self.uid, report_id, {'webkit_header': webkit_header_id})
        return report_id


convert.xml_import = WebkitXMLImport
agpl-3.0
felipenaselva/felipe.repository
script.module.requests/lib/requests/packages/urllib3/util/ssl_.py
148
12046
from __future__ import absolute_import import errno import warnings import hmac from binascii import hexlify, unhexlify from hashlib import md5, sha1, sha256 from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning SSLContext = None HAS_SNI = False IS_PYOPENSSL = False # Maps the length of a digest to a possible hash function producing this digest HASHFUNC_MAP = { 32: md5, 40: sha1, 64: sha256, } def _const_compare_digest_backport(a, b): """ Compare two digests of equal length in constant time. The digests must be of type str/bytes. Returns True if the digests match, and False otherwise. """ result = abs(len(a) - len(b)) for l, r in zip(bytearray(a), bytearray(b)): result |= l ^ r return result == 0 _const_compare_digest = getattr(hmac, 'compare_digest', _const_compare_digest_backport) try: # Test for SSL features import ssl from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 from ssl import HAS_SNI # Has SNI? except ImportError: pass try: from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION except ImportError: OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 OP_NO_COMPRESSION = 0x20000 # A secure default. # Sources for more information on TLS ciphers: # # - https://wiki.mozilla.org/Security/Server_Side_TLS # - https://www.ssllabs.com/projects/best-practices/index.html # - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ # # The general intent is: # - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), # - prefer ECDHE over DHE for better performance, # - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and # security, # - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, # - disable NULL authentication, MD5 MACs and DSS for security reasons. 
DEFAULT_CIPHERS = ':'.join([ 'ECDH+AESGCM', 'ECDH+CHACHA20', 'DH+AESGCM', 'DH+CHACHA20', 'ECDH+AES256', 'DH+AES256', 'ECDH+AES128', 'DH+AES', 'RSA+AESGCM', 'RSA+AES', '!aNULL', '!eNULL', '!MD5', ]) try: from ssl import SSLContext # Modern SSL? except ImportError: import sys class SSLContext(object): # Platform-specific: Python 2 & 3.1 supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or (3, 2) <= sys.version_info) def __init__(self, protocol_version): self.protocol = protocol_version # Use default values from a real SSLContext self.check_hostname = False self.verify_mode = ssl.CERT_NONE self.ca_certs = None self.options = 0 self.certfile = None self.keyfile = None self.ciphers = None def load_cert_chain(self, certfile, keyfile): self.certfile = certfile self.keyfile = keyfile def load_verify_locations(self, cafile=None, capath=None): self.ca_certs = cafile if capath is not None: raise SSLError("CA directories not supported in older Pythons") def set_ciphers(self, cipher_suite): if not self.supports_set_ciphers: raise TypeError( 'Your version of Python does not support setting ' 'a custom cipher suite. Please upgrade to Python ' '2.7, 3.2, or later if you need this functionality.' ) self.ciphers = cipher_suite def wrap_socket(self, socket, server_hostname=None, server_side=False): warnings.warn( 'A true SSLContext object is not available. This prevents ' 'urllib3 from configuring SSL appropriately and may cause ' 'certain SSL connections to fail. You can upgrade to a newer ' 'version of Python to solve this. 
For more information, see ' 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' '#ssl-warnings', InsecurePlatformWarning ) kwargs = { 'keyfile': self.keyfile, 'certfile': self.certfile, 'ca_certs': self.ca_certs, 'cert_reqs': self.verify_mode, 'ssl_version': self.protocol, 'server_side': server_side, } if self.supports_set_ciphers: # Platform-specific: Python 2.7+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs) else: # Platform-specific: Python 2.6 return wrap_socket(socket, **kwargs) def assert_fingerprint(cert, fingerprint): """ Checks if given fingerprint matches the supplied certificate. :param cert: Certificate as bytes object. :param fingerprint: Fingerprint as string of hexdigits, can be interspersed by colons. """ fingerprint = fingerprint.replace(':', '').lower() digest_length = len(fingerprint) hashfunc = HASHFUNC_MAP.get(digest_length) if not hashfunc: raise SSLError( 'Fingerprint of invalid length: {0}'.format(fingerprint)) # We need encode() here for py32; works on py2 and p33. fingerprint_bytes = unhexlify(fingerprint.encode()) cert_digest = hashfunc(cert).digest() if not _const_compare_digest(cert_digest, fingerprint_bytes): raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' .format(fingerprint, hexlify(cert_digest))) def resolve_cert_reqs(candidate): """ Resolves the argument to a numeric constant, which can be passed to the wrap_socket function/method from the ssl module. Defaults to :data:`ssl.CERT_NONE`. If given a string it is assumed to be the name of the constant in the :mod:`ssl` module or its abbrevation. (So you can specify `REQUIRED` instead of `CERT_REQUIRED`. If it's neither `None` nor a string we assume it is already the numeric constant which can directly be passed to wrap_socket. 
""" if candidate is None: return CERT_NONE if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'CERT_' + candidate) return res return candidate def resolve_ssl_version(candidate): """ like resolve_cert_reqs """ if candidate is None: return PROTOCOL_SSLv23 if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'PROTOCOL_' + candidate) return res return candidate def create_urllib3_context(ssl_version=None, cert_reqs=None, options=None, ciphers=None): """All arguments have the same meaning as ``ssl_wrap_socket``. By default, this function does a lot of the same work that ``ssl.create_default_context`` does on Python 3.4+. It: - Disables SSLv2, SSLv3, and compression - Sets a restricted set of server ciphers If you wish to enable SSLv3, you can do:: from urllib3.util import ssl_ context = ssl_.create_urllib3_context() context.options &= ~ssl_.OP_NO_SSLv3 You can do the same to enable compression (substituting ``COMPRESSION`` for ``SSLv3`` in the last line above). :param ssl_version: The desired protocol version to use. This will default to PROTOCOL_SSLv23 which will negotiate the highest protocol that both the server and your installation of OpenSSL support. :param cert_reqs: Whether to require the certificate verification. This defaults to ``ssl.CERT_REQUIRED``. :param options: Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. :param ciphers: Which cipher suites to allow the server to select. 
:returns: Constructed SSLContext object with specified options :rtype: SSLContext """ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) # Setting the default here, as we may have no ssl module on import cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs if options is None: options = 0 # SSLv2 is easily broken and is considered harmful and dangerous options |= OP_NO_SSLv2 # SSLv3 has several problems and is now dangerous options |= OP_NO_SSLv3 # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ # (issue #309) options |= OP_NO_COMPRESSION context.options |= options if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6 context.set_ciphers(ciphers or DEFAULT_CIPHERS) context.verify_mode = cert_reqs if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 # We do our own verification, including fingerprints and alternative # hostnames. So disable it here context.check_hostname = False return context def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None, ciphers=None, ssl_context=None, ca_cert_dir=None): """ All arguments except for server_hostname, ssl_context, and ca_cert_dir have the same meaning as they do when using :func:`ssl.wrap_socket`. :param server_hostname: When SNI is supported, the expected hostname of the certificate :param ssl_context: A pre-made :class:`SSLContext` object. If none is provided, one will be created using :func:`create_urllib3_context`. :param ciphers: A string of ciphers we wish the client to support. This is not supported on Python 2.6 as the ssl module does not support it. :param ca_cert_dir: A directory containing CA certificates in multiple separate files, as supported by OpenSSL's -CApath flag or the capath argument to SSLContext.load_verify_locations(). 
""" context = ssl_context if context is None: # Note: This branch of code and all the variables in it are no longer # used by urllib3 itself. We should consider deprecating and removing # this code. context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers) if ca_certs or ca_cert_dir: try: context.load_verify_locations(ca_certs, ca_cert_dir) except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2 raise SSLError(e) # Py33 raises FileNotFoundError which subclasses OSError # These are not equivalent unless we check the errno attribute except OSError as e: # Platform-specific: Python 3.3 and beyond if e.errno == errno.ENOENT: raise SSLError(e) raise elif getattr(context, 'load_default_certs', None) is not None: # try to load OS default certs; works well on Windows (require Python3.4+) context.load_default_certs() if certfile: context.load_cert_chain(certfile, keyfile) if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI return context.wrap_socket(sock, server_hostname=server_hostname) warnings.warn( 'An HTTPS request has been made, but the SNI (Subject Name ' 'Indication) extension to TLS is not available on this platform. ' 'This may cause the server to present an incorrect TLS ' 'certificate, which can cause validation failures. You can upgrade to ' 'a newer version of Python to solve this. For more information, see ' 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' '#ssl-warnings', SNIMissingWarning ) return context.wrap_socket(sock)
gpl-2.0
anarchivist/pyflag
src/plugins/Flash/AdvancedCommands.py
1
13927
""" These Flash commands allow more sophisticated operations, most of which may not be needed by most users. Some operations are specifically designed for testing and have little use in practice. """ import pyflag.pyflagsh as pyflagsh import pyflag.Registry as Registry import pyflag.DB as DB import fnmatch import pyflag.FileSystem as FileSystem import pyflag.Scanner as Scanner import time, types import pyflag.pyflaglog as pyflaglog import BasicCommands import pyflag.ScannerUtils as ScannerUtils import pyflag.conf config=pyflag.conf.ConfObject() class scan_path(pyflagsh.command): """ This takes a path as an argument and runs the specified scanner on the path this might be of more use than specifying inodes for the average user since if you load two disk images, then you might have /disk1 and /disk2 and want to just run scans over one of them, which is simpler to specify using /disk1. """ def help(self): return "scan VFSPath [list of scanners]: Scans the VFS path with the scanners specified" def complete(self, text,state): if len(self.args)>2 or len(self.args)==2 and not text: scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] +\ [ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ] return scanners[state] else: dbh = DB.DBO(self.environment._CASE) dbh.execute("select substr(path,1,%r) as abbrev,path from file where path like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state)) return dbh.fetch()['path'] def wait_for_scan(self, cookie): """ Waits for scanners to complete """ pdbh = DB.DBO() pdbh.check_index('jobs','cookie') ## Often this process owns a worker as well. In that case we can wake it up: import pyflag.Farm as Farm Farm.wake_workers() ## Wait until there are no more jobs left. 
while 1: pdbh.execute("select count(*) as total from jobs where cookie=%r and arg1=%r", (cookie, self.environment._CASE)) row = pdbh.fetch() if row['total']==0: break time.sleep(1) def execute(self): scanners=[] if len(self.args)<2: yield self.help() return elif type(self.args[1]) == types.ListType: scanners = self.args[1] else: for i in range(1,len(self.args)): scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i])) ## Assume that people always want recursive - I think this makes sense path = self.args[0] if not path.endswith("*"): path = path + "*" ## FIXME For massive images this should be broken up, as in the old GUI method dbh=DB.DBO(self.environment._CASE) dbh.execute("select inode.inode from inode join file on file.inode = inode.inode where file.path rlike %r", fnmatch.translate(path)) pdbh = DB.DBO() pdbh.mass_insert_start('jobs') ## This is a cookie used to identify our requests so that we ## can check they have been done later. cookie = int(time.time()) for row in dbh: inode = row['inode'] pdbh.mass_insert( command = 'Scan', arg1 = self.environment._CASE, arg2 = row['inode'], arg3 = ','.join(scanners), cookie=cookie, )# pdbh.mass_insert_commit() ## Wait for the scanners to finish: self.wait_for_scan(cookie) yield "Scanning complete" import pyflag.FlagFramework as FlagFramework class init_flag_db(pyflagsh.command): """ Creates the main flag db if needed """ def execute(self): try: dbh = DB.DBO() except: dbh = DB.DBO('mysql') dbh.execute("create database `%s`" % config.FLAGDB) dbh = DB.DBO() FlagFramework.post_event("init_default_db", None) yield "Done" class delete_iosource(pyflagsh.command): """ Deletes an iosource from the current case """ def complete(self, text, state): dbh = DB.DBO(self.environment._CASE) dbh.execute("select substr(name,1,%r) as abbrev,name from iosources where name like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state)) return dbh.fetch()['name'] def execute(self): for iosource in self.args: dbh = 
DB.DBO(self.environment._CASE) dbh2 = dbh.clone() dbh.delete('inode', where=DB.expand("inode like 'I%s|%%'", iosource)) dbh.execute("select * from filesystems where iosource = %r", iosource) for row in dbh: dbh2.delete('file', where=DB.expand("path like '%s%%'", iosource)) dbh.delete("iosources", where=DB.expand("name=%r", iosource)) yield "Removed IOSource %s" % iosource class scan(pyflagsh.command): """ Scan a glob of inodes with a glob of scanners """ def help(self): return "scan inode [list of scanners]: Scans the inodes with the scanners specified" def complete(self, text,state): if len(self.args)>2 or len(self.args)==2 and not text: scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] + \ [ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ] return scanners[state] else: dbh = DB.DBO(self.environment._CASE) dbh.execute("select substr(inode,1,%r) as abbrev,inode from inode where inode like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state)) return dbh.fetch()['inode'] def execute(self): if len(self.args)<2: yield self.help() return ## Try to glob the inode list: dbh=DB.DBO(self.environment._CASE) dbh.execute("select inode from inode where inode rlike %r",fnmatch.translate(self.args[0])) pdbh = DB.DBO() pdbh.mass_insert_start('jobs') ## This is a cookie used to identify our requests so that we ## can check they have been done later. 
cookie = int(time.time()) scanners = [] for i in range(1,len(self.args)): scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i])) scanners = ScannerUtils.fill_in_dependancies(scanners) for row in dbh: inode = row['inode'] pdbh.mass_insert( command = 'Scan', arg1 = self.environment._CASE, arg2 = row['inode'], arg3 = ','.join(scanners), cookie=cookie, ) pdbh.mass_insert_commit() ## Wait for the scanners to finish: if self.environment.interactive: self.wait_for_scan(cookie) yield "Scanning complete" def wait_for_scan(self, cookie): """ Waits for scanners to complete """ pdbh = DB.DBO() ## Often this process owns a worker as well. In that case we can wake it up: import pyflag.Farm as Farm Farm.wake_workers() ## Wait until there are no more jobs left. while 1: pdbh.execute("select count(*) as total from jobs where cookie=%r and arg1=%r", (cookie, self.environment._CASE)) row = pdbh.fetch() if row and row['total']==0: break time.sleep(1) class scan_file(scan,BasicCommands.ls): """ Scan a file in the VFS by name """ def help(self): return "scan file [list of scanners]: Scan the file with the scanners specified " def complete(self, text,state): if len(self.args)>2 or len(self.args)==2 and not text: scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] +\ [ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ] return scanners[state] else: dbh = DB.DBO(self.environment._CASE) dbh.execute("select substr(path,1,%r) as abbrev,path from file where path like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state)) return dbh.fetch()['path'] def execute(self): if len(self.args)<2: yield self.help() return pdbh = DB.DBO() pdbh.mass_insert_start('jobs') cookie = int(time.time()) scanners = [] for i in range(1,len(self.args)): scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i])) for path in self.glob_files(self.args[:1]): path, inode, inode_id = self.environment._FS.lookup(path = path) ## This is a cookie 
used to identify our requests so that we ## can check they have been done later. pdbh.mass_insert( command = 'Scan', arg1 = self.environment._CASE, arg2 = inode, arg3 = ','.join(scanners), cookie=cookie, ) pdbh.mass_insert_commit() ## Wait for the scanners to finish: if 1 or self.environment.interactive: self.wait_for_scan(cookie) yield "Scanning complete" ## ## This allows people to reset based on the VFS path ## class scanner_reset_path(scan): """ Reset all files under a specified path """ def help(self): return "scanner_reset_path path [list of scanners]: Resets the inodes under the path given with the scanners specified" def execute(self): if len(self.args)<2: yield self.help() return scanners = [] if type(self.args[1]) == types.ListType: scanners = self.args[1] else: for i in range(1,len(self.args)): scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i])) print "GETTING FACTORIES" factories = Scanner.get_factories(self.environment._CASE, scanners) print "OK NOW RESETING EM" for f in factories: f.reset_entire_path(self.args[0]) print "HOKAY" yield "Reset Complete" ## There is little point in distributing this because its very quick anyway. class scanner_reset(scan): """ Reset multiple inodes as specified by a glob """ def help(self): return "reset inode [list of scanners]: Resets the inodes with the scanners specified" def execute(self): if len(self.args)<2: yield self.help() return scanners = [] for i in range(1,len(self.args)): scanners.extend(fnmatch.filter(Registry.SCANNERS.scanners, self.args[i])) factories = Scanner.get_factories(self.environment._CASE, scanners) for f in factories: f.multiple_inode_reset(self.args[0]) yield "Resetting complete" class load_and_scan(scan): """ Load a filesystem and scan it at the same time """ def help(self): return """load_and_scan iosource mount_point fstype [list of scanners]: Loads the iosource into the right mount point and scans all new inodes using the scanner list. 
This allows scanning to start as soon as VFS inodes are produced and before the VFS is fully populated. """ def complete(self, text,state): if len(self.args)>4 or len(self.args)==4 and not text: scanners = [ x for x in Registry.SCANNERS.scanners if x.startswith(text) ] + \ [ x for x in Registry.SCANNERS.get_groups() if x.startswith(text) ] return scanners[state] elif len(self.args)>3 or len(self.args)==3 and not text: fstypes = [ x for x in Registry.FILESYSTEMS.class_names if x.startswith(text) ] return fstypes[state] elif len(self.args)>2 or len(self.args)==2 and not text: return elif len(self.args)>1 or len(self.args)==1 and not text: dbh = DB.DBO(self.environment._CASE) dbh.execute("select substr(value,1,%r) as abbrev,value from meta where property='iosource' and value like '%s%%' group by abbrev limit %s,1",(len(text)+1,text,state)) return dbh.fetch()['value'] def execute(self): if len(self.args)<3: yield self.help() return iosource=self.args[0] mnt_point=self.args[1] filesystem=self.args[2] query = {} dbh = DB.DBO() dbh.mass_insert_start('jobs') ## This works out all the scanners that were specified: tmp = [] for i in range(3,len(self.args)): ## Is it a parameter? if "=" in self.args[i]: prop,value = self.args[i].split("=",1) query[prop] = value else: tmp.extend([x for x in fnmatch.filter( Registry.SCANNERS.scanners, self.args[i]) ]) scanners = [ ] for item in tmp: if item not in scanners: scanners.append(item) ## Load the filesystem: try: fs = Registry.FILESYSTEMS.dispatch(filesystem) except KeyError: yield "Unable to find a filesystem of %s" % filesystem return fs=fs(self.environment._CASE, query) fs.cookie = int(time.time()) fs.load(mnt_point, iosource, scanners) ## Wait for all the scanners to finish self.wait_for_scan(fs.cookie) yield "Loading complete"
gpl-2.0
florian-dacosta/OCB
openerp/addons/base/tests/test_res_lang.py
384
2104
import unittest2

import openerp.tests.common as common


class test_res_lang(common.TransactionCase):
    """Regression tests for res.lang number formatting helpers."""

    def test_00_intersperse(self):
        # intersperse(number_string, group_sizes, separator) returns the
        # formatted string and the number of separators inserted.
        from openerp.addons.base.res.res_lang import intersperse

        # No grouping requested: input is returned untouched.
        assert intersperse("", []) == ("", 0)
        assert intersperse("0", []) == ("0", 0)
        assert intersperse("012", []) == ("012", 0)
        assert intersperse("1", []) == ("1", 0)
        assert intersperse("12", []) == ("12", 0)
        assert intersperse("123", []) == ("123", 0)
        assert intersperse("1234", []) == ("1234", 0)
        assert intersperse("123456789", []) == ("123456789", 0)
        assert intersperse("&ab%#@1", []) == ("&ab%#@1", 0)

        # Grouping wider than (or equal to) the input inserts nothing.
        assert intersperse("0", []) == ("0", 0)
        assert intersperse("0", [1]) == ("0", 0)
        assert intersperse("0", [2]) == ("0", 0)
        assert intersperse("0", [200]) == ("0", 0)

        # Group sizes apply right-to-left; a trailing 0 repeats the last
        # size, a negative size stops grouping.
        assert intersperse("12345678", [1], '.') == ('1234567.8', 1)
        assert intersperse("12345678", [1], '.') == ('1234567.8', 1)
        assert intersperse("12345678", [2], '.') == ('123456.78', 1)
        assert intersperse("12345678", [2,1], '.') == ('12345.6.78', 2)
        assert intersperse("12345678", [2,0], '.') == ('12.34.56.78', 3)
        assert intersperse("12345678", [-1,2], '.') == ('12345678', 0)
        assert intersperse("12345678", [2,-1], '.') == ('123456.78', 1)
        assert intersperse("12345678", [2,0,1], '.') == ('12.34.56.78', 3)
        assert intersperse("12345678", [2,0,0], '.') == ('12.34.56.78', 3)
        assert intersperse("12345678", [2,0,-1], '.') == ('12.34.56.78', 3)
        assert intersperse("12345678", [3,3,3,3], '.') == ('12.345.678', 2)

        # Leading non-digit characters are left alone; grouping only applies
        # to the trailing run of digits ... w.r.t. here.
        assert intersperse("abc1234567xy", [2], '.') == ('abc1234567.xy', 1)
        assert intersperse("abc1234567xy8", [2], '.') == ('abc1234567x.y8', 1)
        assert intersperse("abc12", [3], '.') == ('abc12', 0)
        assert intersperse("abc12", [2], '.') == ('abc12', 0)
        assert intersperse("abc12", [1], '.') == ('abc1.2', 1)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
mobarski/sandbox
parallel/p7cat.py
1
1316
## p7cat.py - parallel concatenation ## (c) 2017 by mobarski (at) gmail (dot) com ## licence: MIT ## version: x1 from __future__ import print_function import sys import os from multiprocessing import Process from time import time def write_part(path_in, path_out, offset, blocksize=4096): fi = open(path_in,'rb') fo = open(path_out,'r+b') fo.seek(offset) while True: block = fi.read(blocksize) fo.write(block) if len(block)<blocksize: break fi.close() fo.close() if __name__ == "__main__": t0 = time() print("\n\tP7 CONCAT START\n") outpath = sys.argv[1] filenames = sys.argv[2:] #print('\tOUT',outpath) #print('\tIN\n',filenames) meta = {} # filename -> size, offset offset = 0 for path in filenames: size = os.path.getsize(path) meta[path] = (size,offset) offset += size # allocate disk space out = open(outpath,'wb') out.seek(offset-1) out.write(b'\x00') out.close() proc = {} for path in filenames: size,offset = meta[path] p = Process(target=write_part, args=(path, outpath, offset)) p.start() print("\tBEGIN pid:{0} size:{2} offset:{1}".format(p.pid,offset,size)) proc[path] = p sys.stdout.flush() for path in filenames: p = proc[path] p.join() print("\tEND pid:{0}".format(p.pid)) print("\n\tRUN_TIME_TOTAL:{0:.1f}s\n".format(time()-t0))
mit
ruziniu/v2ex
v2ex/babel/ext/bleach/encoding.py
51
2028
import datetime
from decimal import Decimal
import types


def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_unicode(strings_only=True).
    """
    return isinstance(obj, (
        types.NoneType,
        int, long,
        datetime.datetime, datetime.date, datetime.time,
        float, Decimal)
    )


def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_unicode, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, basestring,):
            # Non-string object: prefer its __unicode__, otherwise go
            # through str() and decode.
            if hasattr(s, '__unicode__'):
                s = unicode(s)
            else:
                try:
                    s = unicode(str(s), encoding, errors)
                except UnicodeEncodeError:
                    if not isinstance(s, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    s = ' '.join([force_unicode(arg, encoding, strings_only,
                            errors) for arg in s])
        elif not isinstance(s, unicode):
            # Note: We use .decode() here, instead of unicode(s, encoding,
            # errors), so that if s is a SafeString, it ends up being a
            # SafeUnicode at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError, e:
        # Re-raise with the same args (preserves the original failure detail).
        raise UnicodeDecodeError(*e.args)
    return s
bsd-3-clause
pigeonflight/strider-plone
docker/appengine/lib/django-1.5/tests/regressiontests/inspectdb/models.py
48
1091
from django.db import models

# Fixture models for the inspectdb regression tests: each model exercises a
# column-naming corner case that `manage.py inspectdb` must round-trip
# (reserved words, digits, underscores, special characters).


class People(models.Model):
    name = models.CharField(max_length=255)


class Message(models.Model):
    # 'from' is a Python keyword, so the field is renamed but the column kept.
    from_field = models.ForeignKey(People, db_column='from_id')


class PeopleData(models.Model):
    # Foreign key doubling as the primary key.
    people_pk = models.ForeignKey(People, primary_key=True)
    ssn = models.CharField(max_length=11)


class PeopleMoreData(models.Model):
    # Unique foreign key (one-to-one at the database level).
    people_unique = models.ForeignKey(People, unique=True)
    license = models.CharField(max_length=255)


class DigitsInColumnName(models.Model):
    # Column names that start with digits are invalid Python identifiers.
    all_digits = models.CharField(max_length=11, db_column='123')
    leading_digit = models.CharField(max_length=11, db_column='4extra')
    leading_digits = models.CharField(max_length=11, db_column='45extra')


class SpecialColumnName(models.Model):
    field = models.IntegerField(db_column='field')
    # Underscores
    field_field_0 = models.IntegerField(db_column='Field_')
    field_field_1 = models.IntegerField(db_column='Field__')
    field_field_2 = models.IntegerField(db_column='__field')
    # Other chars
    prc_x = models.IntegerField(db_column='prc(%) x')
mit
stoq/stoqdrivers
docs/fiscal-driver-template.py
1
5263
#
# Stoqdrivers template driver
#
# Copyright (C) 2007 Async Open Source <http://www.async.com.br>
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#

import datetime
from decimal import Decimal

from zope.interface import implementer

from stoqdrivers.enum import TaxType
from stoqdrivers.interfaces import ICouponPrinter
from stoqdrivers.printers.capabilities import Capability
from stoqdrivers.printers.fiscal import SintegraData
from stoqdrivers.serialbase import SerialBase
from stoqdrivers.translation import stoqdrivers_gettext

_ = stoqdrivers_gettext


@implementer(ICouponPrinter)
class TemplateDriver(SerialBase):
    """Skeleton fiscal-printer driver returning fixed placeholder values.

    Copy this file as the starting point for a new driver and replace the
    canned return values with real device communication.
    """

    supported = True
    model_name = "Template Driver"
    coupon_printer_charset = "ascii"

    def __init__(self, port, consts=None):
        # NOTE(review): `consts` is accepted but never stored, yet
        # get_constants() below returns self._consts — confirm where
        # _consts is expected to be set.
        SerialBase.__init__(self, port)

    #
    # This implements the ICouponPrinter Interface
    #

    # Coupon methods

    def coupon_identify_customer(self, customer, address, document):
        pass

    def coupon_open(self):
        pass

    def coupon_cancel(self):
        pass

    def coupon_close(self, message):
        # Placeholder coupon id.
        coupon_id = 123
        return coupon_id

    def coupon_add_item(self, code, description, price, taxcode,
                        quantity, unit, discount, markup, unit_desc):
        # Placeholder item id.
        item_id = 123
        return item_id

    def coupon_cancel_item(self, item_id):
        pass

    def coupon_add_payment(self, payment_method, value, description):
        return Decimal("123")

    def coupon_totalize(self, discount, markup, taxcode):
        return Decimal("123")

    # Till / Daily flow

    def summarize(self):
        # Leitura X (X report: daily summary without closing the till)
        pass

    def close_till(self, previous_day):
        # Reducao Z (Z report: fiscal daily closing)
        pass

    def till_add_cash(self, value):
        # Suprimento (cash added to the till)
        pass

    def till_remove_cash(self, value):
        # Sangria (cash removed from the till)
        pass

    def till_read_memory(self, start, end):
        # Fiscal memory read: data for the given period
        pass

    def till_read_memory_by_reductions(self, start, end):
        # Fiscal memory read: by Z-reduction numbers
        pass

    # Introspection

    def get_capabilities(self):
        # Field-size limits the printer model imposes on each value.
        return dict(
            item_code=Capability(max_len=13),
            item_id=Capability(digits=4),
            items_quantity=Capability(min_size=1, digits=4, decimals=3),
            item_price=Capability(digits=6, decimals=2),
            item_description=Capability(max_len=29),
            payment_value=Capability(digits=12, decimals=2),
            promotional_message=Capability(max_len=320),
            payment_description=Capability(max_len=48),
            customer_name=Capability(max_len=30),
            customer_id=Capability(max_len=28),
            customer_address=Capability(max_len=80),
            add_cash_value=Capability(min_size=0.1, digits=12, decimals=2),
            remove_cash_value=Capability(min_size=0.1, digits=12, decimals=2),
            )

    def get_constants(self):
        return self._consts

    def get_tax_constants(self):
        # (tax type, device constant, percentage or None) tuples.
        constants = []
        constants.append((TaxType.CUSTOM, '01', Decimal('18.00')))
        constants.append((TaxType.CUSTOM, '02', Decimal('25.00')))
        constants.extend([
            (TaxType.SUBSTITUTION, 'FF', None),
            (TaxType.EXEMPTION, 'II', None),
            (TaxType.NONE, 'NN', None),
            ])
        return constants

    def get_payment_constants(self):
        # (device constant, label) tuples for payment methods.
        methods = []
        methods.append(('01', 'DINHEIRO'))
        methods.append(('02', 'CHEQUE'))
        return methods

    def get_sintegra(self):
        # Canned Sintegra (Brazilian fiscal interchange) data.
        taxes = []
        taxes.append(('2500', Decimal("0")))
        taxes.append(('1800', Decimal("0")))
        taxes.append(('CANC', Decimal("0")))
        taxes.append(('DESC', Decimal("0")))
        taxes.append(('I', Decimal("0")))
        taxes.append(('N', Decimal("0")))
        taxes.append(('F', Decimal("0")))

        # NOTE(review): calls self._get_serial(), but only get_serial() is
        # defined in this template — confirm which name real drivers provide.
        return SintegraData(
            opening_date=datetime.date(2000, 1, 1),
            serial=self._get_serial(),
            serial_id='001',
            coupon_start=0,
            coupon_end=100,
            cro=230,
            crz=1232,
            coo=320,
            period_total=Decimal("1123"),
            total=Decimal("2311123"),
            taxes=taxes)

    # Device detection, asynchronous

    def query_status(self):
        return 'XXX'

    def status_reply_complete(self, reply):
        # Presumably a full status reply is exactly 23 bytes — device specific.
        return len(reply) == 23

    def get_serial(self):
        return 'ABC12345678'
lgpl-2.1
kustodian/ansible
lib/ansible/modules/database/influxdb/influxdb_write.py
8
2452
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: influxdb_write
short_description: Write data points into InfluxDB.
description:
  - Write data points into InfluxDB.
version_added: 2.5
author: "René Moser (@resmo)"
requirements:
  - "python >= 2.6"
  - "influxdb >= 0.9"
options:
  data_points:
    description:
      - Data points as dict to write into the database.
    required: true
  database_name:
    description:
      - Name of the database.
    required: true
extends_documentation_fragment: influxdb
'''

EXAMPLES = r'''
- name: Write points into database
  influxdb_write:
      hostname: "{{influxdb_ip_address}}"
      database_name: "{{influxdb_database_name}}"
      data_points:
        - measurement: connections
          tags:
            host: server01
            region: us-west
          time: "{{ ansible_date_time.iso8601 }}"
          fields:
            value: 2000
        - measurement: connections
          tags:
            host: server02
            region: us-east
          time: "{{ ansible_date_time.iso8601 }}"
          fields:
            value: 3000
'''

RETURN = r'''
# only defaults
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.influxdb import InfluxDb


class AnsibleInfluxDBWrite(InfluxDb):
    """Thin wrapper around the shared InfluxDb helper for writing points."""

    def write_data_point(self, data_points):
        """Write the given data points; fail the module on any client error."""
        client = self.connect_to_influxdb()

        try:
            client.write_points(data_points)
        except Exception as e:
            # Surface the error through Ansible's standard failure channel.
            self.module.fail_json(msg=to_native(e))


def main():
    # Module entry point: declare arguments, connect, write, report changed.
    argument_spec = InfluxDb.influxdb_argument_spec()
    argument_spec.update(
        data_points=dict(required=True, type='list'),
        database_name=dict(required=True, type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    influx = AnsibleInfluxDBWrite(module)
    data_points = module.params.get('data_points')
    influx.write_data_point(data_points)
    # NOTE(review): changed=True is reported unconditionally — writes are
    # never treated as idempotent here.
    module.exit_json(changed=True)


if __name__ == '__main__':
    main()
gpl-3.0
aerickson/xbmc
tools/EventClients/examples/python/example_simple.py
228
1272
#!/usr/bin/python

# Minimal example: drive XBMC remotely by sending key-press events
# through the event-server protocol using the XBMCClient helper class.

import sys
sys.path.append("../../lib/python")
import time
from xbmcclient import XBMCClient


def main():
    host = "localhost"
    port = 9777

    # Create a client and announce ourselves to XBMC; this pops up a
    # notification with the given name and icon.
    client = XBMCClient("Example Remote", "../../icons/bluetooth.png")
    client.connect()

    # Optionally give the notification window time to close in XBMC.
    time.sleep(5)

    # Press "dpadup" on the xbox gamepad map "XG"
    # (see the PacketBUTTON documentation for details),
    # then pause so the effect is visible.
    client.send_button(map="XG", button="dpadup")
    time.sleep(5)

    # Press "right" on the keyboard map "KB".
    client.send_keyboard_button("right")
    time.sleep(5)

    # That's enough — release the held button.
    client.release_button()

    # Done: close the connection.  Closing also clears any active repeat
    # key, so the explicit release above was not strictly required.
    client.close()


if __name__ == "__main__":
    main()
gpl-2.0
waheedahmed/edx-platform
openedx/core/djangoapps/api_admin/views.py
1
9646
"""Views for API management.""" import logging from django.conf import settings from django.contrib.sites.shortcuts import get_current_site from django.core.urlresolvers import reverse_lazy, reverse from django.http.response import JsonResponse from django.shortcuts import redirect from django.utils.translation import ugettext as _ from django.views.generic import View from django.views.generic.base import TemplateView from django.views.generic.edit import CreateView from oauth2_provider.generators import generate_client_secret, generate_client_id from oauth2_provider.models import get_application_model from oauth2_provider.views import ApplicationRegistration from slumber.exceptions import HttpNotFoundError from edxmako.shortcuts import render_to_response from openedx.core.djangoapps.api_admin.decorators import require_api_access from openedx.core.djangoapps.api_admin.forms import ApiAccessRequestForm, CatalogForm from openedx.core.djangoapps.api_admin.models import ApiAccessRequest, Catalog from openedx.core.djangoapps.api_admin.utils import course_discovery_api_client log = logging.getLogger(__name__) Application = get_application_model() # pylint: disable=invalid-name class ApiRequestView(CreateView): """Form view for requesting API access.""" form_class = ApiAccessRequestForm template_name = 'api_admin/api_access_request_form.html' success_url = reverse_lazy('api_admin:api-status') def get(self, request): """ If the requesting user has already requested API access, redirect them to the client creation page. 
""" if ApiAccessRequest.api_access_status(request.user) is not None: return redirect(reverse('api_admin:api-status')) return super(ApiRequestView, self).get(request) def form_valid(self, form): form.instance.user = self.request.user form.instance.site = get_current_site(self.request) return super(ApiRequestView, self).form_valid(form) class ApiRequestStatusView(ApplicationRegistration): """View for confirming our receipt of an API request.""" success_url = reverse_lazy('api_admin:api-status') def get(self, request, form=None): # pylint: disable=arguments-differ """ If the user has not created an API request, redirect them to the request form. Otherwise, display the status of their API request. We take `form` as an optional argument so that we can display validation errors correctly on the page. """ if form is None: form = self.get_form_class()() user = request.user try: api_request = ApiAccessRequest.objects.get(user=user) except ApiAccessRequest.DoesNotExist: return redirect(reverse('api_admin:api-request')) try: application = Application.objects.get(user=user) except Application.DoesNotExist: application = None # We want to fill in a few fields ourselves, so remove them # from the form so that the user doesn't see them. for field in ('client_type', 'client_secret', 'client_id', 'authorization_grant_type'): form.fields.pop(field) return render_to_response('api_admin/status.html', { 'status': api_request.status, 'api_support_link': settings.API_DOCUMENTATION_URL, 'api_support_email': settings.API_ACCESS_MANAGER_EMAIL, 'form': form, 'application': application, }) def get_form(self, form_class=None): form = super(ApiRequestStatusView, self).get_form(form_class) # Copy the data, since it's an immutable QueryDict. copied_data = form.data.copy() # Now set the fields that were removed earlier. We give them # confidential client credentials, and generate their client # ID and secret. 
copied_data.update({ 'authorization_grant_type': Application.GRANT_CLIENT_CREDENTIALS, 'client_type': Application.CLIENT_CONFIDENTIAL, 'client_secret': generate_client_secret(), 'client_id': generate_client_id(), }) form.data = copied_data return form def form_valid(self, form): # Delete any existing applications if the user has decided to regenerate their credentials Application.objects.filter(user=self.request.user).delete() return super(ApiRequestStatusView, self).form_valid(form) def form_invalid(self, form): return self.get(self.request, form) @require_api_access def post(self, request): return super(ApiRequestStatusView, self).post(request) class ApiTosView(TemplateView): """View to show the API Terms of Service.""" template_name = 'api_admin/terms_of_service.html' class CatalogSearchView(View): """View to search for catalogs belonging to a user.""" def get(self, request): """Display a form to search for catalogs belonging to a user.""" return render_to_response('api_admin/catalogs/search.html') def post(self, request): """Redirect to the list view for the given user.""" username = request.POST.get('username') # If no username is provided, bounce back to this page. if not username: return redirect(reverse('api_admin:catalog-search')) return redirect(reverse('api_admin:catalog-list', kwargs={'username': username})) class CatalogListView(View): """View to list existing catalogs and create new ones.""" template = 'api_admin/catalogs/list.html' def _get_catalogs(self, client, username): """Retrieve catalogs for a user. 
Returns the empty list if none are found.""" try: response = client.api.v1.catalogs.get(username=username) return [Catalog(attributes=catalog) for catalog in response['results']] except HttpNotFoundError: return [] def get(self, request, username): """Display a list of a user's catalogs.""" client = course_discovery_api_client(request.user) catalogs = self._get_catalogs(client, username) return render_to_response(self.template, { 'username': username, 'catalogs': catalogs, 'form': CatalogForm(initial={'viewers': [username]}), 'preview_url': reverse('api_admin:catalog-preview'), 'catalog_api_url': client.api.v1.courses.url(), }) def post(self, request, username): """Create a new catalog for a user.""" form = CatalogForm(request.POST) client = course_discovery_api_client(request.user) if not form.is_valid(): catalogs = self._get_catalogs(client, username) return render_to_response(self.template, { 'form': form, 'catalogs': catalogs, 'username': username, 'preview_url': reverse('api_admin:catalog-preview'), 'catalog_api_url': client.api.v1.courses.url(), }, status=400) attrs = form.instance.attributes catalog = client.api.v1.catalogs.post(attrs) return redirect(reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog['id']})) class CatalogEditView(View): """View to edit an individual catalog.""" def get(self, request, catalog_id): """Display a form to edit this catalog.""" client = course_discovery_api_client(request.user) response = client.api.v1.catalogs(catalog_id).get() catalog = Catalog(attributes=response) form = CatalogForm(instance=catalog) return render_to_response('api_admin/catalogs/edit.html', { 'catalog': catalog, 'form': form, 'preview_url': reverse('api_admin:catalog-preview'), 'catalog_api_url': client.api.v1.courses.url(), }) def post(self, request, catalog_id): """Update or delete this catalog.""" client = course_discovery_api_client(request.user) if request.POST.get('delete-catalog') == 'on': client.api.v1.catalogs(catalog_id).delete() return 
redirect(reverse('api_admin:catalog-search')) form = CatalogForm(request.POST) if not form.is_valid(): response = client.api.v1.catalogs(catalog_id).get() catalog = Catalog(attributes=response) return render_to_response('api_admin/catalogs/edit.html', { 'catalog': catalog, 'form': form, 'preview_url': reverse('api_admin:catalog-preview'), 'catalog_api_url': client.api.v1.courses.url(), }, status=400) catalog = client.api.v1.catalogs(catalog_id).patch(form.instance.attributes) return redirect(reverse('api_admin:catalog-edit', kwargs={'catalog_id': catalog['id']})) class CatalogPreviewView(View): """Endpoint to preview courses for a query.""" def get(self, request): """ Return the results of a query against the course catalog API. If no query parameter is given, returns an empty result set. """ client = course_discovery_api_client(request.user) # Just pass along the request params including limit/offset pagination if 'q' in request.GET: results = client.api.v1.courses.get(**request.GET) # Ensure that we don't just return all the courses if no query is given else: results = {'count': 0, 'results': [], 'next': None, 'prev': None} return JsonResponse(results)
agpl-3.0
zhangsu/amphtml
validator/build.py
6
22339
#!/usr/bin/env python # # Copyright 2015 The AMP HTML Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the license. # """A build script which (thus far) works on Ubuntu 14.""" from __future__ import print_function import argparse import glob import logging import os import platform import re import subprocess import sys def Die(msg): """Prints error and exits with status 1. Args: msg: The error message to emit """ print(msg, file=sys.stderr) sys.exit(1) def EnsureNodeJsIsInstalled(): """Ensure Node.js is installed and that 'node' is the command to run.""" logging.info('entering ...') try: output = subprocess.check_output(['node', '--eval', 'console.log("42")']) if b'42' == output.strip(): return except (subprocess.CalledProcessError, OSError): pass Die('Node.js not found. Try "apt-get install nodejs" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation') def CheckPrereqs(): """Checks that various prerequisites for this script are satisfied.""" logging.info('entering ...') if platform.system() != 'Linux' and platform.system() != 'Darwin': Die('Sorry, this script assumes Linux or Mac OS X thus far. ' 'Please feel free to edit the source and fix it to your needs.') # Ensure source files are available. 
for f in [ 'validator-main.protoascii', 'validator.proto', 'validator_gen_js.py', 'package.json', 'engine/validator.js', 'engine/validator_test.js', 'engine/validator-in-browser.js', 'engine/tokenize-css.js', 'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-srcset.js', 'engine/parse-url.js' ]: if not os.path.exists(f): Die('%s not found. Must run in amp_validator source directory.' % f) # Ensure protoc is available. try: libprotoc_version = subprocess.check_output(['protoc', '--version']) except (subprocess.CalledProcessError, OSError): Die('Protobuf compiler not found. Try "apt-get install protobuf-compiler" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation.') # Ensure 'libprotoc 2.5.0' or newer. m = re.search(b'^(\\w+) (\\d+)\\.(\\d+)\\.(\\d+)', libprotoc_version) if (m.group(1) != b'libprotoc' or (int(m.group(2)), int(m.group(3)), int(m.group(4))) < (2, 5, 0)): Die('Expected libprotoc 2.5.0 or newer, saw: %s' % libprotoc_version) # Ensure that the Python protobuf package is installed. for m in ['descriptor', 'text_format', 'json_format']: module = 'google.protobuf.%s' % m try: __import__(module) except ImportError: # Python3 needs pip3. Python 2 needs pip. if sys.version_info < (3, 0): Die('%s not found. Try "pip install protobuf" or follow the install ' 'instructions at https://github.com/ampproject/amphtml/blob/master/' 'validator/README.md#installation' % module) else: Die('%s not found. Try "pip3 install protobuf" or follow the install ' 'instructions at https://github.com/ampproject/amphtml/blob/master/' 'validator/README.md#installation' % module) # Ensure that yarn is installed. try: subprocess.check_output(['yarn', '--version']) except (subprocess.CalledProcessError, OSError): Die('Yarn package manager not found. Run ' '"curl -o- -L https://yarnpkg.com/install.sh | bash" ' 'or see https://yarnpkg.com/docs/install.') # Ensure JVM installed. TODO: Check for version? 
try: subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT) except (subprocess.CalledProcessError, OSError): Die('Java missing. Try "apt-get install openjdk-7-jre" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation') logging.info('... done') def SetupOutDir(out_dir): """Sets up a clean output directory. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir if os.path.exists(out_dir): subprocess.check_call(['rm', '-rf', out_dir]) os.mkdir(out_dir) logging.info('... done') def InstallNodeDependencies(): """Installs the dependencies using yarn.""" logging.info('entering ...') # Install the project dependencies specified in package.json into # node_modules. logging.info('installing AMP Validator engine dependencies ...') subprocess.check_call( ['yarn', 'install'], stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout)) logging.info('installing AMP Validator nodejs dependencies ...') subprocess.check_call( ['yarn', 'install'], cwd='nodejs', stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout)) logging.info('... done') def GenValidatorPb2Py(out_dir): """Calls the proto compiler to generate validator_pb2.py. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir subprocess.check_call( ['protoc', 'validator.proto', '--python_out=%s' % out_dir]) open('%s/__init__.py' % out_dir, 'w').close() logging.info('... done') def GenValidatorProtoascii(out_dir): """Assembles the validator protoascii file from the main and extensions. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. 
""" logging.info('entering ...') assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir protoascii_segments = [open('validator-main.protoascii').read()] extensions = glob.glob('extensions/*/validator-*.protoascii') # In the Github project, the extensions are located in a sibling directory # to the validator rather than a child directory. if not extensions: extensions = glob.glob('../extensions/*/validator-*.protoascii') extensions.sort() for extension in extensions: protoascii_segments.append(open(extension).read()) f = open('%s/validator.protoascii' % out_dir, 'w') f.write(''.join(protoascii_segments)) f.close() logging.info('... done') def GenValidatorProtoGeneratedJs(out_dir): """Calls validator_gen_js to generate validator-proto-generated.js. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir # These imports happen late, within this method because they don't necessarily # exist when the module starts running, and the ones that probably do # are checked by CheckPrereqs. # pylint: disable=g-import-not-at-top from google.protobuf import text_format from google.protobuf import descriptor from dist import validator_pb2 import validator_gen_js # pylint: enable=g-import-not-at-top out = [] validator_gen_js.GenerateValidatorGeneratedJs( specfile=None, validator_pb2=validator_pb2, generate_proto_only=True, generate_spec_only=False, text_format=text_format, html_format=None, descriptor=descriptor, out=out) out.append('') f = open('%s/validator-proto-generated.js' % out_dir, 'w') f.write('\n'.join(out)) f.close() logging.info('... done') def GenValidatorGeneratedJs(out_dir): """Calls validator_gen_js to generate validator-generated.js and validator-generated.json. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. 
""" logging.info('entering ...') assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir # These imports happen late, within this method because they don't necessarily # exist when the module starts running, and the ones that probably do # are checked by CheckPrereqs. # pylint: disable=g-import-not-at-top from google.protobuf import text_format from google.protobuf import json_format from google.protobuf import descriptor from dist import validator_pb2 import validator_gen_js # pylint: enable=g-import-not-at-top out = [] validator_gen_js.GenerateValidatorGeneratedJs( specfile='%s/validator.protoascii' % out_dir, validator_pb2=validator_pb2, generate_proto_only=False, generate_spec_only=True, text_format=text_format, html_format=None, descriptor=descriptor, out=out) out.append('') f = open('%s/validator-generated.js' % out_dir, 'w') f.write('\n'.join(out)) f.close() out = [] validator_gen_js.GenerateValidatorGeneratedJson( specfile='%s/validator.protoascii' % out_dir, validator_pb2=validator_pb2, text_format=text_format, json_format=json_format, out=out) out.append('') f = open('%s/validator-generated.json' % out_dir, 'w') f.write('\n'.join(out)) f.close() logging.info('... done') def CompileWithClosure(js_files, definitions, entry_points, output_file): """Compiles the arguments with the Closure compiler for transpilation to ES5. 
Args: js_files: list of files to compile definitions: list of definitions flags to closure compiler entry_points: entry points (these won't be minimized) output_file: name of the Javascript output file """ cmd = [ 'java', '-jar', 'node_modules/google-closure-compiler-java/compiler.jar', '--language_out=ES5_STRICT', '--dependency_mode=STRICT', '--js_output_file=%s' % output_file ] cmd += ['--entry_point=%s' % e for e in entry_points] cmd += ['--output_manifest=%s' % ('%s.manifest' % output_file)] cmd += [ 'node_modules/google-closure-library/closure/**.js', '!node_modules/google-closure-library/closure/**_test.js', 'node_modules/google-closure-library/third_party/closure/**.js', '!node_modules/google-closure-library/third_party/closure/**_test.js' ] cmd += js_files cmd += definitions subprocess.check_call(cmd) def CompileValidatorMinified(out_dir): """Generates a minified validator script, which can be imported to validate. Args: out_dir: output directory """ logging.info('entering ...') CompileWithClosure( js_files=[ 'engine/definitions.js', 'engine/htmlparser.js', 'engine/parse-css.js', 'engine/parse-srcset.js', 'engine/parse-url.js', 'engine/tokenize-css.js', '%s/validator-generated.js' % out_dir, '%s/validator-proto-generated.js' % out_dir, 'engine/validator-in-browser.js', 'engine/validator.js', 'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js', 'engine/htmlparser-interface.js' ], definitions=[], entry_points=[ 'amp.validator.validateString', 'amp.validator.renderValidationResult', 'amp.validator.renderErrorMessage' ], output_file='%s/validator_minified.js' % out_dir) logging.info('... done') def RunSmokeTest(out_dir): """Runs a smoke test (minimum valid AMP and empty html file). Args: out_dir: output directory """ logging.info('entering ...') # Run index.js on the minimum valid amp and observe that it passes. 
p = subprocess.Popen( [ 'node', 'nodejs/index.js', '--validator_js', '%s/validator_minified.js' % out_dir, 'testdata/feature_tests/minimum_valid_amp.html', '--format=text' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = p.communicate() if (b'testdata/feature_tests/minimum_valid_amp.html: PASS\n', b'', p.returncode) != (stdout, stderr, 0): Die('Smoke test failed. returncode=%d stdout="%s" stderr="%s"' % (p.returncode, stdout, stderr)) # Run index.js on an empty file and observe that it fails. p = subprocess.Popen( [ 'node', 'nodejs/index.js', '--validator_js', '%s/validator_minified.js' % out_dir, 'testdata/feature_tests/empty.html', '--format=text' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = p.communicate() if p.returncode != 1: Die('smoke test failed. Expected p.returncode==1, saw: %s' % p.returncode) if not stderr.startswith(b'testdata/feature_tests/empty.html:1:0 ' b'The mandatory tag \'html'): Die('smoke test failed; stderr was: "%s"' % stderr) logging.info('... done') def RunIndexTest(): """Runs the index_test.js, which tests the NodeJS API. """ logging.info('entering ...') p = subprocess.Popen( ['node', './index_test.js'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd='nodejs') (stdout, stderr) = p.communicate() if p.returncode != 0: Die('index_test.js failed. returncode=%d stdout="%s" stderr="%s"' % (p.returncode, stdout, stderr)) logging.info('... done') def CompileValidatorTestMinified(out_dir): """Runs closure compiler for validator_test.js. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. 
""" logging.info('entering ...') CompileWithClosure( js_files=[ 'engine/definitions.js', 'engine/htmlparser.js', 'engine/parse-css.js', 'engine/parse-srcset.js', 'engine/parse-url.js', 'engine/tokenize-css.js', '%s/validator-generated.js' % out_dir, '%s/validator-proto-generated.js' % out_dir, 'engine/validator-in-browser.js', 'engine/validator.js', 'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js', 'engine/htmlparser-interface.js', 'engine/validator_test.js' ], definitions=[], entry_points=['amp.validator.ValidatorTest'], output_file='%s/validator_test_minified.js' % out_dir) logging.info('... success') def CompileHtmlparserTestMinified(out_dir): """Runs closure compiler for htmlparser_test.js. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') CompileWithClosure( js_files=[ 'engine/htmlparser.js', 'engine/htmlparser-interface.js', 'engine/htmlparser_test.js' ], definitions=[], entry_points=['amp.htmlparser.HtmlParserTest'], output_file='%s/htmlparser_test_minified.js' % out_dir) logging.info('... success') def CompileParseCssTestMinified(out_dir): """Runs closure compiler for parse-css_test.js. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') CompileWithClosure( js_files=[ 'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-url.js', 'engine/tokenize-css.js', 'engine/css-selectors.js', 'engine/json-testutil.js', 'engine/parse-css_test.js', '%s/validator-generated.js' % out_dir, '%s/validator-proto-generated.js' % out_dir ], definitions=[], entry_points=['parse_css.ParseCssTest'], output_file='%s/parse-css_test_minified.js' % out_dir) logging.info('... success') def CompileParseUrlTestMinified(out_dir): """Runs closure compiler for parse-url_test.js. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. 
""" logging.info('entering ...') CompileWithClosure( js_files=[ 'engine/definitions.js', 'engine/parse-url.js', 'engine/parse-css.js', 'engine/tokenize-css.js', 'engine/css-selectors.js', 'engine/json-testutil.js', 'engine/parse-url_test.js', '%s/validator-generated.js' % out_dir, '%s/validator-proto-generated.js' % out_dir ], definitions=[], entry_points=['parse_url.ParseURLTest'], output_file='%s/parse-url_test_minified.js' % out_dir) logging.info('... success') def CompileAmp4AdsParseCssTestMinified(out_dir): """Runs closure compiler for amp4ads-parse-css_test.js. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') CompileWithClosure( js_files=[ 'engine/definitions.js', 'engine/amp4ads-parse-css_test.js', 'engine/parse-css.js', 'engine/parse-url.js', 'engine/amp4ads-parse-css.js', 'engine/tokenize-css.js', 'engine/css-selectors.js', 'engine/json-testutil.js', '%s/validator-generated.js' % out_dir, '%s/validator-proto-generated.js' % out_dir ], definitions=[], entry_points=['parse_css.Amp4AdsParseCssTest'], output_file='%s/amp4ads-parse-css_test_minified.js' % out_dir) logging.info('... success') def CompileKeyframesParseCssTestMinified(out_dir): """Runs closure compiler for keyframes-parse-css_test.js. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') CompileWithClosure( js_files=[ 'engine/definitions.js', 'engine/keyframes-parse-css_test.js', 'engine/parse-css.js', 'engine/parse-url.js', 'engine/keyframes-parse-css.js', 'engine/tokenize-css.js', 'engine/css-selectors.js', 'engine/json-testutil.js', '%s/validator-generated.js' % out_dir, '%s/validator-proto-generated.js' % out_dir ], definitions=[], entry_points=['parse_css.KeyframesParseCssTest'], output_file='%s/keyframes-parse-css_test_minified.js' % out_dir) logging.info('... 
success') def CompileParseSrcsetTestMinified(out_dir): """Runs closure compiler for parse-srcset_test.js. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') CompileWithClosure( js_files=[ 'engine/definitions.js', 'engine/parse-srcset.js', 'engine/json-testutil.js', 'engine/parse-srcset_test.js', '%s/validator-generated.js' % out_dir, '%s/validator-proto-generated.js' % out_dir ], definitions=[], entry_points=['parse_srcset.ParseSrcsetTest'], output_file='%s/parse-srcset_test_minified.js' % out_dir) logging.info('... success') def GenerateTestRunner(out_dir): """Generates a test runner: a nodejs script that runs our minified tests. Args: out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') f = open('%s/test_runner' % out_dir, 'w') extensions_dir = 'extensions' # In the Github project, the extensions are located in a sibling directory # to the validator rather than a child directory. if not os.path.isdir(extensions_dir): extensions_dir = '../extensions' f.write("""#!/usr/bin/env node global.assert = require('assert'); global.fs = require('fs'); global.path = require('path'); var JasmineRunner = require('jasmine'); var jasmine = new JasmineRunner(); process.env.TESTDATA_ROOTS = 'testdata:%s' require('./validator_test_minified'); require('./htmlparser_test_minified'); require('./parse-css_test_minified'); require('./parse-url_test_minified'); require('./amp4ads-parse-css_test_minified'); require('./keyframes-parse-css_test_minified'); require('./parse-srcset_test_minified'); jasmine.onComplete(function (passed) { process.exit(passed ? 0 : 1); }); jasmine.execute(); """ % extensions_dir) os.chmod('%s/test_runner' % out_dir, 0o750) logging.info('... success') def RunTests(update_tests, out_dir): """Runs all the minified tests. Args: update_tests: a boolean indicating whether or not to update the test output files. 
out_dir: directory name of the output directory. Must not have slashes, dots, etc. """ logging.info('entering ...') env = os.environ.copy() if update_tests: env['UPDATE_VALIDATOR_TEST'] = '1' subprocess.check_call(['node', '%s/test_runner' % out_dir], env=env) logging.info('... success') def Main(parsed_args): """The main method, which executes all build steps and runs the tests.""" logging.basicConfig( format='[[%(filename)s %(funcName)s]] - %(message)s', level=(logging.ERROR if os.environ.get('TRAVIS') else logging.INFO)) EnsureNodeJsIsInstalled() CheckPrereqs() InstallNodeDependencies() SetupOutDir(out_dir='dist') GenValidatorProtoascii(out_dir='dist') GenValidatorPb2Py(out_dir='dist') GenValidatorProtoGeneratedJs(out_dir='dist') GenValidatorGeneratedJs(out_dir='dist') CompileValidatorMinified(out_dir='dist') RunSmokeTest(out_dir='dist') RunIndexTest() CompileValidatorTestMinified(out_dir='dist') CompileHtmlparserTestMinified(out_dir='dist') CompileParseCssTestMinified(out_dir='dist') CompileParseUrlTestMinified(out_dir='dist') CompileAmp4AdsParseCssTestMinified(out_dir='dist') CompileKeyframesParseCssTestMinified(out_dir='dist') CompileParseSrcsetTestMinified(out_dir='dist') GenerateTestRunner(out_dir='dist') RunTests(update_tests=parsed_args.update_tests, out_dir='dist') if __name__ == '__main__': parser = argparse.ArgumentParser( description='Build script for the AMP Validator.') parser.add_argument( '--update_tests', action='store_true', help=('If True, validator_test will overwrite the .out test files with ' 'the encountered test output.')) Main(parser.parse_args())
apache-2.0
yanchen036/tensorflow
tensorflow/python/debug/lib/session_debug_file_test.py
19
5085
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for debugger functionalities in tf.Session with file:// URLs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import tempfile from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.debug.lib import debug_data from tensorflow.python.debug.lib import debug_utils from tensorflow.python.debug.lib import session_debug_testlib from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest class SessionDebugFileTest(session_debug_testlib.SessionDebugTestBase): def _debug_urls(self, run_number=None): return ["file://%s" % self._debug_dump_dir(run_number=run_number)] def _debug_dump_dir(self, run_number=None): if run_number is None: return self._dump_root else: return os.path.join(self._dump_root, "run_%d" % run_number) def testAllowsDifferentWatchesOnDifferentRuns(self): """Test watching different tensors on different runs of the same graph.""" with session.Session( config=session_debug_testlib.no_rewrite_session_config()) as sess: u_init_val = [[5.0, 3.0], [-1.0, 0.0]] 
v_init_val = [[2.0], [-1.0]] # Use node names with overlapping namespace (i.e., parent directory) to # test concurrent, non-racing directory creation. u_name = "diff_Watch/u" v_name = "diff_Watch/v" u_init = constant_op.constant(u_init_val, shape=[2, 2]) u = variables.Variable(u_init, name=u_name) v_init = constant_op.constant(v_init_val, shape=[2, 1]) v = variables.Variable(v_init, name=v_name) w = math_ops.matmul(u, v, name="diff_Watch/matmul") u.initializer.run() v.initializer.run() for i in range(2): run_options = config_pb2.RunOptions(output_partition_graphs=True) run_dump_root = self._debug_dump_dir(run_number=i) debug_urls = self._debug_urls(run_number=i) if i == 0: # First debug run: Add debug tensor watch for u. debug_utils.add_debug_tensor_watch( run_options, "%s/read" % u_name, 0, debug_urls=debug_urls) else: # Second debug run: Add debug tensor watch for v. debug_utils.add_debug_tensor_watch( run_options, "%s/read" % v_name, 0, debug_urls=debug_urls) run_metadata = config_pb2.RunMetadata() # Invoke Session.run(). sess.run(w, options=run_options, run_metadata=run_metadata) self.assertEqual(self._expected_partition_graph_count, len(run_metadata.partition_graphs)) dump = debug_data.DebugDumpDir( run_dump_root, partition_graphs=run_metadata.partition_graphs) self.assertTrue(dump.loaded_partition_graphs()) # Each run should have generated only one dumped tensor, not two. 
self.assertEqual(1, dump.size) if i == 0: self.assertAllClose([u_init_val], dump.get_tensors("%s/read" % u_name, 0, "DebugIdentity")) self.assertGreaterEqual( dump.get_rel_timestamps("%s/read" % u_name, 0, "DebugIdentity")[0], 0) else: self.assertAllClose([v_init_val], dump.get_tensors("%s/read" % v_name, 0, "DebugIdentity")) self.assertGreaterEqual( dump.get_rel_timestamps("%s/read" % v_name, 0, "DebugIdentity")[0], 0) class SessionDebugConcurrentTest( session_debug_testlib.DebugConcurrentRunCallsTest): def setUp(self): self._num_concurrent_runs = 3 self._dump_roots = [] for _ in range(self._num_concurrent_runs): self._dump_roots.append(tempfile.mkdtemp()) def tearDown(self): ops.reset_default_graph() for dump_root in self._dump_roots: if os.path.isdir(dump_root): shutil.rmtree(dump_root) def _get_concurrent_debug_urls(self): return [("file://%s" % dump_root) for dump_root in self._dump_roots] if __name__ == "__main__": googletest.main()
apache-2.0
wetdesert/rad2py
assignments/program4A.py
8
2159
#!/usr/bin/env python # coding:utf-8 """PSP Program 4A - Linear Regression Parameter """ __author__ = "Mariano Reingart (reingart@gmail.com)" __copyright__ = "Copyright (C) 2011 Mariano Reingart" __license__ = "GPL 3.0" def mean(values): """Calculate the average of the numbers given: >>> mean([1, 2, 3]) 2.0 >>> mean([1, 2]) 1.5 >>> mean([1, 3]) 2.0 """ return sum(values) / float(len(values)) def linear_regression(x_values, y_values): """Calculate the linear regression parameters for a set of n values >>> x = 10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0 >>> y = 8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68 >>> b0, b1 = linear_regression(x, y) >>> round(b0, 3) 3.0 >>> round(b1, 3) 0.5 >>> x = 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 19.0, 8.0, 8.0, 8.0 >>> y = 6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89 >>> b0, b1 = linear_regression(x, y) >>> round(b0, 3) 3.002 >>> round(b1, 3) 0.5 """ # calculate aux variables x_avg = mean(x_values) y_avg = mean(y_values) n = len(x_values) sum_xy = sum([(x_values[i] * y_values[i]) for i in range(n)]) sum_x2 = sum([(x_values[i] ** 2) for i in range(n)]) # calculate regression coefficients b1 = (sum_xy - (n * x_avg * y_avg)) / (sum_x2 - n * (x_avg ** 2)) b0 = y_avg - b1 * x_avg return (b0, b1) if __name__ == "__main__": # Table D8 "Size Estimating regression data" est_loc = [130, 650, 99, 150, 128, 302, 95, 945, 368, 961] est_new_chg_loc = [163, 765, 141, 166, 137, 355, 136, 1206, 433, 1130] act_new_chg_loc = [186, 699, 132, 272, 291, 331, 199, 1890, 788, 1601] # Estimated Object versus Actual New and changed LOC b0, b1 = linear_regression(est_loc, act_new_chg_loc) print b0, b1 assert round(b0, 2) == -22.55 assert round(b1, 4) == 1.7279 # Estimated New and Changed LOC versus Actal and Changed LOC b0, b1 = linear_regression(est_new_chg_loc, act_new_chg_loc) assert round(b0, 2) == -23.92 assert round(b1, 4) == 1.4310 print b0, b1
gpl-3.0
roselleebarle04/opencog
tests/python/blending_test/link_connector_test.py
22
27877
__author__ = 'DongMin Kim' from opencog.atomspace import * from test_conceptual_blending_base import TestConceptualBlendingBase # Only run the unit tests if the required dependencies have been installed # (see: https://github.com/opencog/opencog/issues/337) try: __import__("nose.tools") except ImportError: import unittest raise unittest.SkipTest( "ImportError exception: " + "Can't find Nose. " + "make sure the required dependencies are installed." ) else: # noinspection PyPackageRequirements from nose.tools import * try: __import__("opencog.scheme_wrapper") except ImportError: import unittest raise unittest.SkipTest( "ImportError exception: " + "Can't find Scheme wrapper for Python. " + "make sure the required dependencies are installed." ) else: from opencog.scheme_wrapper import * try: __import__("blending.blend") except ImportError: import unittest raise unittest.SkipTest( "ImportError exception: " + "Can't find Python Conceptual Blender. " + "make sure the required dependencies are installed." ) else: from blending.blend import ConceptualBlending try: from blending.util.py_cog_execute import PyCogExecute PyCogExecute().load_scheme() except (ImportError, RuntimeError): import unittest raise unittest.SkipTest( "Can't load Scheme." + "make sure the you installed atomspace to /usr/local/share/opencog." ) # noinspection PyArgumentList, PyTypeChecker class TestLinkConnector(TestConceptualBlendingBase): """ 2.4 LinkConnector tests. """ """ 2.4.1. ConnectSimple tests. 
""" __test__ = True def __default_connect_simple(self): self.a.add_link( types.InheritanceLink, [ self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "default-config") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:atoms-chooser"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "ChooseAll") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:blending-decider"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "DecideBestSTI") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:link-connector"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "ConnectSimple") ] ) def test_connect_simple(self): self.__default_connect_simple() # Test blender makes only one new blend node. result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) assert_equal(len(result), 1) # Test blender makes new blend node correctly. blended_node = result[0] for link in blended_node.incoming: dst_nodes_name = map(lambda atom: atom.name, link.out) if "metal" in dst_nodes_name: # A. Not duplicated link. assert_almost_equal(link.tv.mean, 0.6) assert_almost_equal(link.tv.confidence, 0.8) elif "move" in dst_nodes_name: # B. Duplicated, not conflicted link. # (0.9 * 0.8 + 0.7 * 0.9) / (0.8 + 0.9) = 0.794117 assert_less_equal(link.tv.mean, 0.81) assert_greater_equal(link.tv.mean, 0.79) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) elif "vehicle" in dst_nodes_name: # C.1 Duplicated, conflicted link. 
# (0.9 * 0.8 + 0.1 * 0.9) / (0.8 + 0.9) = 0.476471 assert_less_equal(link.tv.mean, 0.48) assert_greater_equal(link.tv.mean, 0.46) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) elif "person" in dst_nodes_name: # C.2 Duplicated, conflicted link. # (0.1 * 0.8 + 0.8 * 0.9) / (0.8 + 0.9) = 0.470588 assert_less_equal(link.tv.mean, 0.48) assert_greater_equal(link.tv.mean, 0.46) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) """ 2.4.2. ConnectConflictRandom tests. """ def __default_connect_conflict_random(self): self.a.add_link( types.InheritanceLink, [ self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "default-config") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:atoms-chooser"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "ChooseAll") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:blending-decider"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "DecideBestSTI") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:link-connector"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "ConnectConflictRandom") ] ) def test_connect_conflict_random(self): self.__default_connect_conflict_random() # Test blender makes only one new blend node. result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) assert_equal(len(result), 1) # Test blender makes new blend node correctly. blended_node = result[0] for link in blended_node.incoming: dst_nodes_name = map(lambda atom: atom.name, link.out) if "metal" in dst_nodes_name: # A. Not duplicated link. 
assert_almost_equal(link.tv.mean, 0.6) assert_almost_equal(link.tv.confidence, 0.8) elif "move" in dst_nodes_name: # B. Duplicated, not conflicted link. # (0.9 * 0.8 + 0.7 * 0.9) / (0.8 + 0.9) = 0.794117 assert_less_equal(link.tv.mean, 0.81) assert_greater_equal(link.tv.mean, 0.79) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) elif "vehicle" in dst_nodes_name: # C.1 Duplicated, conflicted link. if "car" in dst_nodes_name: # Randomly selected. - car's link - (0.9, 0.8) assert_equal(link.tv.mean, 0.9) assert_equal(link.tv.confidence, 0.8) elif "man" in dst_nodes_name: # Randomly selected. - man's link - (0.1, 0.9) assert_equal(link.tv.mean, 0.1) assert_equal(link.tv.confidence, 0.9) elif "person" in dst_nodes_name: # C.2 Duplicated, conflicted link. if "car" in dst_nodes_name: # Randomly selected. - car's link - (0.1, 0.8) assert_equal(link.tv.mean, 0.1) assert_equal(link.tv.confidence, 0.8) elif "man" in dst_nodes_name: # Randomly selected. - man's link - (0.8, 0.9) assert_equal(link.tv.mean, 0.8) assert_equal(link.tv.confidence, 0.9) def test_connect_conflict_random_with_strength_diff_limit(self): self.__default_connect_conflict_random() # Test blender thinks links are conflict # if they have difference value above 0.9 of strength. connect_strength_diff_limit_link = self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:connect-strength-diff-limit" ), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "0.9") ] ) # Test blender makes only one new blend node. result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) self.a.remove(connect_strength_diff_limit_link) assert_equal(len(result), 1) # Test blender makes new blend node correctly. blended_node = result[0] # There not exists conflict links # because threshold of strength value was set in 0.9. 
for link in blended_node.incoming: dst_nodes_name = map(lambda atom: atom.name, link.out) if "metal" in dst_nodes_name: # A. Not duplicated link. assert_almost_equal(link.tv.mean, 0.6) assert_almost_equal(link.tv.confidence, 0.8) elif "move" in dst_nodes_name: # B. Duplicated, not conflicted link. # (0.9 * 0.8 + 0.7 * 0.9) / (0.8 + 0.9) = 0.794117 assert_less_equal(link.tv.mean, 0.81) assert_greater_equal(link.tv.mean, 0.79) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) elif "vehicle" in dst_nodes_name: # C.1 Duplicated, conflicted link. # (0.9 * 0.8 + 0.1 * 0.9) / (0.8 + 0.9) = 0.476471 assert_less_equal(link.tv.mean, 0.48) assert_greater_equal(link.tv.mean, 0.46) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) elif "person" in dst_nodes_name: # C.2 Duplicated, conflicted link. # (0.1 * 0.8 + 0.8 * 0.9) / (0.8 + 0.9) = 0.470588 assert_less_equal(link.tv.mean, 0.48) assert_greater_equal(link.tv.mean, 0.46) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) # Test blender thinks links are conflict # if they have difference value above 0.01 of strength. connect_strength_diff_limit_link = self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:connect-strength-diff-limit" ), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "0.01") ] ) # Test blender makes only one new blend node. result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) self.a.remove(connect_strength_diff_limit_link) assert_equal(len(result), 1) # Test blender makes new blend node correctly. blended_node = result[0] # There exists 3 conflict links # because threshold of strength value was set in 0.01. 
for link in blended_node.incoming: dst_nodes_name = map(lambda atom: atom.name, link.out) if "metal" in dst_nodes_name: # A. Not duplicated link. assert_almost_equal(link.tv.mean, 0.6) assert_almost_equal(link.tv.confidence, 0.8) elif "move" in dst_nodes_name: # B. Duplicated, not conflicted link. if "car" in dst_nodes_name: # Randomly selected. - car's link - (0.9, 0.8) assert_equal(link.tv.mean, 0.9) assert_equal(link.tv.confidence, 0.8) elif "man" in dst_nodes_name: # Randomly selected. - man's link - (0.7, 0.9) assert_equal(link.tv.mean, 0.7) assert_equal(link.tv.confidence, 0.9) elif "vehicle" in dst_nodes_name: # C.1 Duplicated, conflicted link. if "car" in dst_nodes_name: # Randomly selected. - car's link - (0.9, 0.8) assert_equal(link.tv.mean, 0.9) assert_equal(link.tv.confidence, 0.8) elif "man" in dst_nodes_name: # Randomly selected. - man's link - (0.1, 0.9) assert_equal(link.tv.mean, 0.1) assert_equal(link.tv.confidence, 0.9) elif "person" in dst_nodes_name: # C.2 Duplicated, conflicted link. if "car" in dst_nodes_name: # Randomly selected. - car's link - (0.1, 0.8) assert_equal(link.tv.mean, 0.1) assert_equal(link.tv.confidence, 0.8) elif "man" in dst_nodes_name: # Randomly selected. - man's link - (0.8, 0.9) assert_equal(link.tv.mean, 0.8) assert_equal(link.tv.confidence, 0.9) def test_connect_conflict_random_with_confidence_above_limit(self): self.__default_connect_conflict_random() # Test blender thinks links are conflict # if they have confidence value above 0.9 both. connect_strength_diff_limit_link = self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:connect-confidence-above-limit" ), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "0.9") ] ) # Test blender makes only one new blend node. 
result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) self.a.remove(connect_strength_diff_limit_link) assert_equal(len(result), 1) # Test blender makes new blend node correctly. blended_node = result[0] # There not exists conflict links # because threshold of confidence value was set in 0.9. for link in blended_node.incoming: dst_nodes_name = map(lambda atom: atom.name, link.out) if "metal" in dst_nodes_name: # A. Not duplicated link. assert_almost_equal(link.tv.mean, 0.6) assert_almost_equal(link.tv.confidence, 0.8) elif "move" in dst_nodes_name: # B. Duplicated, not conflicted link. # (0.9 * 0.8 + 0.7 * 0.9) / (0.8 + 0.9) = 0.794117 assert_less_equal(link.tv.mean, 0.81) assert_greater_equal(link.tv.mean, 0.79) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) elif "vehicle" in dst_nodes_name: # C.1 Duplicated, conflicted link. # (0.9 * 0.8 + 0.1 * 0.9) / (0.8 + 0.9) = 0.476471 assert_less_equal(link.tv.mean, 0.48) assert_greater_equal(link.tv.mean, 0.46) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) elif "person" in dst_nodes_name: # C.2 Duplicated, conflicted link. # (0.1 * 0.8 + 0.8 * 0.9) / (0.8 + 0.9) = 0.470588 assert_less_equal(link.tv.mean, 0.48) assert_greater_equal(link.tv.mean, 0.46) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) # Test blender thinks links are conflict # if they have confidence value above 0.4 both. connect_strength_diff_limit_link = self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:connect-confidence-above-limit" ), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "0.4") ] ) # Test blender makes only one new blend node. 
result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) self.a.remove(connect_strength_diff_limit_link) assert_equal(len(result), 1) # Test blender makes new blend node correctly. blended_node = result[0] # There exists 2 conflict links # because threshold of strength value was set in 0.4. for link in blended_node.incoming: dst_nodes_name = map(lambda atom: atom.name, link.out) if "metal" in dst_nodes_name: # A. Not duplicated link. assert_almost_equal(link.tv.mean, 0.6) assert_almost_equal(link.tv.confidence, 0.8) elif "move" in dst_nodes_name: # B. Duplicated, not conflicted link. # (0.9 * 0.8 + 0.7 * 0.9) / (0.8 + 0.9) = 0.794117 assert_less_equal(link.tv.mean, 0.81) assert_greater_equal(link.tv.mean, 0.79) # (0.8 + 0.9) / 2 = 0.85 assert_less_equal(link.tv.confidence, 0.86) assert_greater_equal(link.tv.confidence, 0.84) elif "vehicle" in dst_nodes_name: # C.1 Duplicated, conflicted link. if "car" in dst_nodes_name: # Randomly selected. - car's link - (0.9, 0.8) assert_equal(link.tv.mean, 0.9) assert_equal(link.tv.confidence, 0.8) elif "man" in dst_nodes_name: # Randomly selected. - man's link - (0.1, 0.9) assert_equal(link.tv.mean, 0.1) assert_equal(link.tv.confidence, 0.9) elif "person" in dst_nodes_name: # C.2 Duplicated, conflicted link. if "car" in dst_nodes_name: # Randomly selected. - car's link - (0.1, 0.8) assert_equal(link.tv.mean, 0.1) assert_equal(link.tv.confidence, 0.8) elif "man" in dst_nodes_name: # Randomly selected. - man's link - (0.8, 0.9) assert_equal(link.tv.mean, 0.8) assert_equal(link.tv.confidence, 0.9) """ 2.4.3. ConnectConflictAllViable tests. 
""" def __default_connect_conflict_all_viable(self): self.a.add_link( types.InheritanceLink, [ self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "default-config") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:atoms-chooser"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "ChooseAll") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:blending-decider"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "DecideBestSTI") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:link-connector"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "ConnectConflictAllViable") ] ) def test_connect_conflict_all_viable(self): self.__default_connect_conflict_all_viable() # Test blender makes all viable new blend nodes = 2^2 = 4. result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) assert_equal(len(result), 4) def test_connect_conflict_all_viable_with_strength_diff_limit(self): self.__default_connect_conflict_all_viable() # Test blender thinks links are conflict # if they have difference value above 0.9 of strength. connect_strength_diff_limit_link = self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:connect-strength-diff-limit" ), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "0.9") ] ) # Test blender makes only one new blend node. result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) self.a.remove(connect_strength_diff_limit_link) assert_equal(len(result), 1) # Test blender thinks links are conflict # if they have difference value above 0.01 of strength. 
connect_strength_diff_limit_link = self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:connect-strength-diff-limit" ), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "0.01") ] ) # Test blender makes all viable new blend nodes = 2^3 = 8. result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) self.a.remove(connect_strength_diff_limit_link) assert_equal(len(result), 8) def test_connect_conflict_all_viable_with_confidence_above_limit(self): self.__default_connect_conflict_all_viable() # Test blender thinks links are conflict # if they have confidence value above 0.9 both. connect_strength_diff_limit_link = self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:connect-strength-diff-limit" ), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "0.9") ] ) # Test blender makes only one new blend node. result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) self.a.remove(connect_strength_diff_limit_link) assert_equal(len(result), 1) # Test blender thinks links are conflict # if they have confidence value above 0.4 both. connect_strength_diff_limit_link = self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:connect-strength-diff-limit" ), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "0.4") ] ) # Test blender makes all viable new blend nodes = 2^2 = 4. result = self.blender.run( self.a.get_atoms_by_type(types.Node), self.a.add_node(types.ConceptNode, "my-config") ) self.a.remove(connect_strength_diff_limit_link) assert_equal(len(result), 4) """ 2.4.4. ConnectConflictInteractionInformation tests. 
""" def __default_connect_conflict_interaction_information(self): self.a.add_link( types.InheritanceLink, [ self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "default-config") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:atoms-chooser"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "ChooseNull") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:blending-decider"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "DecideNull") ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:link-connector"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "ConnectConflictInteractionInformation" ) ] ) self.a.add_link( types.ExecutionLink, [ self.a.add_node(types.SchemaNode, "BLEND:connect-check-type"), self.a.add_node(types.ConceptNode, "my-config"), self.a.add_node(types.ConceptNode, "SimilarityLink") ] ) def test_connect_conflict_interaction_information(self): self.__default_connect_conflict_interaction_information() # Test blender makes only one new blend node. result = self.blender.run( [ self.a.add_node(types.ConceptNode, "car"), self.a.add_node(types.ConceptNode, "man") ], self.a.add_node(types.ConceptNode, "my-config") ) assert_equal(len(result), 1) # Test blender makes new blend node correctly. blended_node = result[0] # In this test case, interaction information algorithm says # a link set includes 'move, metal, person, vehicle' is most surprising. 
# # [Selected]: move, metal, person, vehicle, : 0.643179893494 # move, metal, person, : 0.488187074661 # move, metal, vehicle, : 0.4154522717 # move, metal, : 0.0 dst_nodes_name = list() for link in blended_node.incoming: dst_nodes_name.extend(map(lambda atom: atom.name, link.out)) assert_in("move", dst_nodes_name) assert_in("metal", dst_nodes_name) assert_in("person", dst_nodes_name) assert_in("vehicle", dst_nodes_name)
agpl-3.0
follow99/django
django/views/decorators/cache.py
586
2304
from functools import wraps from django.middleware.cache import CacheMiddleware from django.utils.cache import add_never_cache_headers, patch_cache_control from django.utils.decorators import ( available_attrs, decorator_from_middleware_with_args, ) def cache_page(*args, **kwargs): """ Decorator for views that tries getting the page from the cache and populates the cache if the page isn't in the cache yet. The cache is keyed by the URL and some data from the headers. Additionally there is the key prefix that is used to distinguish different cache areas in a multi-site setup. You could use the get_current_site().domain, for example, as that is unique across a Django project. Additionally, all headers from the response's Vary header will be taken into account on caching -- just like the middleware does. """ # We also add some asserts to give better error messages in case people are # using other ways to call cache_page that no longer work. if len(args) != 1 or callable(args[0]): raise TypeError("cache_page has a single mandatory positional argument: timeout") cache_timeout = args[0] cache_alias = kwargs.pop('cache', None) key_prefix = kwargs.pop('key_prefix', None) if kwargs: raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix") return decorator_from_middleware_with_args(CacheMiddleware)( cache_timeout=cache_timeout, cache_alias=cache_alias, key_prefix=key_prefix ) def cache_control(**kwargs): def _cache_controller(viewfunc): @wraps(viewfunc, assigned=available_attrs(viewfunc)) def _cache_controlled(request, *args, **kw): response = viewfunc(request, *args, **kw) patch_cache_control(response, **kwargs) return response return _cache_controlled return _cache_controller def never_cache(view_func): """ Decorator that adds headers to a response so that it will never be cached. 
""" @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view_func(request, *args, **kwargs): response = view_func(request, *args, **kwargs) add_never_cache_headers(response) return response return _wrapped_view_func
bsd-3-clause
bixbydev/Bixby
google/gdata/test_config.py
72
17854
#!/usr/bin/env python # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest import getpass import inspect import atom.mock_http_core import gdata.gauth """Loads configuration for tests which connect to Google servers. Settings used in tests are stored in a ConfigCollection instance in this module called options. If your test needs to get a test related setting, use import gdata.test_config option_value = gdata.test_config.options.get_value('x') The above will check the command line for an '--x' argument, and if not found will either use the default value for 'x' or prompt the user to enter one. Your test can override the value specified by the user by performing: gdata.test_config.options.set_value('x', 'y') If your test uses a new option which you would like to allow the user to specify on the command line or via a prompt, you can use the register_option method as follows: gdata.test_config.options.register( 'option_name', 'Prompt shown to the user', secret=False #As for password. 'This is the description of the option, shown when help is requested.', 'default value, provide only if you do not want the user to be prompted') """ class Option(object): def __init__(self, name, prompt, secret=False, description=None, default=None): self.name = name self.prompt = prompt self.secret = secret self.description = description self.default = default def get(self): value = self.default # Check for a command line parameter. 
for i in xrange(len(sys.argv)): if sys.argv[i].startswith('--%s=' % self.name): value = sys.argv[i].split('=')[1] elif sys.argv[i] == '--%s' % self.name: value = sys.argv[i + 1] # If the param was not on the command line, ask the user to input the # value. # In order for this to prompt the user, the default value for the option # must be None. if value is None: prompt = '%s: ' % self.prompt if self.secret: value = getpass.getpass(prompt) else: print 'You can specify this on the command line using --%s' % self.name value = raw_input(prompt) return value class ConfigCollection(object): def __init__(self, options=None): self.options = options or {} self.values = {} def register_option(self, option): self.options[option.name] = option def register(self, *args, **kwargs): self.register_option(Option(*args, **kwargs)) def get_value(self, option_name): if option_name in self.values: return self.values[option_name] value = self.options[option_name].get() if value is not None: self.values[option_name] = value return value def set_value(self, option_name, value): self.values[option_name] = value def render_usage(self): message_parts = [] for opt_name, option in self.options.iteritems(): message_parts.append('--%s: %s' % (opt_name, option.description)) return '\n'.join(message_parts) options = ConfigCollection() # Register the default options. options.register( 'username', 'Please enter the email address of your test account', description=('The email address you want to sign in with. ' 'Make sure this is a test account as these tests may edit' ' or delete data.')) options.register( 'password', 'Please enter the password for your test account', secret=True, description='The test account password.') options.register( 'clearcache', 'Delete cached data? 
(enter true or false)', description=('If set to true, any temporary files which cache test' ' requests and responses will be deleted.'), default='true') options.register( 'savecache', 'Save requests and responses in a temporary file? (enter true or false)', description=('If set to true, requests to the server and responses will' ' be saved in temporary files.'), default='false') options.register( 'runlive', 'Run the live tests which contact the server? (enter true or false)', description=('If set to true, the tests will make real HTTP requests to' ' the servers. This slows down test execution and may' ' modify the users data, be sure to use a test account.'), default='true') options.register( 'host', 'Run the live tests against the given host', description='Examples: docs.google.com, spreadsheets.google.com, etc.', default='') options.register( 'ssl', 'Run the live tests over SSL (enter true or false)', description='If set to true, all tests will be performed over HTTPS (SSL)', default='false') options.register( 'clean', 'Clean ALL data first before and after each test (enter true or false)', description='If set to true, all tests will remove all data (DANGEROUS)', default='false') options.register( 'appsusername', 'Please enter the email address of your test Apps domain account', description=('The email address you want to sign in with. ' 'Make sure this is a test account on your Apps domain as ' 'these tests may edit or delete data.')) options.register( 'appspassword', 'Please enter the password for your test Apps domain account', secret=True, description='The test Apps account password.') # Other options which may be used if needed. BLOG_ID_OPTION = Option( 'blogid', 'Please enter the ID of your test blog', description=('The blog ID for the blog which should have test posts added' ' to it. 
Example 7682659670455539811')) TEST_IMAGE_LOCATION_OPTION = Option( 'imgpath', 'Please enter the full path to a test image to upload', description=('This test image will be uploaded to a service which' ' accepts a media file, it must be a jpeg.')) SPREADSHEET_ID_OPTION = Option( 'spreadsheetid', 'Please enter the ID of a spreadsheet to use in these tests', description=('The spreadsheet ID for the spreadsheet which should be' ' modified by theses tests.')) APPS_DOMAIN_OPTION = Option( 'appsdomain', 'Please enter your Google Apps domain', description=('The domain the Google Apps is hosted on or leave blank' ' if n/a')) SITES_NAME_OPTION = Option( 'sitename', 'Please enter name of your Google Site', description='The webspace name of the Site found in its URL.') PROJECT_NAME_OPTION = Option( 'project_name', 'Please enter the name of your project hosting project', description=('The name of the project which should have test issues added' ' to it. Example gdata-python-client')) ISSUE_ASSIGNEE_OPTION = Option( 'issue_assignee', 'Enter the email address of the target owner of the updated issue.', description=('The email address of the user a created issue\'s owner will ' ' become. Example testuser2@gmail.com')) GA_TABLE_ID = Option( 'table_id', 'Enter the Table ID of the Google Analytics profile to test', description=('The Table ID of the Google Analytics profile to test.' 
' Example ga:1174')) TARGET_USERNAME_OPTION = Option( 'targetusername', 'Please enter the username (without domain) of the user which will be' ' affected by the tests', description=('The username of the user to be tested')) YT_DEVELOPER_KEY_OPTION = Option( 'developerkey', 'Please enter your YouTube developer key', description=('The YouTube developer key for your account')) YT_CLIENT_ID_OPTION = Option( 'clientid', 'Please enter your YouTube client ID', description=('The YouTube client ID for your account')) YT_VIDEO_ID_OPTION= Option( 'videoid', 'Please enter the ID of a YouTube video you uploaded', description=('The video ID of a YouTube video uploaded to your account')) # Functions to inject a cachable HTTP client into a service client. def configure_client(client, case_name, service_name, use_apps_auth=False): """Sets up a mock client which will reuse a saved session. Should be called during setUp of each unit test. Handles authentication to allow the GDClient to make requests which require an auth header. Args: client: a gdata.GDClient whose http_client member should be replaced with a atom.mock_http_core.MockHttpClient so that repeated executions can used cached responses instead of contacting the server. case_name: str The name of the test case class. Examples: 'BloggerTest', 'ContactsTest'. Used to save a session for the ClientLogin auth token request, so the case_name should be reused if and only if the same username, password, and service are being used. service_name: str The service name as used for ClientLogin to identify the Google Data API being accessed. Example: 'blogger', 'wise', etc. use_apps_auth: bool (optional) If set to True, use appsusername and appspassword command-line args instead of username and password respectively. """ # Use a mock HTTP client which will record and replay the HTTP traffic # from these tests. 
client.http_client = atom.mock_http_core.MockHttpClient() client.http_client.cache_case_name = case_name # Getting the auth token only needs to be done once in the course of test # runs. auth_token_key = '%s_auth_token' % service_name if (auth_token_key not in options.values and options.get_value('runlive') == 'true'): client.http_client.cache_test_name = 'client_login' cache_name = client.http_client.get_cache_file_name() if options.get_value('clearcache') == 'true': client.http_client.delete_session(cache_name) client.http_client.use_cached_session(cache_name) if not use_apps_auth: username = options.get_value('username') password = options.get_value('password') else: username = options.get_value('appsusername') password = options.get_value('appspassword') auth_token = client.client_login(username, password, case_name, service=service_name) options.values[auth_token_key] = gdata.gauth.token_to_blob(auth_token) if client.alt_auth_service is not None: options.values[client.alt_auth_service] = gdata.gauth.token_to_blob( client.alt_auth_token) client.http_client.close_session() # Allow a config auth_token of False to prevent the client's auth header # from being modified. if auth_token_key in options.values: client.auth_token = gdata.gauth.token_from_blob( options.values[auth_token_key]) if client.alt_auth_service is not None: client.alt_auth_token = gdata.gauth.token_from_blob( options.values[client.alt_auth_service]) if options.get_value('host'): client.host = options.get_value('host') def configure_cache(client, test_name): """Loads or begins a cached session to record HTTP traffic. Should be called at the beginning of each test method. Args: client: a gdata.GDClient whose http_client member has been replaced with a atom.mock_http_core.MockHttpClient so that repeated executions can used cached responses instead of contacting the server. test_name: str The name of this test method. Examples: 'TestClass.test_x_works', 'TestClass.test_crud_operations'. 
This is used to name the recording of the HTTP requests and responses, so it should be unique to each test method in the test case. """ # Auth token is obtained in configure_client which is called as part of # setUp. client.http_client.cache_test_name = test_name cache_name = client.http_client.get_cache_file_name() if options.get_value('clearcache') == 'true': client.http_client.delete_session(cache_name) client.http_client.use_cached_session(cache_name) def close_client(client): """Saves the recoded responses to a temp file if the config file allows. This should be called in the unit test's tearDown method. Checks to see if the 'savecache' option is set to 'true', to make sure we only save sessions to repeat if the user desires. """ if client and options.get_value('savecache') == 'true': # If this was a live request, save the recording. client.http_client.close_session() def configure_service(service, case_name, service_name): """Sets up a mock GDataService v1 client to reuse recorded sessions. Should be called during setUp of each unit test. This is a duplicate of configure_client, modified to handle old v1 service classes. """ service.http_client.v2_http_client = atom.mock_http_core.MockHttpClient() service.http_client.v2_http_client.cache_case_name = case_name # Getting the auth token only needs to be done once in the course of test # runs. 
auth_token_key = 'service_%s_auth_token' % service_name if (auth_token_key not in options.values and options.get_value('runlive') == 'true'): service.http_client.v2_http_client.cache_test_name = 'client_login' cache_name = service.http_client.v2_http_client.get_cache_file_name() if options.get_value('clearcache') == 'true': service.http_client.v2_http_client.delete_session(cache_name) service.http_client.v2_http_client.use_cached_session(cache_name) service.ClientLogin(options.get_value('username'), options.get_value('password'), service=service_name, source=case_name) options.values[auth_token_key] = service.GetClientLoginToken() service.http_client.v2_http_client.close_session() if auth_token_key in options.values: service.SetClientLoginToken(options.values[auth_token_key]) def configure_service_cache(service, test_name): """Loads or starts a session recording for a v1 Service object. Duplicates the behavior of configure_cache, but the target for this function is a v1 Service object instead of a v2 Client. """ service.http_client.v2_http_client.cache_test_name = test_name cache_name = service.http_client.v2_http_client.get_cache_file_name() if options.get_value('clearcache') == 'true': service.http_client.v2_http_client.delete_session(cache_name) service.http_client.v2_http_client.use_cached_session(cache_name) def close_service(service): if service and options.get_value('savecache') == 'true': # If this was a live request, save the recording. service.http_client.v2_http_client.close_session() def build_suite(classes): """Creates a TestSuite for all unit test classes in the list. Assumes that each of the classes in the list has unit test methods which begin with 'test'. Calls unittest.makeSuite. Returns: A new unittest.TestSuite containing a test suite for all classes. 
""" suites = [unittest.makeSuite(a_class, 'test') for a_class in classes] return unittest.TestSuite(suites) def check_data_classes(test, classes): import inspect for data_class in classes: test.assert_(data_class.__doc__ is not None, 'The class %s should have a docstring' % data_class) if hasattr(data_class, '_qname'): qname_versions = None if isinstance(data_class._qname, tuple): qname_versions = data_class._qname else: qname_versions = (data_class._qname,) for versioned_qname in qname_versions: test.assert_(isinstance(versioned_qname, str), 'The class %s has a non-string _qname' % data_class) test.assert_(not versioned_qname.endswith('}'), 'The _qname for class %s is only a namespace' % ( data_class)) for attribute_name, value in data_class.__dict__.iteritems(): # Ignore all elements that start with _ (private members) if not attribute_name.startswith('_'): try: if not (isinstance(value, str) or inspect.isfunction(value) or (isinstance(value, list) and issubclass(value[0], atom.core.XmlElement)) or type(value) == property # Allow properties. or inspect.ismethod(value) # Allow methods. or inspect.ismethoddescriptor(value) # Allow method descriptors. # staticmethod et al. or issubclass(value, atom.core.XmlElement)): test.fail( 'XmlElement member should have an attribute, XML class,' ' or list of XML classes as attributes.') except TypeError: test.fail('Element %s in %s was of type %s' % ( attribute_name, data_class._qname, type(value))) def check_clients_with_auth(test, classes): for client_class in classes: test.assert_(hasattr(client_class, 'api_version')) test.assert_(isinstance(client_class.auth_service, (str, unicode, int))) test.assert_(hasattr(client_class, 'auth_service')) test.assert_(isinstance(client_class.auth_service, (str, unicode))) test.assert_(hasattr(client_class, 'auth_scopes')) test.assert_(isinstance(client_class.auth_scopes, (list, tuple)))
gpl-3.0
chfoo/fogchamp
util/csv2json.py
1
4170
'''Convert CSV files into JSON files needed for the visualizer page.''' import argparse import json import os import functools from util.readers.addarash1 import AddarashReader from util.readers.bulbapedia import BulbapediaReader from util.readers.chfoo import ChfooReader from util.readers.editornotes import EditorNotesReader from util.readers.nkekev import NkekevReader from util.readers.pokedex import PokedexReader def main(): arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--output-dir', default='./') arg_parser.add_argument('--metadata-dir', default='metadata/') args = arg_parser.parse_args() nkekev_dir = os.path.join(args.metadata_dir, 'nkekev') chfoo_dir = os.path.join(args.metadata_dir, 'chfoo') addarash1_dir = os.path.join(args.metadata_dir, 'addarash1') pokedex_dir = os.path.join(args.metadata_dir, 'pokedex', 'pokedex', 'data', 'csv') bulbapedia_dir = os.path.join(args.metadata_dir, 'bulbapedia') editor_notes_dir = os.path.join(args.metadata_dir, 'editor_notes') output_dir = args.output_dir pokedex_reader = PokedexReader(pokedex_dir) nkekev_reader = NkekevReader(nkekev_dir) chfoo_reader = ChfooReader(chfoo_dir) addarash1_reader = AddarashReader(addarash1_dir) bulbapedia_reader = BulbapediaReader(bulbapedia_dir) editor_notes_reader = EditorNotesReader(editor_notes_dir) # Build each Pokemon's stats movesets_funcs = [ ('pbr-2.0', functools.partial( addarash1_reader.read_pbr_2_0, nkekev_reader, chfoo_reader)), ('pbr-gold-1.2-2015-11-07', functools.partial( addarash1_reader.read_pbr_gold_1_2_2015_11_07, nkekev_reader, chfoo_reader)), ('pbr-gold-1.2', functools.partial(addarash1_reader.read_pbr_gold_1_2, nkekev_reader, chfoo_reader)), ('pbr-seel', functools.partial(chfoo_reader.read_pbr_seel, nkekev_reader)), ('pbr-platinum', nkekev_reader.read_pbr_platinum), ('pbr-gold', nkekev_reader.read_pbr_gold), ] for move_slug, func in movesets_funcs: pokemon_stats = {} pokemon_slugs = [] pokemon_types = pokedex_reader.read_pokemon_types() pokemon_weights = 
pokedex_reader.read_pokemon_weights() for pokemon_stat in func(): slug = pokemon_stat.pop('slug') pokemon_slugs.append(slug) pokemon_stats[slug] = pokemon_stat pokemon_stats[slug]['types'] = pokemon_types[pokemon_stat['number']] pokemon_stats[slug]['weight'] = pokemon_weights[pokemon_stat['number']] json_path = os.path.join(output_dir, '{}.json'.format(move_slug)) with open(json_path, 'w') as file: file.write(json.dumps({ 'stats': pokemon_stats, 'pokemon_slugs': pokemon_slugs }, indent=2, sort_keys=True)) # Build all the moves move_stats = {} for move in pokedex_reader.read_moves(): slug = move.pop('slug') move_stats[slug] = move bulbapedia_reader.downgrade_move_changes(move_stats) editor_notes_reader.add_move_notes(move_stats) json_path = os.path.join(output_dir, 'moves.json') with open(json_path, 'w') as file: file.write(json.dumps(move_stats, indent=2, sort_keys=True)) # Build descriptions and misc abilities = {} for ability in pokedex_reader.read_abilities(): slug = ability.pop('slug') abilities[slug] = ability editor_notes_reader.add_ability_notes(abilities) types_efficacy = pokedex_reader.read_type_efficacy() items = {} for item in pokedex_reader.read_items(): slug = item.pop('slug') items[slug] = item item_renames = bulbapedia_reader.get_item_renames_map() json_path = os.path.join(output_dir, 'descriptions.json') with open(json_path, 'w') as file: file.write(json.dumps({ 'abilities': abilities, 'types_efficacy': types_efficacy, 'items': items, 'item_renames': item_renames, }, indent=2, sort_keys=True)) if __name__ == '__main__': main()
mit
vpelletier/neoppod
neo/lib/event.py
1
9556
# # Copyright (C) 2006-2016 Nexedi SA # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os, thread from time import time from select import epoll, EPOLLIN, EPOLLOUT, EPOLLERR, EPOLLHUP from errno import EAGAIN, EEXIST, EINTR, ENOENT from . import logging from .locking import Lock class EpollEventManager(object): """This class manages connections and events based on epoll(5).""" _timeout = None _trigger_exit = False def __init__(self): self.connection_dict = {} # Initialize a dummy 'unregistered' for the very rare case a registered # connection is closed before the first call to poll. We don't care # leaking a few integers for connections closed between 2 polls. 
self.unregistered = [] self.reader_set = set() self.writer_set = set() self.epoll = epoll() self._pending_processing = [] self._trigger_fd, w = os.pipe() os.close(w) self._trigger_lock = Lock() def close(self): os.close(self._trigger_fd) for c in self.connection_dict.values(): c.close() del self.__dict__ def getConnectionList(self): # XXX: use index return [x for x in self.connection_dict.itervalues() if not x.isAborted()] def getClientList(self): # XXX: use index return [c for c in self.getConnectionList() if c.isClient()] def getServerList(self): # XXX: use index return [c for c in self.getConnectionList() if c.isServer()] def getConnectionListByUUID(self, uuid): """ Return the connection associated to the UUID, None if the UUID is None, invalid or not found""" # XXX: use index # XXX: consider remove UUID from connection and thus this method if uuid is None: return None result = [] append = result.append for conn in self.getConnectionList(): if conn.getUUID() == uuid: append(conn) return result # epoll_wait always waits for EPOLLERR & EPOLLHUP so we're forced # to unregister when we want to ignore all events for a connection. def register(self, conn, timeout_only=False): fd = conn.getConnector().getDescriptor() self.connection_dict[fd] = conn if timeout_only: self.wakeup() else: self.epoll.register(fd) self.addReader(conn) def unregister(self, conn): new_pending_processing = [x for x in self._pending_processing if x is not conn] # Check that we removed at most one entry from # self._pending_processing . 
assert len(new_pending_processing) > len(self._pending_processing) - 2 self._pending_processing = new_pending_processing fd = conn.getConnector().getDescriptor() try: del self.connection_dict[fd] self.unregistered.append(fd) self.epoll.unregister(fd) except KeyError: pass except IOError, e: if e.errno != ENOENT: raise else: self.reader_set.discard(fd) self.writer_set.discard(fd) def isIdle(self): return not (self._pending_processing or self.writer_set) def _addPendingConnection(self, conn): pending_processing = self._pending_processing if conn not in pending_processing: pending_processing.append(conn) def poll(self, blocking=1): if not self._pending_processing: # Fetch messages from polled file descriptors self._poll(blocking) if not self._pending_processing: return to_process = self._pending_processing.pop(0) try: to_process.process() finally: # ...and requeue if there are pending messages if to_process.hasPendingMessages(): self._addPendingConnection(to_process) # Non-blocking call: as we handled a packet, we should just offer # poll a chance to fetch & send already-available data, but it must # not delay us. self._poll(0) def _poll(self, blocking): if blocking: timeout = self._timeout timeout_object = self for conn in self.connection_dict.itervalues(): t = conn.getTimeout() if t and (timeout is None or t < timeout): timeout = t timeout_object = conn # Make sure epoll_wait does not return too early, because it has a # granularity of 1ms and Python 2.7 rounds the timeout towards zero. # See also https://bugs.python.org/issue20452 (fixed in Python 3). 
blocking = .001 + max(0, timeout - time()) if timeout else -1 try: event_list = self.epoll.poll(blocking) except IOError, exc: if exc.errno in (0, EAGAIN): logging.info('epoll.poll triggered undocumented error %r', exc.errno) elif exc.errno != EINTR: raise return if event_list: self.unregistered = unregistered = [] wlist = [] elist = [] for fd, event in event_list: if event & EPOLLIN: conn = self.connection_dict[fd] if conn.readable(): self._addPendingConnection(conn) if event & EPOLLOUT: wlist.append(fd) if event & (EPOLLERR | EPOLLHUP): elist.append(fd) for fd in wlist: if fd not in unregistered: self.connection_dict[fd].writable() for fd in elist: if fd in unregistered: continue try: conn = self.connection_dict[fd] except KeyError: assert fd == self._trigger_fd, fd with self._trigger_lock: self.epoll.unregister(fd) if self._trigger_exit: del self._trigger_exit thread.exit() continue if conn.readable(): self._addPendingConnection(conn) elif blocking > 0: logging.debug('timeout triggered for %r', timeout_object) timeout_object.onTimeout() def onTimeout(self): on_timeout = self._on_timeout del self._on_timeout self._timeout = None on_timeout() def setTimeout(self, *args): self._timeout, self._on_timeout = args def wakeup(self, exit=False): with self._trigger_lock: self._trigger_exit |= exit try: self.epoll.register(self._trigger_fd) except IOError, e: # Ignore if 'wakeup' is called several times in a row. 
if e.errno != EEXIST: raise def addReader(self, conn): connector = conn.getConnector() assert connector is not None, conn.whoSetConnector() fd = connector.getDescriptor() if fd not in self.reader_set: self.reader_set.add(fd) self.epoll.modify(fd, EPOLLIN | ( fd in self.writer_set and EPOLLOUT)) def removeReader(self, conn): connector = conn.getConnector() assert connector is not None, conn.whoSetConnector() fd = connector.getDescriptor() if fd in self.reader_set: self.reader_set.remove(fd) self.epoll.modify(fd, fd in self.writer_set and EPOLLOUT) def addWriter(self, conn): connector = conn.getConnector() assert connector is not None, conn.whoSetConnector() fd = connector.getDescriptor() if fd not in self.writer_set: self.writer_set.add(fd) self.epoll.modify(fd, EPOLLOUT | ( fd in self.reader_set and EPOLLIN)) def removeWriter(self, conn): connector = conn.getConnector() assert connector is not None, conn.whoSetConnector() fd = connector.getDescriptor() if fd in self.writer_set: self.writer_set.remove(fd) self.epoll.modify(fd, fd in self.reader_set and EPOLLIN) def log(self): logging.info('Event Manager:') logging.info(' Readers: %r', list(self.reader_set)) logging.info(' Writers: %r', list(self.writer_set)) logging.info(' Connections:') pending_set = set(self._pending_processing) for fd, conn in self.connection_dict.items(): logging.info(' %r: %r (pending=%r)', fd, conn, conn in pending_set) # Default to EpollEventManager. EventManager = EpollEventManager
gpl-2.0
cntnboys/410Lab6
build/django/tests/comment_tests/tests/__init__.py
13
2811
from django.contrib.auth.models import User from django.contrib.comments.forms import CommentForm from django.contrib.comments.models import Comment from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site from django.test import TestCase, override_settings from ..models import Article, Author # Shortcut CT = ContentType.objects.get_for_model # Helper base class for comment tests that need data. @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',)) class CommentTestCase(TestCase): fixtures = ["comment_tests"] urls = 'comment_tests.urls_default' def createSomeComments(self): # Two anonymous comments on two different objects c1 = Comment.objects.create( content_type = CT(Article), object_pk = "1", user_name = "Joe Somebody", user_email = "jsomebody@example.com", user_url = "http://example.com/~joe/", comment = "First!", site = Site.objects.get_current(), ) c2 = Comment.objects.create( content_type = CT(Author), object_pk = "1", user_name = "Joe Somebody", user_email = "jsomebody@example.com", user_url = "http://example.com/~joe/", comment = "First here, too!", site = Site.objects.get_current(), ) # Two authenticated comments: one on the same Article, and # one on a different Author user = User.objects.create( username = "frank_nobody", first_name = "Frank", last_name = "Nobody", email = "fnobody@example.com", password = "", is_staff = False, is_active = True, is_superuser = False, ) c3 = Comment.objects.create( content_type = CT(Article), object_pk = "1", user = user, user_url = "http://example.com/~frank/", comment = "Damn, I wanted to be first.", site = Site.objects.get_current(), ) c4 = Comment.objects.create( content_type = CT(Author), object_pk = "2", user = user, user_url = "http://example.com/~frank/", comment = "You get here first, too?", site = Site.objects.get_current(), ) return c1, c2, c3, c4 def getData(self): return { 'name' : 'Jim Bob', 'email' : 
'jim.bob@example.com', 'url' : '', 'comment' : 'This is my comment', } def getValidData(self, obj): f = CommentForm(obj) d = self.getData() d.update(f.initial) return d
apache-2.0
jythontools/pip
pip/_vendor/requests/packages/chardet/constants.py
3008
1335
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### _debug = 0 eDetecting = 0 eFoundIt = 1 eNotMe = 2 eStart = 0 eError = 1 eItsMe = 2 SHORTCUT_THRESHOLD = 0.95
mit
ultcoin/UltimateCoin
share/qt/make_spinner.py
4415
1035
#!/usr/bin/env python # W.J. van der Laan, 2011 # Make spinning .mng animation from a .png # Requires imagemagick 6.7+ from __future__ import division from os import path from PIL import Image from subprocess import Popen SRC='img/reload_scaled.png' DST='../../src/qt/res/movies/update_spinner.mng' TMPDIR='/tmp' TMPNAME='tmp-%03i.png' NUMFRAMES=35 FRAMERATE=10.0 CONVERT='convert' CLOCKWISE=True DSIZE=(16,16) im_src = Image.open(SRC) if CLOCKWISE: im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT) def frame_to_filename(frame): return path.join(TMPDIR, TMPNAME % frame) frame_files = [] for frame in xrange(NUMFRAMES): rotation = (frame + 0.5) / NUMFRAMES * 360.0 if CLOCKWISE: rotation = -rotation im_new = im_src.rotate(rotation, Image.BICUBIC) im_new.thumbnail(DSIZE, Image.ANTIALIAS) outfile = frame_to_filename(frame) im_new.save(outfile, 'png') frame_files.append(outfile) p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST]) p.communicate()
mit
kpech21/Greek-Stemmer
tests/lemmatizers/test_verb.py
1
1883
# -*- coding: utf-8 -*- import pytest from greek_stemmer.lemmatizers.verb import stem class TestVerbStem: # rule-set 1: check irregular verbs verb_stem_ruleset1 = [ ('', 'VB', ''), ('ΕΙΜΑΙ', 'VB', 'ΕΙ'), ('ΕΙΜΑΣΤΕ', 'VBS', 'ΕΙ'), ('ΠΩ', 'VB', 'Π'), ('ΖΕΙΤΕ', 'VBS', 'Ζ'), ('ΖΟΥΣΑΜΕ', 'VBDS', 'Ζ'), ('ΔΕΙ', 'VB', 'Δ') ] @pytest.mark.parametrize('word, pos, output', verb_stem_ruleset1) def test_verb_stem_with_ruleset1(self, word, pos, output): assert stem(word, pos) == output # rule-set 2: ACTIVE VOICE, Singular - PASSIVE VOICE, Singular verb_stem_ruleset2 = [ ('', 'VB', ''), ('ΠΑΙΖΕΙ', 'VB', 'ΠΑΙΖ'), ('ΤΡΟΦΟΔΟΤΟΥΜΑΙ', 'VB', 'ΤΡΟΦΟΔΟΤ'), ('ΒΙΑΖΟΣΟΥΝΑ', 'VBD', 'ΒΙΑΖ'), ('ΔΙΑΣΚΕΔΑΖΑ', 'VBD', 'ΔΙΑΣΚΕΔΑΖ'), ('ΤΡΟΦΟΔΟΤΕΙ', 'VBF', 'ΤΡΟΦΟΔΟΤ'), ('ΕΧΩ', 'MD', 'ΕΧ') ] @pytest.mark.parametrize('word, pos, output', verb_stem_ruleset2) def test_verb_stem_with_ruleset2(self, word, pos, output): assert stem(word, pos) == output # rule-set 3: ACTIVE VOICE, Plural - PASSIVE VOICE, Plural verb_stem_ruleset3 = [ ('', 'VBS', ''), ('ΑΠΟΤΕΛΕΙΣΤΕ', 'VBS', 'ΑΠΟΤΕΛ'), ('ΔΕΙΤΕ', 'VBS', 'Δ'), ('ΠΕΡΙΠΟΙΟΝΤΟΥΣΑΝ', 'VBDS', 'ΠΕΡΙΠΟ'), ('ΠΑΙΖΑΝ', 'VBDS', 'ΠΑΙΖ'), ('ΤΡΟΦΟΔΟΤΟΥΝ', 'VBFS', 'ΤΡΟΦΟΔΟΤ'), ('ΟΙΚΕΙΟΠΟΙΟΥΝΤΑΙ', 'VBS', 'ΟΙΚΕΙΟΠΟΙΟΥ') ] @pytest.mark.parametrize('word, pos, output', verb_stem_ruleset3) def test_verb_stem_with_various_ruleset3(self, word, pos, output): assert stem(word, pos) == output
lgpl-3.0
ecrc/girih
scripts/test_pochoir_increasing_grid_size.py
2
4325
#!/usr/bin/env python def run_pochoir_test(dry_run, th, kernel, nx, ny, nz, nt, target_dir, outfile, pinning_cmd, pinning_args): import os import subprocess from string import Template from scripts.utils import ensure_dir job_template=Template( """$pinning_cmd $pinning_args $exec_path $nx $ny $nz $nt | tee $outpath""") #"""echo 'OpenMP Threads: $th' | tee $outpath; $pinning_cmd $pinning_args $exec_path $nx $ny $nz $nt | tee $outpath""") # set the output path target_dir = os.path.join(os.path.abspath("."),target_dir) ensure_dir(target_dir) outpath = os.path.join(target_dir, outfile) # set the executable if(kernel==0): exec_name ='3dfd' elif(kernel==1): exec_name ='3d7pt' elif(kernel==4): exec_name ='3d25pt_var' elif(kernel==5): exec_name ='3d7pt_var' else: raise exec_path = os.path.join(os.path.abspath("."),exec_name) job_cmd = job_template.substitute(th=th, nx=nx, ny=ny, nz=nz, nt=nt, kernel=kernel, outpath=outpath, exec_path=exec_path, pinning_cmd=pinning_cmd, pinning_args=pinning_args) print job_cmd if(dry_run==0): sts = subprocess.call(job_cmd, shell=True) return job_cmd def igs_test(dry_run, target_dir, exp_name, th, group='', params=[]): from scripts.conf.conf import machine_conf, machine_info import itertools # Test using rasonable time # T = scale * size / perf # scale = T*perf/size desired_time = 5 if(machine_info['hostname']=='Haswell_18core'): k_perf_order = {0:1000, 1:2500, 4:200, 5:1000} else: k_perf_order = {0:500, 1:1000, 4:100, 5:600} k_time_scale={} for k, v in k_perf_order.items(): k_time_scale[k] = desired_time*v if(machine_info['hostname']=='Haswell_18core'): increment = 128 kernels_limits = [897, 897, 0, 0, 897, 897] else: increment = 64 kernels_limits = [961, 961, 0, 0, 577, 769] radius = {0:4, 1:1, 4:4, 5:1} points = [64] + list(range(128, 5000, increment)) count=0 for kernel in [0, 1, 4, 5]: for N in points: if (N < kernels_limits[kernel]): key = (kernel, N) if key in params: continue outfile=('pochoir_kernel%d_N%d_%s_%s.txt' % (kernel, N, 
group, exp_name[-13:])) nt = max(int(k_time_scale[kernel]/(N**3/1e6)), 30) N = N + 2 * radius[kernel] # Pochoir takes the whole size including the halo region # print outfile run_pochoir_test(dry_run=dry_run, th=th, kernel=kernel, nx=N, ny=N, nz=N, nt=nt, outfile=outfile, target_dir=target_dir, pinning_cmd=machine_conf['pinning_cmd'], pinning_args=machine_conf['pinning_args']) count = count+1 return count def main(): from scripts.utils import create_project_tarball, get_stencil_num from scripts.conf.conf import machine_conf, machine_info import os, sys from csv import DictReader import time,datetime dry_run = 1 if len(sys.argv)<2 else int(sys.argv[1]) time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H_%M') exp_name = "pochoir_increasing_grid_size_at_%s_%s" % (machine_info['hostname'], time_stamp) tarball_dir='results/'+exp_name if(dry_run==0): create_project_tarball(tarball_dir, "project_"+exp_name) target_dir='results/' + exp_name # parse the results to find out which of the already exist data = [] data_file = os.path.join('results', 'summary.csv') try: with open(data_file, 'rb') as output_file: raw_data = DictReader(output_file) for k in raw_data: k['stencil'] = get_stencil_num(k) data.append(k) except: pass params = set() for k in data: try: params.add( (k['stencil'], int(k['Global NX'])) ) except: print k raise #update the pinning information to use all cores th = machine_info['n_cores'] count = 0 for group in ['MEM', 'TLB_DATA', 'L2', 'L3', 'DATA']:#, 'ENERGY']: if(machine_info['hostname']=='IVB_10core'): if group=='TLB_DATA': group='TLB' machine_conf['pinning_args'] = " -m -g " + group + " -c " + "%d-%d "%(0, th-1) + '-- numactl --physcpubind=%d-%d'%(0,th-1) # for k in params: print k count = count + igs_test(dry_run, target_dir, exp_name, th=th, params=params, group=group) print "experiments count =" + str(count) if __name__ == "__main__": main()
bsd-3-clause
computersalat/ansible
test/lib/ansible_test/_internal/provider/source/git.py
43
2654
"""Source provider for a content root managed by git version control.""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ... import types as t from ...git import ( Git, ) from ...encoding import ( to_bytes, ) from ...util import ( SubprocessError, ) from . import ( SourceProvider, ) class GitSource(SourceProvider): """Source provider for a content root managed by git version control.""" @staticmethod def is_content_root(path): # type: (str) -> bool """Return True if the given path is a content root for this provider.""" return os.path.exists(os.path.join(path, '.git')) def get_paths(self, path): # type: (str) -> t.List[str] """Return the list of available content paths under the given path.""" paths = self.__get_paths(path) try: submodule_paths = Git(path).get_submodule_paths() except SubprocessError: if path == self.root: raise # older versions of git require submodule commands to be executed from the top level of the working tree # git version 2.18.1 (centos8) does not have this restriction # git version 1.8.3.1 (centos7) does # fall back to using the top level directory of the working tree only when needed # this avoids penalizing newer git versions with a potentially slower analysis due to additional submodules rel_path = os.path.relpath(path, self.root) + os.path.sep submodule_paths = Git(self.root).get_submodule_paths() submodule_paths = [os.path.relpath(p, rel_path) for p in submodule_paths if p.startswith(rel_path)] for submodule_path in submodule_paths: paths.extend(os.path.join(submodule_path, p) for p in self.__get_paths(os.path.join(path, submodule_path))) # git reports submodule directories as regular files paths = [p for p in paths if p not in submodule_paths] return paths @staticmethod def __get_paths(path): # type: (str) -> t.List[str] """Return the list of available content paths under the given path.""" git = Git(path) paths = git.get_file_names(['--cached', '--others', '--exclude-standard']) 
deleted_paths = git.get_file_names(['--deleted']) paths = sorted(set(paths) - set(deleted_paths)) # directory symlinks are reported by git as regular files but they need to be treated as directories paths = [path + os.path.sep if os.path.isdir(to_bytes(path)) else path for path in paths] return paths
gpl-3.0
cython-testbed/pandas
pandas/tests/io/parser/test_textreader.py
4
11387
# -*- coding: utf-8 -*- """ Tests the TextReader class in parsers.pyx, which is integral to the C engine in parsers.py """ import pytest from pandas.compat import StringIO, BytesIO, map from pandas import compat import os import sys from numpy import nan import numpy as np from pandas import DataFrame from pandas.io.parsers import (read_csv, TextFileReader) from pandas.util.testing import assert_frame_equal import pandas.util.testing as tm from pandas._libs.parsers import TextReader import pandas._libs.parsers as parser class TestTextReader(object): @pytest.fixture(autouse=True) def setup_method(self, datapath): self.dirpath = datapath('io', 'parser', 'data') self.csv1 = os.path.join(self.dirpath, 'test1.csv') self.csv2 = os.path.join(self.dirpath, 'test2.csv') self.xls1 = os.path.join(self.dirpath, 'test.xls') def test_file_handle(self): with open(self.csv1, 'rb') as f: reader = TextReader(f) reader.read() def test_string_filename(self): reader = TextReader(self.csv1, header=None) reader.read() def test_file_handle_mmap(self): with open(self.csv1, 'rb') as f: reader = TextReader(f, memory_map=True, header=None) reader.read() def test_StringIO(self): with open(self.csv1, 'rb') as f: text = f.read() src = BytesIO(text) reader = TextReader(src, header=None) reader.read() def test_string_factorize(self): # should this be optional? 
data = 'a\nb\na\nb\na' reader = TextReader(StringIO(data), header=None) result = reader.read() assert len(set(map(id, result[0]))) == 2 def test_skipinitialspace(self): data = ('a, b\n' 'a, b\n' 'a, b\n' 'a, b') reader = TextReader(StringIO(data), skipinitialspace=True, header=None) result = reader.read() tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a', 'a'], dtype=np.object_)) tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b', 'b'], dtype=np.object_)) def test_parse_booleans(self): data = 'True\nFalse\nTrue\nTrue' reader = TextReader(StringIO(data), header=None) result = reader.read() assert result[0].dtype == np.bool_ def test_delimit_whitespace(self): data = 'a b\na\t\t "b"\n"a"\t \t b' reader = TextReader(StringIO(data), delim_whitespace=True, header=None) result = reader.read() tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a'], dtype=np.object_)) tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b'], dtype=np.object_)) def test_embedded_newline(self): data = 'a\n"hello\nthere"\nthis' reader = TextReader(StringIO(data), header=None) result = reader.read() expected = np.array(['a', 'hello\nthere', 'this'], dtype=np.object_) tm.assert_numpy_array_equal(result[0], expected) def test_euro_decimal(self): data = '12345,67\n345,678' reader = TextReader(StringIO(data), delimiter=':', decimal=',', header=None) result = reader.read() expected = np.array([12345.67, 345.678]) tm.assert_almost_equal(result[0], expected) def test_integer_thousands(self): data = '123,456\n12,500' reader = TextReader(StringIO(data), delimiter=':', thousands=',', header=None) result = reader.read() expected = np.array([123456, 12500], dtype=np.int64) tm.assert_almost_equal(result[0], expected) def test_integer_thousands_alt(self): data = '123.456\n12.500' reader = TextFileReader(StringIO(data), delimiter=':', thousands='.', header=None) result = reader.read() expected = DataFrame([123456, 12500]) tm.assert_frame_equal(result, expected) 
@tm.capture_stderr def test_skip_bad_lines(self): # too many lines, see #2430 for why data = ('a:b:c\n' 'd:e:f\n' 'g:h:i\n' 'j:k:l:m\n' 'l:m:n\n' 'o:p:q:r') reader = TextReader(StringIO(data), delimiter=':', header=None) pytest.raises(parser.ParserError, reader.read) reader = TextReader(StringIO(data), delimiter=':', header=None, error_bad_lines=False, warn_bad_lines=False) result = reader.read() expected = {0: np.array(['a', 'd', 'g', 'l'], dtype=object), 1: np.array(['b', 'e', 'h', 'm'], dtype=object), 2: np.array(['c', 'f', 'i', 'n'], dtype=object)} assert_array_dicts_equal(result, expected) reader = TextReader(StringIO(data), delimiter=':', header=None, error_bad_lines=False, warn_bad_lines=True) reader.read() val = sys.stderr.getvalue() assert 'Skipping line 4' in val assert 'Skipping line 6' in val def test_header_not_enough_lines(self): data = ('skip this\n' 'skip this\n' 'a,b,c\n' '1,2,3\n' '4,5,6') reader = TextReader(StringIO(data), delimiter=',', header=2) header = reader.header expected = [['a', 'b', 'c']] assert header == expected recs = reader.read() expected = {0: np.array([1, 4], dtype=np.int64), 1: np.array([2, 5], dtype=np.int64), 2: np.array([3, 6], dtype=np.int64)} assert_array_dicts_equal(recs, expected) def test_escapechar(self): data = ('\\"hello world\"\n' '\\"hello world\"\n' '\\"hello world\"') reader = TextReader(StringIO(data), delimiter=',', header=None, escapechar='\\') result = reader.read() expected = {0: np.array(['"hello world"'] * 3, dtype=object)} assert_array_dicts_equal(result, expected) def test_eof_has_eol(self): # handling of new line at EOF pass def test_na_substitution(self): pass def test_numpy_string_dtype(self): data = """\ a,1 aa,2 aaa,3 aaaa,4 aaaaa,5""" def _make_reader(**kwds): return TextReader(StringIO(data), delimiter=',', header=None, **kwds) reader = _make_reader(dtype='S5,i4') result = reader.read() assert result[0].dtype == 'S5' ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5') assert 
(result[0] == ex_values).all() assert result[1].dtype == 'i4' reader = _make_reader(dtype='S4') result = reader.read() assert result[0].dtype == 'S4' ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4') assert (result[0] == ex_values).all() assert result[1].dtype == 'S4' def test_pass_dtype(self): data = """\ one,two 1,a 2,b 3,c 4,d""" def _make_reader(**kwds): return TextReader(StringIO(data), delimiter=',', **kwds) reader = _make_reader(dtype={'one': 'u1', 1: 'S1'}) result = reader.read() assert result[0].dtype == 'u1' assert result[1].dtype == 'S1' reader = _make_reader(dtype={'one': np.uint8, 1: object}) result = reader.read() assert result[0].dtype == 'u1' assert result[1].dtype == 'O' reader = _make_reader(dtype={'one': np.dtype('u1'), 1: np.dtype('O')}) result = reader.read() assert result[0].dtype == 'u1' assert result[1].dtype == 'O' def test_usecols(self): data = """\ a,b,c 1,2,3 4,5,6 7,8,9 10,11,12""" def _make_reader(**kwds): return TextReader(StringIO(data), delimiter=',', **kwds) reader = _make_reader(usecols=(1, 2)) result = reader.read() exp = _make_reader().read() assert len(result) == 2 assert (result[1] == exp[1]).all() assert (result[2] == exp[2]).all() def test_cr_delimited(self): def _test(text, **kwargs): nice_text = text.replace('\r', '\r\n') result = TextReader(StringIO(text), **kwargs).read() expected = TextReader(StringIO(nice_text), **kwargs).read() assert_array_dicts_equal(result, expected) data = 'a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12' _test(data, delimiter=',') data = 'a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12' _test(data, delim_whitespace=True) data = 'a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12' _test(data, delimiter=',') sample = ('A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r' 'AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r' ',BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0') _test(sample, delimiter=',') data = 'A B C\r 2 3\r4 5 6' _test(data, delim_whitespace=True) data = 'A B C\r2 3\r4 5 6' _test(data, delim_whitespace=True) def test_empty_field_eof(self): data 
= 'a,b,c\n1,2,3\n4,,' result = TextReader(StringIO(data), delimiter=',').read() expected = {0: np.array([1, 4], dtype=np.int64), 1: np.array(['2', ''], dtype=object), 2: np.array(['3', ''], dtype=object)} assert_array_dicts_equal(result, expected) # GH5664 a = DataFrame([['b'], [nan]], columns=['a'], index=['a', 'c']) b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], columns=list('abcd'), index=[1, 1]) c = DataFrame([[1, 2, 3, 4], [6, nan, nan, nan], [8, 9, 10, 11], [13, 14, nan, nan]], columns=list('abcd'), index=[0, 5, 7, 12]) for _ in range(100): df = read_csv(StringIO('a,b\nc\n'), skiprows=0, names=['a'], engine='c') assert_frame_equal(df, a) df = read_csv(StringIO('1,1,1,1,0\n' * 2 + '\n' * 2), names=list("abcd"), engine='c') assert_frame_equal(df, b) df = read_csv(StringIO('0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14'), names=list('abcd'), engine='c') assert_frame_equal(df, c) def test_empty_csv_input(self): # GH14867 df = read_csv(StringIO(), chunksize=20, header=None, names=['a', 'b', 'c']) assert isinstance(df, TextFileReader) def assert_array_dicts_equal(left, right): for k, v in compat.iteritems(left): assert tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k]))
bsd-3-clause
ghandiosm/Test
addons/account_budget/__openerp__.py
27
2220
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. { 'name': 'Budgets Management', 'version': '1.0', 'category': 'Accounting & Finance', 'description': """ This module allows accountants to manage analytic and crossovered budgets. ========================================================================== Once the Budgets are defined (in Invoicing/Budgets/Budgets), the Project Managers can set the planned amount on each Analytic Account. The accountant has the possibility to see the total of amount planned for each Budget in order to ensure the total planned is not greater/lower than what he planned for this Budget. Each list of record can also be switched to a graphical view of it. Three reports are available: ---------------------------- 1. The first is available from a list of Budgets. It gives the spreading, for these Budgets, of the Analytic Accounts. 2. The second is a summary of the previous one, it only gives the spreading, for the selected Budgets, of the Analytic Accounts. 3. The last one is available from the Analytic Chart of Accounts. It gives the spreading, for the selected Analytic Accounts of Budgets. """, 'website': 'https://www.odoo.com/page/accounting', 'depends': ['account'], 'data': [ 'security/ir.model.access.csv', 'security/account_budget_security.xml', 'account_budget_view.xml', 'account_budget_report.xml', 'account_budget_workflow.xml', 'wizard/account_budget_analytic_view.xml', 'wizard/account_budget_report_view.xml', 'wizard/account_budget_crossovered_summary_report_view.xml', 'wizard/account_budget_crossovered_report_view.xml', 'views/report_analyticaccountbudget.xml', 'views/report_budget.xml', 'views/report_crossoveredbudget.xml', ], 'demo': ['account_budget_demo.xml', 'account_budget_demo.yml'], 'test': [ '../account/test/account_minimal_test.xml', 'account_budget_demo.yml', 'test/account_budget.yml', 'test/account_budget_report.yml', ], 'installable': True, 'auto_install': False, }
gpl-3.0
BigDataforYou/movie_recommendation_workshop_1
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py
2763
12628
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: Latin7_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0 110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) win1253_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0 110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) # Model Table: # total sequences: 100% # first 512 sequences: 98.2851% # first 1024 sequences:1.7001% # rest sequences: 0.0359% # negative sequences: 0.0148% GreekLangModel = ( 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0, 3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0, 2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0, 0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0, 2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0, 2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0, 0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0, 2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0, 0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0, 3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0, 3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0, 2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0, 2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0, 0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0, 0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0, 0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2, 0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0, 0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2, 0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0, 0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2, 0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2, 0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0, 0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2, 0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0, 0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0, 0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0, 0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0, 0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2, 0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0, 
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2, 0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2, 0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2, 0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0, 0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1, 0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2, 0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2, 0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2, 0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0, 0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0, 0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1, 0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0, 0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0, 
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) Latin7GreekModel = { 'charToOrderMap': Latin7_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, 'keepEnglishLetter': False, 'charsetName': "ISO-8859-7" } Win1253GreekModel = { 'charToOrderMap': win1253_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, 'keepEnglishLetter': False, 'charsetName': "windows-1253" } # flake8: noqa
mit
doantranhoang/namebench
nb_third_party/jinja2/constants.py
220
6229
# -*- coding: utf-8 -*- """ jinja.constants ~~~~~~~~~~~~~~~ Various constants. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ #: list of lorem ipsum words used by the lipsum() helper function LOREM_IPSUM_WORDS = u'''\ a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at auctor augue bibendum blandit class commodo condimentum congue consectetuer consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque penatibus per pharetra phasellus placerat platea porta porttitor posuere potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus viverra volutpat vulputate''' #: a dict of all html entities + apos HTML_ENTITIES = { 'AElig': 198, 'Aacute': 193, 'Acirc': 194, 'Agrave': 192, 'Alpha': 913, 'Aring': 197, 'Atilde': 195, 'Auml': 196, 'Beta': 914, 'Ccedil': 199, 'Chi': 935, 'Dagger': 8225, 'Delta': 916, 'ETH': 208, 'Eacute': 201, 'Ecirc': 202, 'Egrave': 200, 'Epsilon': 917, 'Eta': 919, 'Euml': 203, 'Gamma': 915, 'Iacute': 205, 'Icirc': 206, 'Igrave': 204, 
'Iota': 921, 'Iuml': 207, 'Kappa': 922, 'Lambda': 923, 'Mu': 924, 'Ntilde': 209, 'Nu': 925, 'OElig': 338, 'Oacute': 211, 'Ocirc': 212, 'Ograve': 210, 'Omega': 937, 'Omicron': 927, 'Oslash': 216, 'Otilde': 213, 'Ouml': 214, 'Phi': 934, 'Pi': 928, 'Prime': 8243, 'Psi': 936, 'Rho': 929, 'Scaron': 352, 'Sigma': 931, 'THORN': 222, 'Tau': 932, 'Theta': 920, 'Uacute': 218, 'Ucirc': 219, 'Ugrave': 217, 'Upsilon': 933, 'Uuml': 220, 'Xi': 926, 'Yacute': 221, 'Yuml': 376, 'Zeta': 918, 'aacute': 225, 'acirc': 226, 'acute': 180, 'aelig': 230, 'agrave': 224, 'alefsym': 8501, 'alpha': 945, 'amp': 38, 'and': 8743, 'ang': 8736, 'apos': 39, 'aring': 229, 'asymp': 8776, 'atilde': 227, 'auml': 228, 'bdquo': 8222, 'beta': 946, 'brvbar': 166, 'bull': 8226, 'cap': 8745, 'ccedil': 231, 'cedil': 184, 'cent': 162, 'chi': 967, 'circ': 710, 'clubs': 9827, 'cong': 8773, 'copy': 169, 'crarr': 8629, 'cup': 8746, 'curren': 164, 'dArr': 8659, 'dagger': 8224, 'darr': 8595, 'deg': 176, 'delta': 948, 'diams': 9830, 'divide': 247, 'eacute': 233, 'ecirc': 234, 'egrave': 232, 'empty': 8709, 'emsp': 8195, 'ensp': 8194, 'epsilon': 949, 'equiv': 8801, 'eta': 951, 'eth': 240, 'euml': 235, 'euro': 8364, 'exist': 8707, 'fnof': 402, 'forall': 8704, 'frac12': 189, 'frac14': 188, 'frac34': 190, 'frasl': 8260, 'gamma': 947, 'ge': 8805, 'gt': 62, 'hArr': 8660, 'harr': 8596, 'hearts': 9829, 'hellip': 8230, 'iacute': 237, 'icirc': 238, 'iexcl': 161, 'igrave': 236, 'image': 8465, 'infin': 8734, 'int': 8747, 'iota': 953, 'iquest': 191, 'isin': 8712, 'iuml': 239, 'kappa': 954, 'lArr': 8656, 'lambda': 955, 'lang': 9001, 'laquo': 171, 'larr': 8592, 'lceil': 8968, 'ldquo': 8220, 'le': 8804, 'lfloor': 8970, 'lowast': 8727, 'loz': 9674, 'lrm': 8206, 'lsaquo': 8249, 'lsquo': 8216, 'lt': 60, 'macr': 175, 'mdash': 8212, 'micro': 181, 'middot': 183, 'minus': 8722, 'mu': 956, 'nabla': 8711, 'nbsp': 160, 'ndash': 8211, 'ne': 8800, 'ni': 8715, 'not': 172, 'notin': 8713, 'nsub': 8836, 'ntilde': 241, 'nu': 957, 'oacute': 243, 
'ocirc': 244, 'oelig': 339, 'ograve': 242, 'oline': 8254, 'omega': 969, 'omicron': 959, 'oplus': 8853, 'or': 8744, 'ordf': 170, 'ordm': 186, 'oslash': 248, 'otilde': 245, 'otimes': 8855, 'ouml': 246, 'para': 182, 'part': 8706, 'permil': 8240, 'perp': 8869, 'phi': 966, 'pi': 960, 'piv': 982, 'plusmn': 177, 'pound': 163, 'prime': 8242, 'prod': 8719, 'prop': 8733, 'psi': 968, 'quot': 34, 'rArr': 8658, 'radic': 8730, 'rang': 9002, 'raquo': 187, 'rarr': 8594, 'rceil': 8969, 'rdquo': 8221, 'real': 8476, 'reg': 174, 'rfloor': 8971, 'rho': 961, 'rlm': 8207, 'rsaquo': 8250, 'rsquo': 8217, 'sbquo': 8218, 'scaron': 353, 'sdot': 8901, 'sect': 167, 'shy': 173, 'sigma': 963, 'sigmaf': 962, 'sim': 8764, 'spades': 9824, 'sub': 8834, 'sube': 8838, 'sum': 8721, 'sup': 8835, 'sup1': 185, 'sup2': 178, 'sup3': 179, 'supe': 8839, 'szlig': 223, 'tau': 964, 'there4': 8756, 'theta': 952, 'thetasym': 977, 'thinsp': 8201, 'thorn': 254, 'tilde': 732, 'times': 215, 'trade': 8482, 'uArr': 8657, 'uacute': 250, 'uarr': 8593, 'ucirc': 251, 'ugrave': 249, 'uml': 168, 'upsih': 978, 'upsilon': 965, 'uuml': 252, 'weierp': 8472, 'xi': 958, 'yacute': 253, 'yen': 165, 'yuml': 255, 'zeta': 950, 'zwj': 8205, 'zwnj': 8204 }
apache-2.0
tito/pymt
pymt/graphx/stencil.py
2
3406
''' Stencil: use stencil for mask drawing Usage :: with gx_stencil: # change viewport # draw stuff ''' __all__ = ( # stencil 'GlStencil', 'gx_stencil', 'stencilPush', 'stencilPop', 'stencilUse', ) from OpenGL.GL import GL_STENCIL_BUFFER_BIT, GL_STENCIL_TEST, \ GL_NEVER, GL_INCR, GL_MODELVIEW_MATRIX, GL_EQUAL, GL_KEEP, \ glColorMask, glPushAttrib, glPopAttrib, glIsEnabled, \ glEnable, glStencilOp, glStencilFunc, \ glClear, glClearStencil, glMultMatrixf, glGetFloatv from pymt.graphx.statement import gx_matrix_identity, GlDisplayList ### Stencil usage __stencil_stack = 0 __stencil_stack_dl = [] __stencil_stack_view = [] def stencilGetStackLevel(): return __stencil_stack def stencilPush(): '''Create a new stack in stencil stack. All the next draw will be done in stencil buffer until stencilUse() will be called.''' global __stencil_stack glPushAttrib(GL_STENCIL_BUFFER_BIT | GL_STENCIL_TEST) # enable stencil test if not yet enabled if not glIsEnabled(GL_STENCIL_TEST): glClearStencil(0) glClear(GL_STENCIL_BUFFER_BIT) glEnable(GL_STENCIL_TEST) # increment the draw buffer glStencilFunc(GL_NEVER, 0x0, 0x0) glStencilOp(GL_INCR, GL_INCR, GL_INCR) glColorMask(0, 0, 0, 0) # save model view m = glGetFloatv(GL_MODELVIEW_MATRIX) __stencil_stack_view.append(m) # start recording GL operation dl = GlDisplayList() dl.start() __stencil_stack_dl.append(dl) __stencil_stack += 1 def stencilPop(): '''Pop out the last stack from stencil stack''' global __stencil_stack glPopAttrib() __stencil_stack -= 1 # remove current stencil stack __stencil_stack_dl.pop() __stencil_stack_view.pop() # replay stencil stack from the start # only if it's enabled if not glIsEnabled(GL_STENCIL_TEST): return # clear stencil glClearStencil(0) glClear(GL_STENCIL_BUFFER_BIT) # increment the draw buffer glStencilFunc(GL_NEVER, 0x0, 0x0) glStencilOp(GL_INCR, GL_INCR, GL_INCR) glColorMask(0, 0, 0, 0) # replay all gl operation for idx in xrange(__stencil_stack): dl = __stencil_stack_dl[idx] view = 
__stencil_stack_view[idx] with gx_matrix_identity: glMultMatrixf(view) dl.draw() # draw inner content only when stencil match the buffer glColorMask(1, 1, 1, 1) glStencilFunc(GL_EQUAL, __stencil_stack, __stencil_stack) glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP) def stencilUse(): '''Switch from stencil draw to color draw. Now, all drawing will be done on color buffer, using latest stencil stack. ''' # stop recording gl operation __stencil_stack_dl[__stencil_stack-1].stop() __stencil_stack_dl[__stencil_stack-1].draw() # draw inner content only when stencil match the buffer glColorMask(1, 1, 1, 1) glStencilFunc(GL_EQUAL, __stencil_stack, __stencil_stack) glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP) class GlStencil: '''Statement of stencilPush/stencilPop, designed to be use with "with" keyword. Alias: gx_stencil. ''' def __init__(self): pass def __enter__(self): stencilPush() def __exit__(self, type, value, traceback): stencilPop() #: Alias to GlStencil() gx_stencil = GlStencil()
lgpl-3.0
Eksmo/django-tastypie
tests/core/tests/throttle.py
12
6446
import mock import time from django.core.cache import cache from django.test import TestCase from django.utils.encoding import force_text from tastypie.models import ApiAccess from tastypie.throttle import BaseThrottle, CacheThrottle, CacheDBThrottle class NoThrottleTestCase(TestCase): def test_init(self): throttle_1 = BaseThrottle() self.assertEqual(throttle_1.throttle_at, 150) self.assertEqual(throttle_1.timeframe, 3600) self.assertEqual(throttle_1.expiration, 604800) throttle_2 = BaseThrottle(throttle_at=50, timeframe=60 * 30, expiration=1) self.assertEqual(throttle_2.throttle_at, 50) self.assertEqual(throttle_2.timeframe, 1800) self.assertEqual(throttle_2.expiration, 1) def test_convert_identifier_to_key(self): throttle_1 = BaseThrottle() self.assertEqual(throttle_1.convert_identifier_to_key(''), '_accesses') self.assertEqual(throttle_1.convert_identifier_to_key('alnum10'), 'alnum10_accesses') self.assertEqual(throttle_1.convert_identifier_to_key('Mr. Pants'), 'Mr.Pants_accesses') self.assertEqual(throttle_1.convert_identifier_to_key('Mr_Pants'), 'Mr_Pants_accesses') self.assertEqual(throttle_1.convert_identifier_to_key('%^@@$&!a'), 'a_accesses') def test_should_be_throttled(self): throttle_1 = BaseThrottle() self.assertEqual(throttle_1.should_be_throttled('foobaz'), False) def test_accessed(self): throttle_1 = BaseThrottle() self.assertEqual(throttle_1.accessed('foobaz'), None) @mock.patch('tastypie.throttle.time') class CacheThrottleTestCase(TestCase): def tearDown(self): cache.clear() def test_throttling(self, mocked_time): mocked_time.time.return_value = time.time() throttle_1 = CacheThrottle(throttle_at=2, timeframe=5, expiration=2) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 0) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 1) 
self.assertEqual(cache.get('cody_accesses'), None) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.accessed('cody'), None) self.assertEqual(throttle_1.should_be_throttled('cody'), False) self.assertEqual(len(cache.get('daniel_accesses')), 2) self.assertEqual(len(cache.get('cody_accesses')), 1) # THROTTLE'D! self.assertEqual(throttle_1.should_be_throttled('daniel'), 5) self.assertEqual(len(cache.get('daniel_accesses')), 2) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), 5) self.assertEqual(len(cache.get('daniel_accesses')), 3) self.assertEqual(throttle_1.accessed('daniel'), None) # Should be no interplay. self.assertEqual(throttle_1.should_be_throttled('cody'), False) self.assertEqual(throttle_1.accessed('cody'), None) # Test the timeframe. mocked_time.time.return_value += throttle_1.timeframe + 1 self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 0) @mock.patch('tastypie.throttle.time') class CacheDBThrottleTestCase(TestCase): def tearDown(self): cache.clear() def test_throttling(self, mocked_time): mocked_time.time.return_value = time.time() throttle_1 = CacheDBThrottle(throttle_at=2, timeframe=5, expiration=2) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 0) self.assertEqual(ApiAccess.objects.count(), 0) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 0) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 1) self.assertEqual(cache.get('cody_accesses'), None) self.assertEqual(ApiAccess.objects.count(), 1) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 1) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.accessed('cody'), None) 
self.assertEqual(throttle_1.should_be_throttled('cody'), False) self.assertEqual(len(cache.get('daniel_accesses')), 2) self.assertEqual(len(cache.get('cody_accesses')), 1) self.assertEqual(ApiAccess.objects.count(), 3) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 2) self.assertEqual(throttle_1.accessed('cody'), None) # THROTTLE'D! self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), 5) self.assertEqual(len(cache.get('daniel_accesses')), 3) self.assertEqual(ApiAccess.objects.count(), 5) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 3) self.assertEqual(throttle_1.accessed('daniel'), None) self.assertEqual(throttle_1.should_be_throttled('daniel'), 5) self.assertEqual(len(cache.get('daniel_accesses')), 4) self.assertEqual(ApiAccess.objects.count(), 6) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 4) # Should be no interplay. self.assertEqual(throttle_1.should_be_throttled('cody'), 5) self.assertEqual(throttle_1.accessed('cody'), None) self.assertEqual(ApiAccess.objects.count(), 7) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 4) # Test the timeframe. mocked_time.time.return_value += throttle_1.timeframe + 1 self.assertEqual(throttle_1.should_be_throttled('daniel'), False) self.assertEqual(len(cache.get('daniel_accesses')), 0) self.assertEqual(ApiAccess.objects.count(), 7) self.assertEqual(ApiAccess.objects.filter(identifier='daniel').count(), 4) class ModelTestCase(TestCase): def test_unicode(self): access = ApiAccess(identifier="testing", accessed=0) self.assertEqual(force_text(access), 'testing @ 0')
bsd-3-clause
3quarterstack/simple_blog
django/contrib/auth/tests/urls.py
100
3405
from django.conf.urls import patterns, url from django.contrib.auth import context_processors from django.contrib.auth.urls import urlpatterns from django.contrib.auth.views import password_reset from django.contrib.auth.decorators import login_required from django.contrib.messages.api import info from django.http import HttpResponse from django.shortcuts import render_to_response from django.template import Template, RequestContext from django.views.decorators.cache import never_cache @never_cache def remote_user_auth_view(request): "Dummy view for remote user tests" t = Template("Username is {{ user }}.") c = RequestContext(request, {}) return HttpResponse(t.render(c)) def auth_processor_no_attr_access(request): r1 = render_to_response('context_processors/auth_attrs_no_access.html', RequestContext(request, {}, processors=[context_processors.auth])) # *After* rendering, we check whether the session was accessed return render_to_response('context_processors/auth_attrs_test_access.html', {'session_accessed':request.session.accessed}) def auth_processor_attr_access(request): r1 = render_to_response('context_processors/auth_attrs_access.html', RequestContext(request, {}, processors=[context_processors.auth])) return render_to_response('context_processors/auth_attrs_test_access.html', {'session_accessed':request.session.accessed}) def auth_processor_user(request): return render_to_response('context_processors/auth_attrs_user.html', RequestContext(request, {}, processors=[context_processors.auth])) def auth_processor_perms(request): return render_to_response('context_processors/auth_attrs_perms.html', RequestContext(request, {}, processors=[context_processors.auth])) def auth_processor_perm_in_perms(request): return render_to_response('context_processors/auth_attrs_perm_in_perms.html', RequestContext(request, {}, processors=[context_processors.auth])) def auth_processor_messages(request): info(request, "Message 1") return 
render_to_response('context_processors/auth_attrs_messages.html', RequestContext(request, {}, processors=[context_processors.auth])) def userpage(request): pass # special urls for auth test cases urlpatterns = urlpatterns + patterns('', (r'^logout/custom_query/$', 'django.contrib.auth.views.logout', dict(redirect_field_name='follow')), (r'^logout/next_page/$', 'django.contrib.auth.views.logout', dict(next_page='/somewhere/')), (r'^remote_user/$', remote_user_auth_view), (r'^password_reset_from_email/$', 'django.contrib.auth.views.password_reset', dict(from_email='staffmember@example.com')), (r'^admin_password_reset/$', 'django.contrib.auth.views.password_reset', dict(is_admin_site=True)), (r'^login_required/$', login_required(password_reset)), (r'^login_required_login_url/$', login_required(password_reset, login_url='/somewhere/')), (r'^auth_processor_no_attr_access/$', auth_processor_no_attr_access), (r'^auth_processor_attr_access/$', auth_processor_attr_access), (r'^auth_processor_user/$', auth_processor_user), (r'^auth_processor_perms/$', auth_processor_perms), (r'^auth_processor_perm_in_perms/$', auth_processor_perm_in_perms), (r'^auth_processor_messages/$', auth_processor_messages), url(r'^userpage/(.+)/$', userpage, name="userpage"), )
mit
nan86150/ImageFusion
lib/python2.7/site-packages/scipy/weave/examples/wx_example.py
100
7328
""" This is taken from the scrolled window example from the demo. Take a look at the DoDrawing2() method below. The first 6 lines or so have been translated into C++. """ from __future__ import absolute_import, print_function import sys sys.path.insert(0,'..') import inline_tools from wxPython.wx import * class MyCanvas(wxScrolledWindow): def __init__(self, parent, id=-1, size=wxDefaultSize): wxScrolledWindow.__init__(self, parent, id, wxPoint(0, 0), size, wxSUNKEN_BORDER) self.lines = [] self.maxWidth = 1000 self.maxHeight = 1000 self.SetBackgroundColour(wxNamedColor("WHITE")) EVT_LEFT_DOWN(self, self.OnLeftButtonEvent) EVT_LEFT_UP(self, self.OnLeftButtonEvent) EVT_MOTION(self, self.OnLeftButtonEvent) EVT_PAINT(self, self.OnPaint) self.SetCursor(wxStockCursor(wxCURSOR_PENCIL)) # bmp = images.getTest2Bitmap() # mask = wxMaskColour(bmp, wxBLUE) # bmp.SetMask(mask) # self.bmp = bmp self.SetScrollbars(20, 20, self.maxWidth/20, self.maxHeight/20) def getWidth(self): return self.maxWidth def getHeight(self): return self.maxHeight def OnPaint(self, event): dc = wxPaintDC(self) self.PrepareDC(dc) self.DoDrawing2(dc) def DoDrawing(self, dc): dc.BeginDrawing() dc.SetPen(wxPen(wxNamedColour('RED'))) dc.DrawRectangle(5, 5, 50, 50) dc.SetBrush(wxLIGHT_GREY_BRUSH) dc.SetPen(wxPen(wxNamedColour('BLUE'), 4)) dc.DrawRectangle(15, 15, 50, 50) dc.SetFont(wxFont(14, wxSWISS, wxNORMAL, wxNORMAL)) dc.SetTextForeground(wxColour(0xFF, 0x20, 0xFF)) te = dc.GetTextExtent("Hello World") dc.DrawText("Hello World", 60, 65) dc.SetPen(wxPen(wxNamedColour('VIOLET'), 4)) dc.DrawLine(5, 65+te[1], 60+te[0], 65+te[1]) lst = [(100,110), (150,110), (150,160), (100,160)] dc.DrawLines(lst, -60) dc.SetPen(wxGREY_PEN) dc.DrawPolygon(lst, 75) dc.SetPen(wxGREEN_PEN) dc.DrawSpline(lst+[(100,100)]) # dc.DrawBitmap(self.bmp, 200, 20, true) # dc.SetTextForeground(wxColour(0, 0xFF, 0x80)) # dc.DrawText("a bitmap", 200, 85) font = wxFont(20, wxSWISS, wxNORMAL, wxNORMAL) dc.SetFont(font) 
dc.SetTextForeground(wxBLACK) for a in range(0, 360, 45): dc.DrawRotatedText("Rotated text...", 300, 300, a) dc.SetPen(wxTRANSPARENT_PEN) dc.SetBrush(wxBLUE_BRUSH) dc.DrawRectangle(50,500,50,50) dc.DrawRectangle(100,500,50,50) dc.SetPen(wxPen(wxNamedColour('RED'))) dc.DrawEllipticArc(200, 500, 50, 75, 0, 90) self.DrawSavedLines(dc) dc.EndDrawing() def DoDrawing2(self, dc): red = wxNamedColour("RED") blue = wxNamedColour("BLUE") grey_brush = wxLIGHT_GREY_BRUSH code = \ """ //#line 108 "wx_example.py" dc->BeginDrawing(); dc->SetPen(wxPen(*red,4,wxSOLID)); dc->DrawRectangle(5, 5, 50, 50); dc->SetBrush(*grey_brush); dc->SetPen(wxPen(*blue, 4,wxSOLID)); dc->DrawRectangle(15, 15, 50, 50); """ inline_tools.inline(code,['dc','red','blue','grey_brush'],verbose=2) dc.SetFont(wxFont(14, wxSWISS, wxNORMAL, wxNORMAL)) dc.SetTextForeground(wxColour(0xFF, 0x20, 0xFF)) te = dc.GetTextExtent("Hello World") dc.DrawText("Hello World", 60, 65) dc.SetPen(wxPen(wxNamedColour('VIOLET'), 4)) dc.DrawLine(5, 65+te[1], 60+te[0], 65+te[1]) lst = [(100,110), (150,110), (150,160), (100,160)] dc.DrawLines(lst, -60) dc.SetPen(wxGREY_PEN) dc.DrawPolygon(lst, 75) dc.SetPen(wxGREEN_PEN) dc.DrawSpline(lst+[(100,100)]) # dc.DrawBitmap(self.bmp, 200, 20, true) # dc.SetTextForeground(wxColour(0, 0xFF, 0x80)) # dc.DrawText("a bitmap", 200, 85) font = wxFont(20, wxSWISS, wxNORMAL, wxNORMAL) dc.SetFont(font) dc.SetTextForeground(wxBLACK) for a in range(0, 360, 45): dc.DrawRotatedText("Rotated text...", 300, 300, a) dc.SetPen(wxTRANSPARENT_PEN) dc.SetBrush(wxBLUE_BRUSH) dc.DrawRectangle(50,500,50,50) dc.DrawRectangle(100,500,50,50) dc.SetPen(wxPen(wxNamedColour('RED'))) dc.DrawEllipticArc(200, 500, 50, 75, 0, 90) self.DrawSavedLines(dc) dc.EndDrawing() def DrawSavedLines(self, dc): dc.SetPen(wxPen(wxNamedColour('MEDIUM FOREST GREEN'), 4)) for line in self.lines: for coords in line: apply(dc.DrawLine, coords) def SetXY(self, event): self.x, self.y = self.ConvertEventCoords(event) def ConvertEventCoords(self, 
event): xView, yView = self.GetViewStart() xDelta, yDelta = self.GetScrollPixelsPerUnit() return (event.GetX() + (xView * xDelta), event.GetY() + (yView * yDelta)) def OnLeftButtonEvent(self, event): if event.LeftDown(): self.SetXY(event) self.curLine = [] self.CaptureMouse() elif event.Dragging(): dc = wxClientDC(self) self.PrepareDC(dc) dc.BeginDrawing() dc.SetPen(wxPen(wxNamedColour('MEDIUM FOREST GREEN'), 4)) coords = (self.x, self.y) + self.ConvertEventCoords(event) self.curLine.append(coords) apply(dc.DrawLine, coords) self.SetXY(event) dc.EndDrawing() elif event.LeftUp(): self.lines.append(self.curLine) self.curLine = [] self.ReleaseMouse() #--------------------------------------------------------------------------- # This example isn't currently used. class py_canvas(wx.wxWindow): def __init__(self, parent, id=-1, pos=wx.wxPyDefaultPosition, size=wx.wxPyDefaultSize, **attr): wx.wxWindow.__init__(self, parent, id, pos,size) # wx.EVT_PAINT(self,self.on_paint) background = wx.wxNamedColour('white') code = """ self->SetBackgroundColour(*background); """ inline_tools.inline(code,['self','background'],compiler='msvc') #---------------------------------------------------------------------------- class MyFrame(wxFrame): def __init__(self, parent, ID, title, pos=wxDefaultPosition, size=wxDefaultSize, style=wxDEFAULT_FRAME_STYLE): wxFrame.__init__(self, parent, ID, title, pos, size, style) # panel = wxPanel(self, -1) self.GetSize() # button = wxButton(panel, 1003, "Close Me") # button.SetPosition(wxPoint(15, 15)) # EVT_BUTTON(self, 1003, self.OnCloseMe) # EVT_CLOSE(self, self.OnCloseWindow) # canvas = py_canvas(self,-1) canvas = MyCanvas(self,-1) canvas.Show(true) class MyApp(wxApp): def OnInit(self): win = MyFrame(NULL, -1, "This is a wxFrame", size=(350, 200), style = wxDEFAULT_FRAME_STYLE) # | wxFRAME_TOOL_WINDOW ) win.Show(true) return true if __name__ == "__main__": app = MyApp(0) app.MainLoop()
mit
AndKyr/GETELEC
python/JFplot.py
1
1648
#! /usr/bin/python import numpy as np import getelec_mod as gt from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mb font = 30 # mb.rcParams["font.family"] = "Serif" mb.rcParams["font.size"] = font mb.rcParams["axes.labelsize"] = font mb.rcParams["xtick.labelsize"] = font mb.rcParams["ytick.labelsize"] = font mb.rcParams["legend.fontsize"] = font mb.rcParams["lines.linewidth"] = 2.5 fsize = (18,10) Npoints = 256 Temps = [1.e-2, 300, 800, 1500] Xfn = np.linspace(0.12, 0.35, 256) F = 1./Xfn Jem = np.copy(F) this = gt.emission_create(W = 4.5, R = 5000., approx = 2) fig1 = plt.figure(figsize=fsize) ax1 = fig1.gca() ax1.set_xlabel(r"$1/F$ [m GV$^{-1}$]") ax1.set_ylabel(r"$J$ [A nm$^{-2}$]") colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] for i in range(len(Temps)): this.Temp = Temps[i] if (this.Temp < 10.): this.approx = -1 else: this.approx = 2 for j in range(len(F)): this.F = F[j] this.cur_dens() Jem[j] = this.Jem ax1.semilogy(Xfn,Jem, label = r'T = %d K'%this.Temp) # for i in range(len(Temps)): # this.Temp = Temps[i] # if (this.Temp < 10.): # this.approx = -1 # else: # this.approx = -1 # for j in range(len(F)): # this.F = F[j] # this.cur_dens() # Jem[j] = this.Jem # ax1.semilogy(Xfn,Jem, '--', color = colors[i], label = r'T = %d K'%this.Temp) # np.savetxt("J-F.dat", np.transpose(np.array([F,Jem])), delimiter = " ") ax1.grid() ax1.legend() plt.savefig("JFplot_Tparam.svg") plt.savefig("JFplot_Tparam.png") plt.show()
gpl-3.0
ahmedaljazzar/edx-platform
common/djangoapps/microsite_configuration/models.py
12
5888
""" Model to store a microsite in the database. The object is stored as a json representation of the python dict that would have been used in the settings. """ import collections from django.contrib.sites.models import Site from django.db import models from django.db.models.base import ObjectDoesNotExist from django.db.models.signals import pre_delete, pre_save from django.dispatch import receiver from jsonfield.fields import JSONField from model_utils.models import TimeStampedModel class Microsite(models.Model): """ This is where the information about the microsite gets stored to the db. To achieve the maximum flexibility, most of the fields are stored inside a json field. Notes: - The key field was required for the dict definition at the settings, and it is used in some of the microsite_configuration methods. - The site field is django site. - The values field must be validated on save to prevent the platform from crashing badly in the case the string is not able to be loaded as json. """ site = models.OneToOneField(Site, related_name='microsite', on_delete=models.CASCADE) key = models.CharField(max_length=63, db_index=True, unique=True) values = JSONField(null=False, blank=True, load_kwargs={'object_pairs_hook': collections.OrderedDict}) def __unicode__(self): return self.key def get_organizations(self): """ Helper method to return a list of organizations associated with our particular Microsite """ return MicrositeOrganizationMapping.get_organizations_for_microsite_by_pk(self.id) # pylint: disable=no-member @classmethod def get_microsite_for_domain(cls, domain): """ Returns the microsite associated with this domain. 
Note that we always convert to lowercase, or None if no match """ # remove any port number from the hostname domain = domain.split(':')[0] microsites = cls.objects.filter(site__domain__iexact=domain) return microsites[0] if microsites else None class MicrositeHistory(TimeStampedModel): """ This is an archive table for Microsites model, so that we can maintain a history of changes. Note that the key field is no longer unique """ site = models.ForeignKey(Site, related_name='microsite_history', on_delete=models.CASCADE) key = models.CharField(max_length=63, db_index=True) values = JSONField(null=False, blank=True, load_kwargs={'object_pairs_hook': collections.OrderedDict}) def __unicode__(self): return self.key class Meta(object): """ Meta class for this Django model """ verbose_name_plural = "Microsite histories" def _make_archive_copy(instance): """ Helper method to make a copy of a Microsite into the history table """ archive_object = MicrositeHistory( key=instance.key, site=instance.site, values=instance.values, ) archive_object.save() @receiver(pre_delete, sender=Microsite) def on_microsite_deleted(sender, instance, **kwargs): # pylint: disable=unused-argument """ Archive the exam attempt when the item is about to be deleted Make a clone and populate in the History table """ _make_archive_copy(instance) @receiver(pre_save, sender=Microsite) def on_microsite_updated(sender, instance, **kwargs): # pylint: disable=unused-argument """ Archive the microsite on an update operation """ if instance.id: # on an update case, get the original and archive it original = Microsite.objects.get(id=instance.id) _make_archive_copy(original) class MicrositeOrganizationMapping(models.Model): """ Mapping of Organization to which Microsite it belongs """ organization = models.CharField(max_length=63, db_index=True, unique=True) microsite = models.ForeignKey(Microsite, db_index=True, on_delete=models.CASCADE) def __unicode__(self): """String conversion""" return u'{microsite_key}: 
{organization}'.format( microsite_key=self.microsite.key, organization=self.organization ) @classmethod def get_organizations_for_microsite_by_pk(cls, microsite_pk): """ Returns a list of organizations associated with the microsite key, returned as a set """ return cls.objects.filter(microsite_id=microsite_pk).values_list('organization', flat=True) @classmethod def get_microsite_for_organization(cls, org): """ Returns the microsite object for a given organization based on the table mapping, None if no mapping exists """ try: item = cls.objects.select_related('microsite').get(organization=org) return item.microsite except ObjectDoesNotExist: return None class MicrositeTemplate(models.Model): """ A HTML template that a microsite can use """ microsite = models.ForeignKey(Microsite, db_index=True, on_delete=models.CASCADE) template_uri = models.CharField(max_length=255, db_index=True) template = models.TextField() def __unicode__(self): """String conversion""" return u'{microsite_key}: {template_uri}'.format( microsite_key=self.microsite.key, template_uri=self.template_uri ) class Meta(object): """ Meta class for this Django model """ unique_together = (('microsite', 'template_uri'),) @classmethod def get_template_for_microsite(cls, domain, template_uri): """ Returns the template object for the microsite, None if not found """ try: return cls.objects.get(microsite__site__domain=domain, template_uri=template_uri) except ObjectDoesNotExist: return None
agpl-3.0
bauruine/ansible
lib/ansible/inventory/script.py
16
5816
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

#############################################

import os
import subprocess
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_unicode_to_bytes
from ansible import utils
from ansible import errors
import sys


class InventoryScript(object):
    ''' Host inventory parser for ansible using external inventory scripts. '''

    def __init__(self, filename=C.DEFAULT_HOST_LIST):
        # Runs "<script> --list" once, captures its JSON on stdout, and
        # parses it into Group objects.  Raises AnsibleError if the script
        # cannot be executed or its output reports failure.

        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        self.filename = os.path.abspath(filename)
        cmd = [ self.filename, "--list" ]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError, e:
            # typical causes: script missing or not executable
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (stdout, stderr) = sp.communicate()
        self.data = stdout
        # see comment about _meta below
        self.host_vars_from_top = None
        self.groups = self._parse(stderr)


    def _parse(self, err):
        # Parses the JSON captured in self.data and returns a dict mapping
        # group name -> Group.  ``err`` is the script's stderr, echoed only
        # when the output itself reports failure.

        all_hosts = {}  # hostname -> Host, shared so hosts join multiple groups

        # not passing from_remote because data from CMDB is trusted
        self.raw = utils.parse_json(self.data)
        self.raw = json_dict_unicode_to_bytes(self.raw)

        all = Group('all')
        groups = dict(all=all)
        group = None

        if 'failed' in self.raw:
            sys.stderr.write(err + "\n")
            raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)

        for (group_name, data) in self.raw.items():

            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host.  This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.

            if group_name == '_meta':
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']
                    continue

            if group_name != all.name:
                group = groups[group_name] = Group(group_name)
            else:
                group = all
            host = None

            if not isinstance(data, dict):
                # a bare list is shorthand for {'hosts': [...]}
                data = {'hosts': data}
            # is not those subkeys, then simplified syntax, host with vars
            elif not any(k in data for k in ('hosts','vars')):
                data = {'hosts': [group_name], 'vars': data}

            if 'hosts' in data:
                if not isinstance(data['hosts'], list):
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for the host list:\n %s" % (group_name, data))

                for hostname in data['hosts']:
                    if not hostname in all_hosts:
                        all_hosts[hostname] = Host(hostname)
                    host = all_hosts[hostname]
                    group.add_host(host)

            if 'vars' in data:
                if not isinstance(data['vars'], dict):
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for variables:\n %s" % (group_name, data))

                for k, v in data['vars'].iteritems():
                    # variables on 'all' apply globally; otherwise group-scoped
                    if group.name == all.name:
                        all.set_variable(k, v)
                    else:
                        group.set_variable(k, v)

        # Separate loop to ensure all groups are defined before wiring up
        # parent/child relationships (children may be declared before the
        # child group itself appears in the dict).
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:
                for child_name in data['children']:
                    if child_name in groups:
                        groups[group_name].add_child_group(groups[child_name])

        # any group not claimed as a child of another group (depth still 0)
        # becomes a direct child of 'all'
        for group in groups.values():
            if group.depth == 0 and group.name != 'all':
                all.add_child_group(group)

        return groups

    def get_host_variables(self, host):
        """ Runs <script> --host <hostname> to determine additional host variables """
        # Prefer the cached '_meta' hostvars if the script supplied them;
        # this avoids one subprocess invocation per host.
        if self.host_vars_from_top is not None:
            got = self.host_vars_from_top.get(host.name, {})
            return got

        cmd = [self.filename, "--host", host.name]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError, e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (out, err) = sp.communicate()
        # empty output means "no extra variables" rather than an error
        if out.strip() == '':
            return dict()
        try:
            return json_dict_unicode_to_bytes(utils.parse_json(out))
        except ValueError:
            raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
gpl-3.0
goyal-sidd/BLT
website/models.py
1
10857
# Django models for the BLT (bug-hunting) site: tracked domains, reported
# issues, bounty hunts, user scores/profiles, plus a post-save hook that
# tweets new issues.  Python 2 codebase (__unicode__, print statements).
import os
from urlparse import urlparse

import requests
import tweepy
from PIL import Image
from annoying.fields import AutoOneToOneField
from colorthief import ColorThief
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db import models
from django.db.models import Count
from django.db.models import signals
from django.db.models.signals import post_save
from unidecode import unidecode


class Domain(models.Model):
    """A website whose bugs are tracked on the platform."""
    name = models.CharField(max_length=255, unique=True)
    url = models.URLField()
    logo = models.ImageField(upload_to="logos", null=True, blank=True)
    webshot = models.ImageField(upload_to="webshots", null=True, blank=True)
    clicks = models.IntegerField(null=True, blank=True)
    email_event = models.CharField(max_length=255, default="", null=True, blank=True)
    color = models.CharField(max_length=10, null=True, blank=True)
    github = models.CharField(max_length=255, null=True, blank=True)
    email = models.EmailField(null=True, blank=True)
    twitter = models.CharField(max_length=30, null=True, blank=True)
    facebook = models.URLField(null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.name

    @property
    def open_issues(self):
        # every issue for this domain that is not closed
        return Issue.objects.filter(domain=self).exclude(status="closed")

    @property
    def closed_issues(self):
        return Issue.objects.filter(domain=self).filter(status="closed")

    @property
    def top_tester(self):
        # user with the most issues filed against this domain (or None)
        return User.objects.filter(issue__domain=self).annotate(total=Count('issue')).order_by('-total').first()

    @property
    def get_name(self):
        # second-level label of the host, title-cased (e.g. "Example" for
        # www.example.com)
        parsed_url = urlparse(self.url)
        return parsed_url.netloc.split(".")[-2:][0].title()

    def get_logo(self):
        """Return a logo URL, fetching one from Clearbit on first use."""
        if self.logo:
            return self.logo.url
        image_request = requests.get("https://logo.clearbit.com/" + self.name)
        try:
            if image_request.status_code == 200:
                image_content = ContentFile(image_request.content)
                self.logo.save(self.name + ".jpg", image_content)
                return self.logo.url
        # NOTE(review): bare except silently falls back to the favicon on any
        # failure (including save errors) — consider narrowing.
        except:
            favicon_url = self.url + '/favicon.ico'
            return favicon_url

    @property
    def get_color(self):
        # Dominant logo colour as '#rrggbb', computed once and cached on the
        # model; falls back to blue if extraction fails.
        if self.color:
            return self.color
        else:
            if not self.logo:
                self.get_logo()
            try:
                color_thief = ColorThief(self.logo)
                self.color = '#%02x%02x%02x' % color_thief.get_color(quality=1)
            except:
                self.color = "#0000ff"
            self.save()
            return self.color

    @property
    def hostname_domain(self):
        parsed_url = urlparse(self.url)
        return parsed_url.hostname

    @property
    def domain_name(self):
        # strip a single leading subdomain: a.b.c -> b.c
        parsed_url = urlparse(self.url)
        domain = parsed_url.hostname
        temp = domain.rsplit('.')
        if (len(temp) == 3):
            domain = temp[1] + '.' + temp[2]
        return domain

    def get_absolute_url(self):
        return "/domain/" + self.name


def validate_image(fieldfile_obj):
    """Reject uploaded screenshots larger than 3 MB."""
    filesize = fieldfile_obj.file.size
    megabyte_limit = 3.0
    if filesize > megabyte_limit * 1024 * 1024:
        raise ValidationError("Max file size is %sMB" % str(megabyte_limit))


class Issue(models.Model):
    """A single bug report filed against a Domain."""
    # (value, human label) choices for the `label` field below
    labels = (
        (0, 'General'),
        (1, 'Number Error'),
        (2, 'Functional'),
        (3, 'Performance'),
        (4, 'Security'),
        (5, 'Typo'),
        (6, 'Design')
    )
    user = models.ForeignKey(User, null=True, blank=True)
    domain = models.ForeignKey(Domain, null=True, blank=True)
    url = models.URLField()
    description = models.TextField()
    label = models.PositiveSmallIntegerField(choices=labels, default=0)
    views = models.IntegerField(null=True, blank=True)
    status = models.CharField(max_length=10, default="open", null=True, blank=True)
    user_agent = models.CharField(max_length=255, default="", null=True, blank=True)
    ocr = models.TextField(default="", null=True, blank=True)
    screenshot = models.ImageField(upload_to="screenshots", validators=[validate_image])
    closed_by = models.ForeignKey(User, null=True, blank=True, related_name="closed_by")
    closed_date = models.DateTimeField(default=None, null=True, blank=True)
    github_url = models.URLField(default="", null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.description

    @property
    def domain_title(self):
        parsed_url = urlparse(self.url)
        return parsed_url.netloc.split(".")[-2:][0].title()

    @property
    def hostname_domain(self):
        parsed_url = urlparse(self.url)
        return parsed_url.hostname

    @property
    def domain_name(self):
        # strip a single leading subdomain: a.b.c -> b.c
        parsed_url = urlparse(self.url)
        domain = parsed_url.hostname
        temp = domain.rsplit('.')
        if (len(temp) == 3):
            domain = temp[1] + '.' + temp[2]
        return domain

    def get_twitter_message(self):
        """Build a tweet <= 140 chars: prefix + domain + truncated description
        + link to the issue page."""
        issue_link = " bugheist.com/issue/" + str(self.id)
        prefix = "Bug found on @"
        spacer = " | "
        msg = prefix + self.domain_title + spacer + self.description[:140 - (
            len(prefix) + len(self.domain_title) + len(spacer) + len(issue_link))] + issue_link
        return msg

    def get_ocr(self):
        """Lazily OCR the screenshot and cache the text on the model."""
        if self.ocr:
            return self.ocr
        else:
            try:
                import pytesseract
                self.ocr = pytesseract.image_to_string(Image.open(self.screenshot))
                self.save()
                return self.ocr
            except:
                return "OCR not installed"

    @property
    def get_absolute_url(self):
        return "/issue/" + str(self.id)

    class Meta:
        ordering = ['-created']


# Tweet length cap; overridable via settings.TWITTER_MAXLENGTH.
TWITTER_MAXLENGTH = getattr(settings, 'TWITTER_MAXLENGTH', 140)


def post_to_twitter(sender, instance, *args, **kwargs):
    """post_save hook: tweet newly created issues (skipped in DEBUG).

    Returns False on any early exit (update not create, missing credentials,
    or a Twitter API failure).
    """
    # only tweet on creation, not on subsequent saves
    if not kwargs.get('created'):
        return False
    try:
        consumer_key = os.environ['TWITTER_CONSUMER_KEY']
        consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
        access_key = os.environ['TWITTER_ACCESS_KEY']
        access_secret = os.environ['TWITTER_ACCESS_SECRET']
    except KeyError:
        print 'WARNING: Twitter account not configured.'
        return False

    try:
        text = instance.get_twitter_message()
    except AttributeError:
        text = unicode(instance)

    mesg = u'%s' % (text)
    if len(mesg) > TWITTER_MAXLENGTH:
        # truncate and append an ellipsis while staying within the limit
        size = len(mesg + '...') - TWITTER_MAXLENGTH
        mesg = u'%s...' % (text[:-size])

    import logging
    logger = logging.getLogger('testlogger')

    if not settings.DEBUG:
        try:
            auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
            auth.set_access_token(access_key, access_secret)
            api = tweepy.API(auth)
            # attach the screenshot to the tweet
            file = default_storage.open(instance.screenshot.file.name, 'rb')
            media_ids = api.media_upload(filename=unidecode(instance.screenshot.file.name), file=file)
            params = dict(status=mesg, media_ids=[media_ids.media_id_string])
            api.update_status(**params)
        except Exception, ex:
            print 'ERROR:', str(ex)
            logger.debug('rem %s' % str(ex))
            return False


signals.post_save.connect(post_to_twitter, sender=Issue)


class Hunt(models.Model):
    """A paid bug-bounty campaign created by a user for a URL."""
    user = models.ForeignKey(User)
    url = models.URLField()
    prize = models.IntegerField()
    logo = models.ImageField(upload_to="logos", null=True, blank=True)
    plan = models.CharField(max_length=10)
    txn_id = models.CharField(max_length=50, null=True, blank=True)
    color = models.CharField(max_length=10, null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    @property
    def domain_title(self):
        parsed_url = urlparse(self.url)
        return parsed_url.netloc.split(".")[-2:][0].title()

    class Meta:
        ordering = ['-id']


class Points(models.Model):
    """A score entry awarded to a user, optionally tied to an issue/domain."""
    user = models.ForeignKey(User)
    issue = models.ForeignKey(Issue, null=True, blank=True)
    domain = models.ForeignKey(Domain, null=True, blank=True)
    score = models.IntegerField()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)


# @receiver(user_logged_in, dispatch_uid="some.unique.string.id.for.allauth.user_logged_in")
# def user_logged_in_(request, user, **kwargs):
#     if not settings.TESTING:
#         action.send(user, verb='logged in')


class InviteFriend(models.Model):
    """Record of an invitation email sent by a user."""
    sender = models.ForeignKey(User)
    recipient = models.EmailField()
    sent = models.DateTimeField(auto_now_add=True, db_index=True)

    class Meta:
        ordering = ('-sent',)
        verbose_name = 'invitation'
        verbose_name_plural = 'invitations'


def user_images_path(instance, filename):
    """Upload path for avatars: avatars/user_<id>/<slugified-name><ext>."""
    from django.template.defaultfilters import slugify
    filename, ext = os.path.splitext(filename)
    return 'avatars/user_{0}/{1}{2}'.format(instance.user.id, slugify(filename), ext)


class UserProfile(models.Model):
    """Per-user profile auto-created alongside the auth User."""
    # NOTE: `title` is first the choices tuple, then deliberately rebound
    # below as the IntegerField that uses those choices.
    title = (
        (0, 'Unrated'),
        (1, 'Bronze'),
        (2, 'Silver'),
        (3, 'Gold'),
        (4, 'Platinum'),
    )
    follows = models.ManyToManyField('self', related_name='follower', symmetrical=False, blank=True)
    user = AutoOneToOneField('auth.user', related_name="userprofile")
    user_avatar = models.ImageField(upload_to=user_images_path, blank=True, null=True)
    title = models.IntegerField(choices=title, default=0)
    winnings = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    issue_upvoted = models.ManyToManyField(Issue, blank=True, related_name="upvoted")
    issue_saved = models.ManyToManyField(Issue, blank=True, related_name="saved")

    def avatar(self, size=36):
        # uploaded avatar wins; otherwise fall back to a linked social
        # account's picture.  May return None if neither exists.
        # NOTE(review): `size` is currently unused — confirm before removing.
        if self.user_avatar:
            return self.user_avatar.url

        for account in self.user.socialaccount_set.all():
            if 'avatar_url' in account.extra_data:
                return account.extra_data['avatar_url']
            elif 'picture' in account.extra_data:
                return account.extra_data['picture']

    def __unicode__(self):
        return self.user.email


def create_profile(sender, **kwargs):
    """post_save hook: create an empty UserProfile for each new User."""
    user = kwargs["instance"]
    if kwargs["created"]:
        profile = UserProfile(user=user)
        profile.save()


post_save.connect(create_profile, sender=User)
agpl-3.0
mark-r-g/hydrus
tests/test_rapidclus.py
1
1819
# Mark Gatheman <markrg@protonmail.com>
#
# This file is part of Hydrus.
#
# Hydrus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hydrus.  If not, see <http://www.gnu.org/licenses/>.

import random
from collections import Counter

import numpy as np

from hydrus.rapidclus import (
    close_inner,
    close_outer,
    choose_initial_seeds,
    rapidclus,
)


def test_close_inner():
    # Nearest adjacent pair within a sorted sequence.
    assert close_inner([1, 3, 6, 10, 11]) == (3, 4, 10, 11, 1)
    assert close_inner(range(1, 100, 5)) == (0, 1, 1, 6, 5)


def test_close_outer():
    # Nearest element of a sorted sequence to an outside value.
    sorted_vals = [1, 3, 6, 10, 11]
    assert close_outer(sorted_vals, 7) == (2, 6, 1)
    assert close_outer(sorted_vals, 0) == (0, 1, 1)
    assert close_outer(sorted_vals, 111) == (4, 11, 100)


def test_choose_initial_seeds():
    small_sample = [1, 3, 6, 10, 11, 100]
    assert choose_initial_seeds(small_sample, 3) == [1, 11, 100]
    assert choose_initial_seeds(small_sample, 5) == [1, 3, 6, 11, 100]

    # Deterministic pseudo-random sample for a larger case.
    random.seed(36261837)
    gauss_sample = [int(random.gauss(0, 1000)) for _ in range(100)]
    assert choose_initial_seeds(gauss_sample, 5) == [-2376, -862, 521, 1948, 3239]


def test_rapidclus():
    random.seed(12521184)
    observations = [random.gauss(0, 1) for _ in range(1000)]

    # Cluster sizes are stable for the fixed seed ...
    sizes = sorted(Counter(rapidclus(observations)).values())
    assert sizes == [34, 41, 233, 251, 441]

    # ... and list input and ndarray input give identical assignments.
    assert rapidclus(observations) == rapidclus(np.array(observations))
gpl-3.0
joergdietrich/astropy
astropy/utils/tests/test_console.py
2
6316
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-

# TEST_UNICODE_LITERALS

# Tests for astropy.utils.console: colored terminal output, spinners,
# progress bars, and human-readable time/size formatting.  Runs under both
# Python 2 and 3 via the bundled `six`.

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from ...extern import six  # pylint: disable=W0611
from ...extern.six import next
from ...extern.six.moves import range

import io
import locale

from ...tests.helper import pytest

from .. import console
from ... import units as u


class FakeTTY(io.StringIO):
    """IOStream that fakes a TTY; provide an encoding to emulate an output
    stream with a specific encoding.
    """

    def __new__(cls, encoding=None):
        # Return a new subclass of FakeTTY with the requested encoding
        if encoding is None:
            return super(FakeTTY, cls).__new__(cls)

        # Since we're using unicode_literals in this module ensure that this is
        # a 'str' object (since a class name can't be unicode in Python 2.7)
        encoding = str(encoding)
        # Dynamically build e.g. 'AsciiFakeTTY' with `encoding` as a class
        # attribute, so write() below can enforce it.
        cls = type(encoding.title() + cls.__name__, (cls,),
                   {'encoding': encoding})

        return cls.__new__(cls)

    def __init__(self, encoding=None):
        # `encoding` was consumed by __new__; nothing further to store here.
        super(FakeTTY, self).__init__()

    def write(self, s):
        if isinstance(s, bytes):
            # Just allow this case to work
            s = s.decode('latin-1')
        elif self.encoding is not None:
            # Raises UnicodeEncodeError if `s` can't be represented in the
            # emulated encoding; the encoded bytes themselves are discarded.
            s.encode(self.encoding)

        return super(FakeTTY, self).write(s)

    def isatty(self):
        return True


def test_fake_tty():
    # First test without a specified encoding; we should be able to write
    # arbitrary unicode strings
    f1 = FakeTTY()
    assert f1.isatty()
    f1.write('☃')
    assert f1.getvalue() == '☃'

    # Now test an ASCII-only TTY--it should raise a UnicodeEncodeError when
    # trying to write a string containing non-ASCII characters
    f2 = FakeTTY('ascii')
    assert f2.isatty()
    assert f2.__class__.__name__ == 'AsciiFakeTTY'
    assert pytest.raises(UnicodeEncodeError, f2.write, '☃')
    assert f2.getvalue() == ''


@pytest.mark.skipif(str("sys.platform.startswith('win')"))
def test_color_text():
    assert console._color_text("foo", "green") == '\033[0;32mfoo\033[0m'


def test_color_print():
    # This stuff is hard to test, at least smoke test it
    console.color_print("foo", "green")

    console.color_print("foo", "green", "bar", "red")


def test_color_print2():
    # Test that this automatically detects that io.StringIO is
    # not a tty
    stream = io.StringIO()
    console.color_print("foo", "green", file=stream)
    assert stream.getvalue() == 'foo\n'

    stream = io.StringIO()
    console.color_print("foo", "green", "bar", "red", "baz", file=stream)
    assert stream.getvalue() == 'foobarbaz\n'


@pytest.mark.skipif(str("sys.platform.startswith('win')"))
def test_color_print3():
    # Test that this thinks the FakeTTY is a tty and applies colors.
    stream = FakeTTY()
    console.color_print("foo", "green", file=stream)
    assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\n'

    stream = FakeTTY()
    console.color_print("foo", "green", "bar", "red", "baz", file=stream)
    assert stream.getvalue() == '\x1b[0;32mfoo\x1b[0m\x1b[0;31mbar\x1b[0mbaz\n'


def test_color_print_unicode():
    console.color_print("überbær", "red")


def test_color_print_invalid_color():
    # unknown color names must not raise; output is just uncolored
    console.color_print("foo", "unknown")


@pytest.mark.skipif(str('not six.PY2'))
def test_color_print_no_default_encoding():
    """Regression test for #1244

    In some environments `locale.getpreferredencoding` can return ``''``;
    make sure there are some reasonable fallbacks.
    """

    # Not sure of a reliable way to force getpreferredencoding() to return
    # an empty string other than to temporarily patch it
    orig_func = locale.getpreferredencoding
    locale.getpreferredencoding = lambda: ''
    try:
        # Try printing a string that can be utf-8 decoded (the default)
        stream = io.StringIO()
        console.color_print(b'\xe2\x98\x83', 'white', file=stream)
        assert stream.getvalue() == '☃\n'

        # Test the latin-1 fallback
        stream = io.StringIO()
        console.color_print(b'\xcd\xef', 'red', file=stream)
        assert stream.getvalue() == 'Íï\n'
    finally:
        # always restore the patched function
        locale.getpreferredencoding = orig_func


def test_spinner_non_unicode_console():
    """Regression test for #1760

    Ensures that the spinner can fall go into fallback mode when using the
    unicode spinner on a terminal whose default encoding cannot encode the
    unicode characters.
    """

    stream = FakeTTY('ascii')
    chars = console.Spinner._default_unicode_chars
    with console.Spinner("Reticulating splines", file=stream,
                         chars=chars) as s:
        next(s)


def test_progress_bar():
    # This stuff is hard to test, at least smoke test it
    with console.ProgressBar(50) as bar:
        for i in range(50):
            bar.update()


def test_progress_bar2():
    for x in console.ProgressBar(range(50)):
        pass


def test_progress_bar3():
    def do_nothing(*args, **kwargs):
        pass

    console.ProgressBar.map(do_nothing, range(50))


def test_zero_progress_bar():
    # a zero-length bar must not divide by zero
    with console.ProgressBar(0) as bar:
        pass


def test_progress_bar_as_generator():
    sum = 0
    for x in console.ProgressBar(range(50)):
        sum += x
    assert sum == 1225

    sum = 0
    for x in console.ProgressBar(50):
        sum += x
    assert sum == 1225


@pytest.mark.parametrize(("seconds", "string"),
                         [(864088, " 1w 3d"),
                          (187213, " 2d 4h"),
                          (3905, " 1h 5m"),
                          (64, " 1m 4s"),
                          (15, "   15s"),
                          (2, "    2s")])
def test_human_time(seconds, string):
    human_time = console.human_time(seconds)
    assert human_time == string


@pytest.mark.parametrize(("size", "string"),
                         [(8640882, "8.6M"),
                          (187213, "187k"),
                          (3905, "3.9k"),
                          (64, " 64 "),
                          (2, "  2 "),
                          (10 * u.GB, " 10G")])
def test_human_file_size(size, string):
    human_time = console.human_file_size(size)
    assert human_time == string


@pytest.mark.parametrize("size", (50 * u.km, 100 * u.g))
def test_bad_human_file_size(size):
    # non-byte-compatible units must raise a UnitConversionError
    assert pytest.raises(u.UnitConversionError, console.human_file_size, size)
bsd-3-clause
DooMLoRD/android_kernel_sony_msm8660
scripts/build-all.py
1250
9474
#! /usr/bin/env python # Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of Code Aurora nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. # # TODO: Accept arguments to indicate what to build. 
import glob from optparse import OptionParser import subprocess import os import os.path import shutil import sys version = 'build-all.py, version 0.01' build_dir = '../all-kernels' make_command = ["vmlinux", "modules"] make_env = os.environ make_env.update({ 'ARCH': 'arm', 'CROSS_COMPILE': 'arm-none-linux-gnueabi-', 'KCONFIG_NOTIMESTAMP': 'true' }) all_options = {} def error(msg): sys.stderr.write("error: %s\n" % msg) def fail(msg): """Fail with a user-printed message""" error(msg) sys.exit(1) def check_kernel(): """Ensure that PWD is a kernel directory""" if (not os.path.isfile('MAINTAINERS') or not os.path.isfile('arch/arm/mach-msm/Kconfig')): fail("This doesn't seem to be an MSM kernel dir") def check_build(): """Ensure that the build directory is present.""" if not os.path.isdir(build_dir): try: os.makedirs(build_dir) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise def update_config(file, str): print 'Updating %s with \'%s\'\n' % (file, str) defconfig = open(file, 'a') defconfig.write(str + '\n') defconfig.close() def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = {} for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'): names[os.path.basename(n)[:-10]] = n for n in glob.glob('arch/arm/configs/qsd*_defconfig'): names[os.path.basename(n)[:-10]] = n for n in glob.glob('arch/arm/configs/apq*_defconfig'): names[os.path.basename(n)[:-10]] = n return names class Builder: def __init__(self, logname): self.logname = logname self.fd = open(logname, 'w') def run(self, args): devnull = open('/dev/null', 'r') proc = subprocess.Popen(args, stdin=devnull, env=make_env, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) count = 0 # for line in proc.stdout: rawfd = proc.stdout.fileno() while True: line = os.read(rawfd, 1024) if not line: break self.fd.write(line) self.fd.flush() if all_options.verbose: sys.stdout.write(line) sys.stdout.flush() else: for i in range(line.count('\n')): count += 1 
if count == 64: count = 0 print sys.stdout.write('.') sys.stdout.flush() print result = proc.wait() self.fd.close() return result failed_targets = [] def build(target): dest_dir = os.path.join(build_dir, target) log_name = '%s/log-%s.log' % (build_dir, target) print 'Building %s in %s log %s' % (target, dest_dir, log_name) if not os.path.isdir(dest_dir): os.mkdir(dest_dir) defconfig = 'arch/arm/configs/%s_defconfig' % target dotconfig = '%s/.config' % dest_dir savedefconfig = '%s/defconfig' % dest_dir shutil.copyfile(defconfig, dotconfig) devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, '%s_defconfig' % target], env=make_env, stdin=devnull) devnull.close() if not all_options.updateconfigs: build = Builder(log_name) result = build.run(['make', 'O=%s' % dest_dir] + make_command) if result != 0: if all_options.keep_going: failed_targets.append(target) fail_or_error = error else: fail_or_error = fail fail_or_error("Failed to build %s, see %s" % (target, build.logname)) # Copy the defconfig back. if all_options.configs or all_options.updateconfigs: devnull = open('/dev/null', 'r') subprocess.check_call(['make', 'O=%s' % dest_dir, 'savedefconfig'], env=make_env, stdin=devnull) devnull.close() shutil.copyfile(savedefconfig, defconfig) def build_many(allconf, targets): print "Building %d target(s)" % len(targets) for target in targets: if all_options.updateconfigs: update_config(allconf[target], all_options.updateconfigs) build(target) if failed_targets: fail('\n '.join(["Failed targets:"] + [target for target in failed_targets])) def main(): global make_command check_kernel() check_build() configs = scan_configs() usage = (""" %prog [options] all -- Build all targets %prog [options] target target ... 
-- List specific targets %prog [options] perf -- Build all perf targets %prog [options] noperf -- Build all non-perf targets""") parser = OptionParser(usage=usage, version=version) parser.add_option('--configs', action='store_true', dest='configs', help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') parser.add_option('--oldconfig', action='store_true', dest='oldconfig', help='Only process "make oldconfig"') parser.add_option('--updateconfigs', dest='updateconfigs', help="Update defconfigs with provided option setting, " "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', dest='load_average', help="Don't start multiple jobs unless load is below LOAD_AVERAGE") parser.add_option('-k', '--keep-going', action='store_true', dest='keep_going', default=False, help="Keep building other targets if a target fails") parser.add_option('-m', '--make-target', action='append', help='Build the indicated make target (default: %s)' % ' '.join(make_command)) (options, args) = parser.parse_args() global all_options all_options = options if options.list: print "Available targets:" for target in configs.keys(): print " %s" % target sys.exit(0) if options.oldconfig: make_command = ["oldconfig"] elif options.make_target: make_command = options.make_target if options.jobs: make_command.append("-j%d" % options.jobs) if options.load_average: make_command.append("-l%d" % options.load_average) if args == ['all']: build_many(configs, configs.keys()) elif args == ['perf']: targets = [] for t in configs.keys(): if "perf" in t: targets.append(t) build_many(configs, targets) elif args == ['noperf']: targets = [] for t in configs.keys(): if "perf" not in t: 
targets.append(t) build_many(configs, targets) elif len(args) > 0: targets = [] for t in args: if t not in configs.keys(): parser.error("Target '%s' not one of %s" % (t, configs.keys())) targets.append(t) build_many(configs, targets) else: parser.error("Must specify a target to build, or 'all'") if __name__ == "__main__": main()
gpl-2.0
googleapis/googleapis-gen
google/cloud/secrets/v1beta1/secretmanager-v1beta1-py/google/cloud/secretmanager/__init__.py
1
3026
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from google.cloud.secretmanager_v1beta1.services.secret_manager_service.client import SecretManagerServiceClient from google.cloud.secretmanager_v1beta1.services.secret_manager_service.async_client import SecretManagerServiceAsyncClient from google.cloud.secretmanager_v1beta1.types.resources import Replication from google.cloud.secretmanager_v1beta1.types.resources import Secret from google.cloud.secretmanager_v1beta1.types.resources import SecretPayload from google.cloud.secretmanager_v1beta1.types.resources import SecretVersion from google.cloud.secretmanager_v1beta1.types.service import AccessSecretVersionRequest from google.cloud.secretmanager_v1beta1.types.service import AccessSecretVersionResponse from google.cloud.secretmanager_v1beta1.types.service import AddSecretVersionRequest from google.cloud.secretmanager_v1beta1.types.service import CreateSecretRequest from google.cloud.secretmanager_v1beta1.types.service import DeleteSecretRequest from google.cloud.secretmanager_v1beta1.types.service import DestroySecretVersionRequest from google.cloud.secretmanager_v1beta1.types.service import DisableSecretVersionRequest from google.cloud.secretmanager_v1beta1.types.service import EnableSecretVersionRequest from google.cloud.secretmanager_v1beta1.types.service import GetSecretRequest from google.cloud.secretmanager_v1beta1.types.service import GetSecretVersionRequest from 
google.cloud.secretmanager_v1beta1.types.service import ListSecretsRequest from google.cloud.secretmanager_v1beta1.types.service import ListSecretsResponse from google.cloud.secretmanager_v1beta1.types.service import ListSecretVersionsRequest from google.cloud.secretmanager_v1beta1.types.service import ListSecretVersionsResponse from google.cloud.secretmanager_v1beta1.types.service import UpdateSecretRequest __all__ = ('SecretManagerServiceClient', 'SecretManagerServiceAsyncClient', 'Replication', 'Secret', 'SecretPayload', 'SecretVersion', 'AccessSecretVersionRequest', 'AccessSecretVersionResponse', 'AddSecretVersionRequest', 'CreateSecretRequest', 'DeleteSecretRequest', 'DestroySecretVersionRequest', 'DisableSecretVersionRequest', 'EnableSecretVersionRequest', 'GetSecretRequest', 'GetSecretVersionRequest', 'ListSecretsRequest', 'ListSecretsResponse', 'ListSecretVersionsRequest', 'ListSecretVersionsResponse', 'UpdateSecretRequest', )
apache-2.0
michalliu/OpenWrt-Firefly-Libraries
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/idlelib/ZoomHeight.py
130
1300
# Sample extension: zoom a window to maximum height import re import sys from idlelib import macosxSupport class ZoomHeight: menudefs = [ ('windows', [ ('_Zoom Height', '<<zoom-height>>'), ]) ] def __init__(self, editwin): self.editwin = editwin def zoom_height_event(self, event): top = self.editwin.top zoom_height(top) def zoom_height(top): geom = top.wm_geometry() m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom) if not m: top.bell() return width, height, x, y = map(int, m.groups()) newheight = top.winfo_screenheight() if sys.platform == 'win32': newy = 0 newheight = newheight - 72 elif macosxSupport.isAquaTk(): # The '88' below is a magic number that avoids placing the bottom # of the window below the panel on my machine. I don't know how # to calculate the correct value for this with tkinter. newy = 22 newheight = newheight - newy - 88 else: #newy = 24 newy = 0 #newheight = newheight - 96 newheight = newheight - 88 if height >= newheight: newgeom = "" else: newgeom = "%dx%d+%d+%d" % (width, newheight, x, newy) top.wm_geometry(newgeom)
gpl-2.0
nimzco/Environment
Sublime/Packages/ArcticTypescript/lib/system/Completion.py
2
10326
# coding=utf8 import re import json import sublime from ..utils import Debug from ..utils.uiutils import get_prefix from ..utils.viewutils import get_file_infos, get_content_of_line_at # IS AN OBJECT MEMBER # TRUE: line=Instance. or line=Instance.fooba or line=Instance.foobar.alic # FALSE: line=Inst js_id_re = re.compile(u'^[_$a-zA-Z\u00FF-\uFFFF][_$a-zA-Z0-9\u00FF-\uFFFF]*') def is_member_completion(line_text): def partial_completion(): sp = line_text.split(".") if len(sp) > 1: return js_id_re.match(sp[-1]) is not None return False return line_text.endswith(".") or partial_completion() def get_col_after_last_dot(line_text): return line_text.rfind(".") + 1 class Completion(object): completion_chars = ['.']#['.',':'] completion_list = [] interface = False enabled_for_col_reference = '' # 'dot' or 'cursor' enabled_for = {'line': 0, 'col': 0, 'viewid': -1} def __init__(self, project): self.project = project # PREPARE LISTE def prepare_list(self, tss_result_json): del self.completion_list[:] try: entries = json.loads(tss_result_json) entries = entries['entries'] except: if tss_result_json.strip() == 'null': sublime.status_message('ArcticTypescript: no completions available') else: Debug('error', 'Completion request failed: %s' % tss_result_json) return 0 for entry in entries: if self.interface and entry['kind'] != 'primitive type' and entry['kind'] != 'interface' : continue key = self._get_list_key(entry) value = self._get_list_value(entry) self.completion_list.append((key,value)) self.completion_list.sort() return len(self.completion_list) # GET LISTE def get_list(self): return self.completion_list # TYPESCRIPT COMPLETION ? 
def trigger(self, view, force_enable=False): cursor_pos = view.sel()[0].begin() # cursor pos as int (cursor_line, cursor_col) = view.rowcol(cursor_pos) char = view.substr(cursor_pos-1) enabled = force_enable or (char in self.completion_chars) self.interface = char is ':' if enabled: Debug('autocomplete', "Autocompletion for line %i , %i, forced=%s" % (cursor_line+1, cursor_col+1, force_enable) ) is_member = is_member_completion( get_content_of_line_at(view, cursor_pos) ) is_member_str = str( is_member ).lower() # 'true' or 'false' # do execute tss.js complete for cursor positon after last dot, so we use # sublimes fuzzy mechanism to reduce the list, not the mechanism # (1:1 matching of the typed chars) tss.js would use autocomplete_col = 0 if is_member: autocomplete_col = get_col_after_last_dot( get_content_of_line_at(view, cursor_pos) ) self.enabled_for_col_reference = 'dot' Debug('autocomplete', " -> use dot as referene") if autocomplete_col != cursor_col: Debug('autocomplete', " -> dot is on col %i, use this col instead of cursor position %i" % (autocomplete_col+1, cursor_col+1)) else: Debug('autocomplete', " -> use cursor position %i for autocomplete" % (cursor_col+1)) self.enabled_for_col_reference = 'cursor' autocomplete_col = cursor_col self.enabled_for['viewid'] = view.id() self.enabled_for['line'] = cursor_line self.enabled_for['col'] = autocomplete_col Debug('autocomplete', " -> push current file contents as update to tss.js") self.project.tsserver.update(view) def async_react_completions_available(tss_result_json, filename, line, col, is_member_str): Debug('autocomplete', "Autocompletion results available for line %i , %i" % (line+1, col+1) ) i = self.prepare_list(tss_result_json) Debug('autocomplete', " -> prepare List (%i items)" % i ) # view or line changed current_view = sublime.active_window().active_view() current_cursor_pos = current_view.sel()[0].begin() (current_cursor_line, current_cursor_col) = current_view.rowcol(current_cursor_pos) 
Debug('autocomplete', " => CL: {0}, L: {1}, efcr: {2}, ccc: {3}, col: {4}, ismstr: {5}" .format(current_cursor_line, line, self.enabled_for_col_reference, current_cursor_col, col, is_member_str)) if current_view.id() != self.enabled_for['viewid'] or filename != current_view.file_name(): Debug('autocomplete', " -> file changed since activation of autocomplete or out-dated request -> cancel") return if current_cursor_line != self.enabled_for['line'] or current_cursor_line != line: Debug('autocomplete', " -> line changed since start of autocomplete (%i to %i) or out-dated request -> cancel" % (current_cursor_line, line) ) return if self.enabled_for_col_reference == 'cursor' \ and (current_cursor_col != self.enabled_for['col'] or current_cursor_col != col): Debug('autocomplete', " -> cursor changed position (current col: %i ; at command issue: %i) or out-dated request -> cancel" % (current_cursor_col, col) ) return if is_member_str == 'true': current_dot_col = get_col_after_last_dot( get_content_of_line_at(view, current_cursor_pos) ) if self.enabled_for_col_reference == 'dot' \ and (current_dot_col != self.enabled_for['col'] or current_dot_col != col): Debug('autocomplete', " -> it's not the same dot reference anymore (current dot pos: %i ; at command issue: %i) or out-dated request -> cancel" % (current_dot_col, col) ) return Debug('autocomplete', " -> command to sublime to now show autocomplete box with prepared list" ) # this will trigger Listener.on_query_completions # but on_query_completions needs to have the completion list # already available current_view.run_command('auto_complete',{ 'disable_auto_insert': True, 'api_completions_only': True, 'next_completion_if_showing': True }) Debug('autocomplete', " -> (sublime cmd finished)" ) self.project.tsserver.complete(view.file_name(), cursor_line, autocomplete_col, is_member_str, async_react_completions_available) # ENTRY KEY def _get_list_key(self,entry): #{'name': 'SVGLineElement', # 'kind': 'var', # 
'kindModifiers': 'declare', # 'type': 'interface SVGLineElement\nvar SVGLineElement: {\n new (): SVGLineElement;\n prototype: SVGLineElement;\n}', # 'docComment': ''} kindModifiers = get_prefix(entry['kindModifiers']) kind = get_prefix(entry['kind']) type_ = entry['type'] if 'type' in entry else entry['name'] type_ = type_.split('\n')[0] if kindModifiers == "" and kind == "": kind = get_prefix(type_.split(' ')[0]) return kindModifiers+' '+kind+' '+str(entry['name'])+' '+str(type_) # ENTRY VALUE def _get_list_value(self,entry): # {'kind': 'method', 'docComment': '', 'kindModifiers': 'declare', 'type': '(method) MSNodeExtensions.swapNode(otherNode: Node): Node', 'name': 'swapNode'} # {'kind': 'property', 'docComment': '', 'kindModifiers': 'declare', 'type': '(property) GlobalEventHandlers.onpointerup: (ev: PointerEvent) => any', 'name': 'onpointerup'} # {'kind': 'property', 'docComment': '', 'kindModifiers': 'declare', 'type': '(property) Node.DOCUMENT_TYPE_NODE: number', 'name': 'DOCUMENT_TYPE_NODE'} # {'kind': 'method', 'docComment': 'Allows updating the print settings for the page.', 'kindModifiers': 'declare', 'type': '(method) Document.updateSettings(): void', 'name': 'updateSettings'} # {'kindModifiers': 'declare', 'docComment': '', 'kind': 'function', 'name': 'setTimeout', 'type': '(function) setTimeout(handler: any, timeout?: any, ...args: any[]): number'} type_ = entry['type'] if 'type' in entry else entry['name'] # remove (<kind>) kind_part = "(%s)" % entry['kind'] if type_.startswith(kind_part): type_ = type_[len(kind_part):] # catches the inner argumetns of a function call match = re.match('.*\((.*)\):', str(type_)) result = [] if match: variables = self._parse_args(match.group(1)) count = 1 for variable in variables: splits = variable.split(':') if len(splits) > 1: data = '"'+variable+'"' data = '${'+str(count)+':'+data+'}' result.append(data) count = count+1 else: result.append('') return re.escape(entry['name'])+'('+','.join(result)+')' else: return 
re.escape(entry['name']) # PARSE FUNCTION ARGUMENTS def _parse_args(self, group): # group = "otherNode: Node, param2: string" args = [] arg = "" callback = False for char in group: if char == '(' or char == '<': arg += char callback = True elif char == ')' or char == '>': arg += char callback = False elif char == ',': if callback == False: args.append(arg) arg = "" else: arg+=char else: arg+=char args.append(arg) return args
mit
jhayworth/config
.emacs.d/elpy/rpc-venv/local/lib/python2.7/site-packages/pip/_internal/distributions/sdist.py
20
4086
import logging from pip._internal.build_env import BuildEnvironment from pip._internal.distributions.base import AbstractDistribution from pip._internal.exceptions import InstallationError from pip._internal.utils.subprocess import runner_with_spinner_message from pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import Set, Tuple from pip._vendor.pkg_resources import Distribution from pip._internal.index.package_finder import PackageFinder logger = logging.getLogger(__name__) class SourceDistribution(AbstractDistribution): """Represents a source distribution. The preparation step for these needs metadata for the packages to be generated, either using PEP 517 or using the legacy `setup.py egg_info`. """ def get_pkg_resources_distribution(self): # type: () -> Distribution return self.req.get_dist() def prepare_distribution_metadata(self, finder, build_isolation): # type: (PackageFinder, bool) -> None # Load pyproject.toml, to determine whether PEP 517 is to be used self.req.load_pyproject_toml() # Set up the build isolation, if this requirement should be isolated should_isolate = self.req.use_pep517 and build_isolation if should_isolate: self._setup_isolation(finder) self.req.prepare_metadata() def _setup_isolation(self, finder): # type: (PackageFinder) -> None def _raise_conflicts(conflicting_with, conflicting_reqs): # type: (str, Set[Tuple[str, str]]) -> None format_string = ( "Some build dependencies for {requirement} " "conflict with {conflicting_with}: {description}." ) error_message = format_string.format( requirement=self.req, conflicting_with=conflicting_with, description=', '.join( '{} is incompatible with {}'.format(installed, wanted) for installed, wanted in sorted(conflicting) ) ) raise InstallationError(error_message) # Isolate in a BuildEnvironment and install the build-time # requirements. 
pyproject_requires = self.req.pyproject_requires assert pyproject_requires is not None self.req.build_env = BuildEnvironment() self.req.build_env.install_requirements( finder, pyproject_requires, 'overlay', "Installing build dependencies" ) conflicting, missing = self.req.build_env.check_requirements( self.req.requirements_to_check ) if conflicting: _raise_conflicts("PEP 517/518 supported requirements", conflicting) if missing: logger.warning( "Missing build requirements in pyproject.toml for %s.", self.req, ) logger.warning( "The project does not specify a build backend, and " "pip cannot fall back to setuptools without %s.", " and ".join(map(repr, sorted(missing))) ) # Install any extra build dependencies that the backend requests. # This must be done in a second pass, as the pyproject.toml # dependencies must be installed before we can call the backend. with self.req.build_env: runner = runner_with_spinner_message( "Getting requirements to build wheel" ) backend = self.req.pep517_backend assert backend is not None with backend.subprocess_runner(runner): reqs = backend.get_requires_for_build_wheel() conflicting, missing = self.req.build_env.check_requirements(reqs) if conflicting: _raise_conflicts("the backend dependencies", conflicting) self.req.build_env.install_requirements( finder, missing, 'normal', "Installing backend dependencies" )
gpl-3.0
tempbottle/kbengine
kbe/res/scripts/common/Lib/distutils/tests/test_util.py
94
11250
"""Tests for distutils.util.""" import os import sys import unittest from copy import copy from test.support import run_unittest from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError from distutils.util import (get_platform, convert_path, change_root, check_environ, split_quoted, strtobool, rfc822_escape, byte_compile, grok_environment_error) from distutils import util # used to patch _environ_checked from distutils.sysconfig import get_config_vars from distutils import sysconfig from distutils.tests import support import _osx_support class UtilTestCase(support.EnvironGuard, unittest.TestCase): def setUp(self): super(UtilTestCase, self).setUp() # saving the environment self.name = os.name self.platform = sys.platform self.version = sys.version self.sep = os.sep self.join = os.path.join self.isabs = os.path.isabs self.splitdrive = os.path.splitdrive self._config_vars = copy(sysconfig._config_vars) # patching os.uname if hasattr(os, 'uname'): self.uname = os.uname self._uname = os.uname() else: self.uname = None self._uname = None os.uname = self._get_uname def tearDown(self): # getting back the environment os.name = self.name sys.platform = self.platform sys.version = self.version os.sep = self.sep os.path.join = self.join os.path.isabs = self.isabs os.path.splitdrive = self.splitdrive if self.uname is not None: os.uname = self.uname else: del os.uname sysconfig._config_vars = copy(self._config_vars) super(UtilTestCase, self).tearDown() def _set_uname(self, uname): self._uname = uname def _get_uname(self): return self._uname def test_get_platform(self): # windows XP, 32bits os.name = 'nt' sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' '[MSC v.1310 32 bit (Intel)]') sys.platform = 'win32' self.assertEqual(get_platform(), 'win32') # windows XP, amd64 os.name = 'nt' sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' '[MSC v.1310 32 bit (Amd64)]') sys.platform = 'win32' self.assertEqual(get_platform(), 'win-amd64') # windows XP, itanium 
os.name = 'nt' sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) ' '[MSC v.1310 32 bit (Itanium)]') sys.platform = 'win32' self.assertEqual(get_platform(), 'win-ia64') # macbook os.name = 'posix' sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) ' '\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]') sys.platform = 'darwin' self._set_uname(('Darwin', 'macziade', '8.11.1', ('Darwin Kernel Version 8.11.1: ' 'Wed Oct 10 18:23:28 PDT 2007; ' 'root:xnu-792.25.20~1/RELEASE_I386'), 'i386')) _osx_support._remove_original_values(get_config_vars()) get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3' get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g ' '-fwrapv -O3 -Wall -Wstrict-prototypes') cursize = sys.maxsize sys.maxsize = (2 ** 31)-1 try: self.assertEqual(get_platform(), 'macosx-10.3-i386') finally: sys.maxsize = cursize # macbook with fat binaries (fat, universal or fat64) _osx_support._remove_original_values(get_config_vars()) get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4' get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-fat') _osx_support._remove_original_values(get_config_vars()) os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.1' self.assertEqual(get_platform(), 'macosx-10.4-fat') _osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-intel') _osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-fat3') 
_osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-universal') _osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3') self.assertEqual(get_platform(), 'macosx-10.4-fat64') for arch in ('ppc', 'i386', 'x86_64', 'ppc64'): _osx_support._remove_original_values(get_config_vars()) get_config_vars()['CFLAGS'] = ('-arch %s -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk ' '-fno-strict-aliasing -fno-common ' '-dynamic -DNDEBUG -g -O3'%(arch,)) self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,)) # linux debian sarge os.name = 'posix' sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) ' '\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]') sys.platform = 'linux2' self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7', '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686')) self.assertEqual(get_platform(), 'linux-i686') # XXX more platforms to tests here def test_convert_path(self): # linux/mac os.sep = '/' def _join(path): return '/'.join(path) os.path.join = _join self.assertEqual(convert_path('/home/to/my/stuff'), '/home/to/my/stuff') # win os.sep = '\\' def _join(*path): return '\\'.join(path) os.path.join = _join self.assertRaises(ValueError, convert_path, '/home/to/my/stuff') self.assertRaises(ValueError, convert_path, 'home/to/my/stuff/') self.assertEqual(convert_path('home/to/my/stuff'), 'home\\to\\my\\stuff') self.assertEqual(convert_path('.'), os.curdir) def test_change_root(self): # linux/mac os.name = 'posix' def _isabs(path): return path[0] == '/' os.path.isabs = _isabs def _join(*path): return '/'.join(path) os.path.join = _join self.assertEqual(change_root('/root', 
'/old/its/here'), '/root/old/its/here') self.assertEqual(change_root('/root', 'its/here'), '/root/its/here') # windows os.name = 'nt' def _isabs(path): return path.startswith('c:\\') os.path.isabs = _isabs def _splitdrive(path): if path.startswith('c:'): return ('', path.replace('c:', '')) return ('', path) os.path.splitdrive = _splitdrive def _join(*path): return '\\'.join(path) os.path.join = _join self.assertEqual(change_root('c:\\root', 'c:\\old\\its\\here'), 'c:\\root\\old\\its\\here') self.assertEqual(change_root('c:\\root', 'its\\here'), 'c:\\root\\its\\here') # BugsBunny os (it's a great os) os.name = 'BugsBunny' self.assertRaises(DistutilsPlatformError, change_root, 'c:\\root', 'its\\here') # XXX platforms to be covered: mac def test_check_environ(self): util._environ_checked = 0 if 'HOME' in os.environ: del os.environ['HOME'] # posix without HOME if os.name == 'posix': # this test won't run on windows check_environ() import pwd self.assertEqual(os.environ['HOME'], pwd.getpwuid(os.getuid())[5]) else: check_environ() self.assertEqual(os.environ['PLAT'], get_platform()) self.assertEqual(util._environ_checked, 1) def test_split_quoted(self): self.assertEqual(split_quoted('""one"" "two" \'three\' \\four'), ['one', 'two', 'three', 'four']) def test_strtobool(self): yes = ('y', 'Y', 'yes', 'True', 't', 'true', 'True', 'On', 'on', '1') no = ('n', 'no', 'f', 'false', 'off', '0', 'Off', 'No', 'N') for y in yes: self.assertTrue(strtobool(y)) for n in no: self.assertFalse(strtobool(n)) def test_rfc822_escape(self): header = 'I am a\npoor\nlonesome\nheader\n' res = rfc822_escape(header) wanted = ('I am a%(8s)spoor%(8s)slonesome%(8s)s' 'header%(8s)s') % {'8s': '\n'+8*' '} self.assertEqual(res, wanted) def test_dont_write_bytecode(self): # makes sure byte_compile raise a DistutilsError # if sys.dont_write_bytecode is True old_dont_write_bytecode = sys.dont_write_bytecode sys.dont_write_bytecode = True try: self.assertRaises(DistutilsByteCompileError, byte_compile, []) 
finally: sys.dont_write_bytecode = old_dont_write_bytecode def test_grok_environment_error(self): # test obsolete function to ensure backward compat (#4931) exc = IOError("Unable to find batch file") msg = grok_environment_error(exc) self.assertEqual(msg, "error: Unable to find batch file") def test_suite(): return unittest.makeSuite(UtilTestCase) if __name__ == "__main__": run_unittest(test_suite())
lgpl-3.0
labase/surdonews
src/surdonews/leao/main.py
1
5477
from jqueryui import jq from browser import document, html from superpython.virgem.main import Sala, Labirinto, Cena, INVENTARIO # importando do virgem STYLE = dict(position="absolute", width=300, left=0, top=0, background="blue") # mudar cor do background lá embaixo STYLE["min-height"] = "300px" IMAGEM = "http://s16.postimg.org/k81hwi2n9/Desert.jpg" class Leao: SETOR = None def __init__(self): pass def monta(self): NONE = [None] * 4 imn = "https://upload.wikimedia.org/wikipedia/commons/1/1e/Est%C3%BAdio_-_TV_Cultura_Montenegro.jpg" iml = "http://mochilaotrips.com/wp-content/uploads/2013/03/IMG_1447.jpg" ims = "https://upload.wikimedia.org/wikipedia/commons/0/01/Morro_de_Castelo_Branco,_aspectos_1,_Castelo_Branco,_concelho_da_Horta,_ilha_do_Faial,_A%C3%A7ores,_Portugal.JPG" imo = "http://www.unicos.cc/wp-content/uploads/2014/12/jornalismo-1-951x476.jpg" irl = "http://www.vipcomm.com.br/site/upload/sbHulk_GN_150614026.jpg" iro = "https://blogpontodeonibus.files.wordpress.com/2013/02/photodownload-php.jpg" iro = "http://imagens.canaltech.com.br/38560.54878-Tirar-fotos.jpg" irn = "http://7diasverdes.com.br/wp-content/uploads/2013/07/Bicicleta-de-passeio.jpg" irs = "http://www.boulevardshopping.com.br/novo/wp-content/uploads/2012/02/Mcdonalds.jpg" isn = "http://www.comercialvidoto.com.br/site/wgc_media/photos/Banco-pe-de-Ferro-Tamandua.png" isl = "http://andif.com.br/imagens/noticias/Banco_Santander_mjg.jpg" iso = "http://imguol.com/2013/01/08/fiat-mille-economy-1357657820399_956x500.jpg" iss = "http://images.forwallpaper.com/files/images/a/a809/a809de18/32241/notepad.jpg" desk = "https://blogpontodeonibus.files.wordpress.com/2012/07/expresso_brasileirold_chassiscania_1.jpg" drawer = "http://s.glbimg.com/og/rg/f/original/2010/07/09/tiago606.jpg" imageM = "" sala_norte = Sala([isn, desk, iss, iso], NONE) # mar sala_leste = Sala([isn, isl, iss, iso], NONE) # mar sala_sul = Sala([irn, irl, irs, iro], NONE) # deserto sala_oeste = Sala([isn, isl, iss, iso], NONE) # mar 
salas = [sala_norte.norte, sala_leste.leste, sala_sul.sul, sala_oeste.oeste] sala_centro = Sala([imn, iml, ims, imo], salas) labirinto = Leao.SETOR = Labirinto([ sala_centro, sala_norte, sala_leste, sala_sul, sala_oeste]) labirinto.norte.leste.meio = Cena(img=imageM) labirinto.sul.sul.meio = Cena(vai=self.help) # mudado labirinto.leste.sul.meio = Cena(vai=self.pega_invent) # mudado labirinto = Cena(vai=self.objetivo) # mudado return labirinto def nao_monta(self): pass def vai(self): labirinto = self.monta() self.monta = self.nao_monta labirinto.centro.norte.vai() return labirinto """def pega_card(self): riocard = "https://www.cartaoriocard.com.br/rcc/static/img/personal-1.png" #link da imagem flag = None def clicou(_): #hipótese de flag input("Você não está num meio de transporte.") if not "card" in INVENTARIO.inventario: #Se o Rio Card não estiver no inventário significa que ele pegou input("Você pegou o RioCard.") INVENTARIO.bota("card", riocard, clicou) else: input("Atenção: o inventário está vazio!")""" def pega_invent(self): riocard = "https://www.cartaoriocard.com.br/rcc/static/img/personal-1.png" # link da imagem flag = None def clicou(_): # hipótese de flag input("Você não está num meio de transporte.") if not "card" in INVENTARIO.inventario: # Se o Rio Card não estiver no inventário significa que ele pegou input("Você pegou o RioCard.") INVENTARIO.bota("card", riocard, clicou) else: input("Atenção: o inventário está vazio!") def help(self): ajuda = "http://icons.iconarchive.com/icons/oxygen-icons.org/oxygen/256/Actions-help-hint-icon.png" flag = None def clicou(_): # caso aconteça flag input("Você precisa ir na sala à leste do atendimento.") if not "ajuda" in INVENTARIO.inventario: input("Você quer saber sobre o meu relátorio sobre a gripe? Ele na escrivaninha na sala lesta à recepção.") INVENTARIO.bota("ajuda", ajuda, clicou) else: input("Achou o relatorio? 
Procurou na sala certa?") """ def objetivo(self): ajuda = "http://www.iconsdownload.net/icons/256/11335-target-icon.png" flag = None def clicou(_): input("Objetivo do programa: Você é um repórter e precisa achar o relatório com o resumo de todas as matérias que você vai conquistar nos diversos lugares do labirinto.") """ INSTANCIA = None def leao(): def cria_leao(): global INSTANCIA INSTANCIA = Leao() if not INSTANCIA: cria_leao() return INSTANCIA if __name__ == "__main__": change_bg = "Para qual cor você quer mudar o plano de fundo? azul/branco" escolha = input(change_bg) if escolha == "azul": background = "blue" lab = leao() print(INSTANCIA) INVENTARIO.inicia() lab.vai() # lab.centro.norte.vai() # lab.sul.oeste.meio = metro.centro.norte
gpl-3.0
henryr/Impala
tests/benchmark/plugins/vtune_plugin.py
12
5735
#!/usr/bin/env python # Copyright (c) 2013 Cloudera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from os import environ from tests.util.cluster_controller import ClusterController from tests.benchmark.plugins import Plugin import datetime import threading import time class VTunePlugin(Plugin): """ This plugin runs Intel's VTune amplifier Before the query is executed, the plugin starts VTune collection. After the query has completed, the plugin stops the collection. """ __name__ = "VTunePlugin" #TODO: We should make these configurable VTUNE_PATH = '/opt/intel/vtune_amplifier_xe_2013/' TARGET_PROCESS = 'impalad' RESULT_DIR_BASE = '/var/log/impala/vtune/' + '%s' + '/db=%s' RESULT_QUERY_SCOPE = '_query=%s_format=%s_iteration=%i' KILL_CMD = 'ps aux | grep vtune | grep -v grep | awk \'{print $2}\' | xargs kill -9' def __init__(self, *args, **kwargs): self.cluster_controller = ClusterController(*args, **kwargs) Plugin.__init__(self, *args, **kwargs) # This is the unique run identifier self.run_tag = environ.get('BUILD_TAG', datetime.datetime.now()) # This method checks to ensure that VTune is installed in the expected path self._check_path_on_hosts() def run_pre_hook(self, context): # Source VTune variables and build the correct command string. For the workload # scope, the database name is added to the result path. For the query scope, the # query name and iteration is also added. 
result_dir = self.RESULT_DIR_BASE if context.get('scope') == 'Query': result_dir = result_dir + self.RESULT_QUERY_SCOPE pre_cmd = ('echo 0 > /proc/sys/kernel/nmi_watchdog\n' 'source ' + self.VTUNE_PATH + 'amplxe-vars.sh\n' 'amplxe-cl -collect advanced-hotspots ' '-result-dir=' + result_dir + ' -target-process=' + self.TARGET_PROCESS) table_format_str = context.get('table_format', 'UNKNOWN').replace('/', '-') pre_cmd = pre_cmd % (self.run_tag, context.get('db_name', 'UNKNOWN'), context.get('short_query_name', 'UNKNOWN'), table_format_str,context.get('iteration', 1)) self.thread = threading.Thread(target=self.cluster_controller.run_cmd, args=[pre_cmd], kwargs={'serial':False}) self.thread.start() # TODO: Test whether this is a good time to wait # Because we start this colection asychronously, we need to ensure that all the # machines are running. For now this is simplier than doing the full check that we # do in the post hook. time.sleep(2) def run_post_hook(self, context): # Source VTune variables and build the correct command string. 
This process is # identical to that in run_pre_hook() result_dir = self.RESULT_DIR_BASE if context.get('scope') == 'Query': result_dir = result_dir + self.RESULT_QUERY_SCOPE post_cmd = ('source ' + self.VTUNE_PATH + 'amplxe-vars.sh \n' 'amplxe-cl -command stop -result-dir=' + result_dir) table_format_str = context.get('table_format', 'UNKNOWN').replace('/', '-') # TODO: Fix the context dict to remove the ambiguity of the variable name # new_query_name post_cmd = post_cmd % (self.run_tag, context.get('db_name', 'UNKNOWN'), context.get('short_query_name', 'UNKNOWN'), table_format_str, context.get('iteration', 1)) self.cluster_controller.run_cmd(post_cmd) # Wait for reports to generate and kill hosts that are hanging around self._wait_for_completion(2) def _check_path_on_hosts(self): path_check_cmd = 'if [ -d "%s" ]; then echo "exists"\nfi' % (self.VTUNE_PATH) host_check_dict = self.cluster_controller.run_cmd(path_check_cmd) bad_hosts = [k for k in host_check_dict.keys() if host_check_dict[k] != "exists"] if bad_hosts: raise RuntimeError('VTune is not installed in the expected path for hosts %s' % ",".join(bad_hosts)) def _wait_for_completion(self, timeout): """ Waits for VTune reports to finish generating. On large datasets it can take time for the reports to generate. This method waits for a timeout period, checking to see if any machine in the cluster is still running a VTune command. After the timeout period, _kill_vtune() is called which kills any unterminated VTune commands. 
""" grep_dict = {} reports_done = True finish_time = datetime.datetime.now() + datetime.timedelta(minutes=timeout) while ((reports_done) and (datetime.datetime.now() < finish_time)): grep_dict = self.cluster_controller.run_cmd('ps aux|grep vtune|grep -v grep') reports_done = any(map(self.__is_not_none_or_empty_str, grep_dict.values())) # TODO: Investigate a better length of time for the sleep period between checks time.sleep(5) self._kill_vtune(grep_dict) def _kill_vtune(self, host_dict): # This method kills threads that are still hanging around after timeout kill_list = filter(self.__is_not_none_or_empty_str, host_dict.keys()) if kill_list: self.cluster_controller.change_fabric_hosts(kill_list) self.cluster_controller.run_cmd(self.KILL_CMD) self.cluster_controller.reset_fabric_hosts() def __is_not_none_or_empty_str(self, s): return s != None and s != ''
apache-2.0
ipashchenko/emcee-x
document/plots/oned.py
16
2164
import os import sys import time import numpy as np import matplotlib.pyplot as pl import h5py from multiprocessing import Pool sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", ".."))) import emcee # import acor def lnprobfn(p, icov): return -0.5 * np.dot(p, np.dot(icov, p)) def random_cov(ndim, dof=1): v = np.random.randn(ndim * (ndim + dof)).reshape((ndim + dof, ndim)) return (sum([np.outer(v[i], v[i]) for i in range(ndim + dof)]) / (ndim + dof)) _rngs = {} def _worker(args): i, outfn, nsteps = args pid = os.getpid() _random = _rngs.get(pid, np.random.RandomState(int(int(pid) + time.time()))) _rngs[pid] = _random ndim = int(np.ceil(2 ** (7 * _random.rand()))) nwalkers = 2 * ndim + 2 # nwalkers += nwalkers % 2 print ndim, nwalkers cov = random_cov(ndim) icov = np.linalg.inv(cov) ens_samp = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn, args=[icov]) ens_samp.random_state = _random.get_state() pos, lnprob, state = ens_samp.run_mcmc(np.random.randn(nwalkers * ndim) .reshape([nwalkers, ndim]), nsteps) proposal = np.diag(cov.diagonal()) mh_samp = emcee.MHSampler(proposal, ndim, lnprobfn, args=[icov]) mh_samp.random_state = state mh_samp.run_mcmc(np.random.randn(ndim), nsteps) f = h5py.File(outfn) f["data"][i, :] = np.array([ndim, np.mean(ens_samp.acor), np.mean(mh_samp.acor)]) f.close() def oned(): nsteps = 10000 niter = 10 nthreads = 2 outfn = os.path.join(os.path.split(__file__)[0], "gauss_scaling.h5") print outfn f = h5py.File(outfn, "w") f.create_dataset("data", (niter, 3), "f") f.close() pool = Pool(nthreads) pool.map(_worker, [(i, outfn, nsteps) for i in range(niter)]) f = h5py.File(outfn) data = f["data"][...] f.close() pl.clf() pl.plot(data[:, 0], data[:, 1], "ks", alpha=0.5) pl.plot(data[:, 0], data[:, 2], ".k", alpha=0.5) pl.savefig(os.path.join(os.path.split(__file__)[0], "gauss_scaling.png")) if __name__ == "__main__": oned()
mit
superchilli/webapp
venv/lib/python2.7/site-packages/wheel/test/test_basic.py
472
6405
""" Basic wheel tests. """ import os import pkg_resources import json import sys from pkg_resources import resource_filename import wheel.util import wheel.tool from wheel import egg2wheel from wheel.install import WheelFile from zipfile import ZipFile from shutil import rmtree test_distributions = ("complex-dist", "simple.dist", "headers.dist") def teardown_module(): """Delete eggs/wheels created by tests.""" base = pkg_resources.resource_filename('wheel.test', '') for dist in test_distributions: for subdir in ('build', 'dist'): try: rmtree(os.path.join(base, dist, subdir)) except OSError: pass def setup_module(): build_wheel() build_egg() def build_wheel(): """Build wheels from test distributions.""" for dist in test_distributions: pwd = os.path.abspath(os.curdir) distdir = pkg_resources.resource_filename('wheel.test', dist) os.chdir(distdir) try: sys.argv = ['', 'bdist_wheel'] exec(compile(open('setup.py').read(), 'setup.py', 'exec')) finally: os.chdir(pwd) def build_egg(): """Build eggs from test distributions.""" for dist in test_distributions: pwd = os.path.abspath(os.curdir) distdir = pkg_resources.resource_filename('wheel.test', dist) os.chdir(distdir) try: sys.argv = ['', 'bdist_egg'] exec(compile(open('setup.py').read(), 'setup.py', 'exec')) finally: os.chdir(pwd) def test_findable(): """Make sure pkg_resources can find us.""" assert pkg_resources.working_set.by_key['wheel'].version def test_egg_re(): """Make sure egg_info_re matches.""" egg_names = open(pkg_resources.resource_filename('wheel', 'eggnames.txt')) for line in egg_names: line = line.strip() if not line: continue assert egg2wheel.egg_info_re.match(line), line def test_compatibility_tags(): """Test compatibilty tags are working.""" wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl") assert (list(wf.compatibility_tags) == [('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')]) assert (wf.arity == 2) wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl") wf2_info = 
wf2.parsed_filename.groupdict() assert wf2_info['build'] == '1st', wf2_info def test_convert_egg(): base = pkg_resources.resource_filename('wheel.test', '') for dist in test_distributions: distdir = os.path.join(base, dist, 'dist') eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')] wheel.tool.convert(eggs, distdir, verbose=False) def test_unpack(): """ Make sure 'wheel unpack' works. This also verifies the integrity of our testing wheel files. """ for dist in test_distributions: distdir = pkg_resources.resource_filename('wheel.test', os.path.join(dist, 'dist')) for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')): wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir) def test_no_scripts(): """Make sure entry point scripts are not generated.""" dist = "complex-dist" basedir = pkg_resources.resource_filename('wheel.test', dist) for (dirname, subdirs, filenames) in os.walk(basedir): for filename in filenames: if filename.endswith('.whl'): whl = ZipFile(os.path.join(dirname, filename)) for entry in whl.infolist(): assert not '.data/scripts/' in entry.filename def test_pydist(): """Make sure pydist.json exists and validates against our schema.""" # XXX this test may need manual cleanup of older wheels import jsonschema def open_json(filename): return json.loads(open(filename, 'rb').read().decode('utf-8')) pymeta_schema = open_json(resource_filename('wheel.test', 'pydist-schema.json')) valid = 0 for dist in ("simple.dist", "complex-dist"): basedir = pkg_resources.resource_filename('wheel.test', dist) for (dirname, subdirs, filenames) in os.walk(basedir): for filename in filenames: if filename.endswith('.whl'): whl = ZipFile(os.path.join(dirname, filename)) for entry in whl.infolist(): if entry.filename.endswith('/metadata.json'): pymeta = json.loads(whl.read(entry).decode('utf-8')) jsonschema.validate(pymeta, pymeta_schema) valid += 1 assert valid > 0, "No metadata.json found" def test_util(): """Test functions in util.py.""" for i in 
range(10): before = b'*' * i encoded = wheel.util.urlsafe_b64encode(before) assert not encoded.endswith(b'=') after = wheel.util.urlsafe_b64decode(encoded) assert before == after def test_pick_best(): """Test the wheel ranking algorithm.""" def get_tags(res): info = res[-1].parsed_filename.groupdict() return info['pyver'], info['abi'], info['plat'] cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'), ('cp27', 'noabi', 'linux_i686'), ('cp26', 'noabi', 'linux_i686'), ('cp27', 'noabi', 'linux_x86_64'), ('cp26', 'noabi', 'linux_x86_64')] cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t) for t in cand_tags] supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')] supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'), ('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')] supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'), ('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')] for supp in (supported, supported2, supported3): context = lambda: list(supp) for wheel in cand_wheels: wheel.context = context best = max(cand_wheels) assert list(best.tags)[0] == supp[0] # assert_equal( # list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp)
mit
mikebranstein/mvc-quickstart-instructions
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py
2710
5094
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio user preferences file writer.""" import os import re import socket # for gethostname import gyp.common import gyp.easy_xml as easy_xml #------------------------------------------------------------------------------ def _FindCommandInPath(command): """If there are no slashes in the command given, this function searches the PATH env to find the given command, and converts it to an absolute path. We have to do this because MSVS is looking for an actual file to launch a debugger on, not just a command line. Note that this happens at GYP time, so anything needing to be built needs to have a full path.""" if '/' in command or '\\' in command: # If the command already has path elements (either relative or # absolute), then assume it is constructed properly. return command else: # Search through the path list and find an existing file that # we can access. paths = os.environ.get('PATH','').split(os.pathsep) for path in paths: item = os.path.join(path, command) if os.path.isfile(item) and os.access(item, os.X_OK): return item return command def _QuoteWin32CommandLineArgs(args): new_args = [] for arg in args: # Replace all double-quotes with double-double-quotes to escape # them for cmd shell, and then quote the whole thing if there # are any. if arg.find('"') != -1: arg = '""'.join(arg.split('"')) arg = '"%s"' % arg # Otherwise, if there are any spaces, quote the whole arg. elif re.search(r'[ \t\n]', arg): arg = '"%s"' % arg new_args.append(arg) return new_args class Writer(object): """Visual Studio XML user user file writer.""" def __init__(self, user_file_path, version, name): """Initializes the user file. Args: user_file_path: Path to the user file. version: Version info. name: Name of the user file. 
""" self.user_file_path = user_file_path self.version = version self.name = name self.configurations = {} def AddConfig(self, name): """Adds a configuration to the project. Args: name: Configuration name. """ self.configurations[name] = ['Configuration', {'Name': name}] def AddDebugSettings(self, config_name, command, environment = {}, working_directory=""): """Adds a DebugSettings node to the user file for a particular config. Args: command: command line to run. First element in the list is the executable. All elements of the command will be quoted if necessary. working_directory: other files which may trigger the rule. (optional) """ command = _QuoteWin32CommandLineArgs(command) abs_command = _FindCommandInPath(command[0]) if environment and isinstance(environment, dict): env_list = ['%s="%s"' % (key, val) for (key,val) in environment.iteritems()] environment = ' '.join(env_list) else: environment = '' n_cmd = ['DebugSettings', {'Command': abs_command, 'WorkingDirectory': working_directory, 'CommandArguments': " ".join(command[1:]), 'RemoteMachine': socket.gethostname(), 'Environment': environment, 'EnvironmentMerge': 'true', # Currently these are all "dummy" values that we're just setting # in the default manner that MSVS does it. We could use some of # these to add additional capabilities, I suppose, but they might # not have parity with other platforms then. 'Attach': 'false', 'DebuggerType': '3', # 'auto' debugger 'Remote': '1', 'RemoteCommand': '', 'HttpUrl': '', 'PDBPath': '', 'SQLDebugging': '', 'DebuggerFlavor': '0', 'MPIRunCommand': '', 'MPIRunArguments': '', 'MPIRunWorkingDirectory': '', 'ApplicationCommand': '', 'ApplicationArguments': '', 'ShimCommand': '', 'MPIAcceptMode': '', 'MPIAcceptFilter': '' }] # Find the config, and add it if it doesn't exist. if config_name not in self.configurations: self.AddConfig(config_name) # Add the DebugSettings onto the appropriate config. 
self.configurations[config_name].append(n_cmd) def WriteIfChanged(self): """Writes the user file.""" configs = ['Configurations'] for config, spec in sorted(self.configurations.iteritems()): configs.append(spec) content = ['VisualStudioUserFile', {'Version': self.version.ProjectVersion(), 'Name': self.name }, configs] easy_xml.WriteXmlIfChanged(content, self.user_file_path, encoding="Windows-1252")
mit
entomb/CouchPotatoServer
couchpotato/core/settings/__init__.py
5
7480
from __future__ import with_statement import traceback from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import mergeDicts, tryInt, tryFloat from couchpotato.core.settings.model import Properties import ConfigParser class Settings(object): options = {} types = {} def __init__(self): addApiView('settings', self.view, docs = { 'desc': 'Return the options and its values of settings.conf. Including the default values and group ordering used on the settings page.', 'return': {'type': 'object', 'example': """{ // objects like in __init__.py of plugin "options": { "moovee" : { "groups" : [{ "description" : "SD movies only", "name" : "#alt.binaries.moovee", "options" : [{ "default" : false, "name" : "enabled", "type" : "enabler" }], "tab" : "providers" }], "name" : "moovee" } }, // object structured like settings.conf "values": { "moovee": { "enabled": false } } }"""} }) addApiView('settings.save', self.saveView, docs = { 'desc': 'Save setting to config file (settings.conf)', 'params': { 'section': {'desc': 'The section name in settings.conf'}, 'option': {'desc': 'The option name'}, 'value': {'desc': 'The value you want to save'}, } }) def setFile(self, config_file): self.file = config_file self.p = ConfigParser.RawConfigParser() self.p.read(config_file) from couchpotato.core.logger import CPLog self.log = CPLog(__name__) self.connectEvents() def parser(self): return self.p def sections(self): return self.p.sections() def connectEvents(self): addEvent('settings.options', self.addOptions) addEvent('settings.register', self.registerDefaults) addEvent('settings.save', self.save) def registerDefaults(self, section_name, options = None, save = True): if not options: options = {} self.addSection(section_name) for option_name, option in options.items(): self.setDefault(section_name, option_name, option.get('default', '')) # Migrate old 
settings from old location to the new location if option.get('migrate_from'): if self.p.has_option(option.get('migrate_from'), option_name): previous_value = self.p.get(option.get('migrate_from'), option_name) self.p.set(section_name, option_name, previous_value) self.p.remove_option(option.get('migrate_from'), option_name) if option.get('type'): self.setType(section_name, option_name, option.get('type')) if save: self.save() def set(self, section, option, value): return self.p.set(section, option, value) def get(self, option = '', section = 'core', default = None, type = None): try: try: type = self.types[section][option] except: type = 'unicode' if not type else type if hasattr(self, 'get%s' % type.capitalize()): return getattr(self, 'get%s' % type.capitalize())(section, option) else: return self.getUnicode(section, option) except: return default def delete(self, option = '', section = 'core'): self.p.remove_option(section, option) self.save() def getEnabler(self, section, option): return self.getBool(section, option) def getBool(self, section, option): try: return self.p.getboolean(section, option) except: return self.p.get(section, option) == 1 def getInt(self, section, option): try: return self.p.getint(section, option) except: return tryInt(self.p.get(section, option)) def getFloat(self, section, option): try: return self.p.getfloat(section, option) except: return tryFloat(self.p.get(section, option)) def getUnicode(self, section, option): value = self.p.get(section, option).decode('unicode_escape') return toUnicode(value).strip() def getValues(self): values = {} for section in self.sections(): values[section] = {} for option in self.p.items(section): (option_name, option_value) = option values[section][option_name] = self.get(option_name, section) return values def save(self): with open(self.file, 'wb') as configfile: self.p.write(configfile) self.log.debug('Saved settings') def addSection(self, section): if not self.p.has_section(section): 
self.p.add_section(section) def setDefault(self, section, option, value): if not self.p.has_option(section, option): self.p.set(section, option, value) def setType(self, section, option, type): if not self.types.get(section): self.types[section] = {} self.types[section][option] = type def addOptions(self, section_name, options): if not self.options.get(section_name): self.options[section_name] = options else: self.options[section_name] = mergeDicts(self.options[section_name], options) def getOptions(self): return self.options def view(self, **kwargs): return { 'options': self.getOptions(), 'values': self.getValues() } def saveView(self, **kwargs): section = kwargs.get('section') option = kwargs.get('name') value = kwargs.get('value') # See if a value handler is attached, use that as value new_value = fireEvent('setting.save.%s.%s' % (section, option), value, single = True) self.set(section, option, (new_value if new_value else value).encode('unicode_escape')) self.save() # After save (for re-interval etc) fireEvent('setting.save.%s.%s.after' % (section, option), single = True) fireEvent('setting.save.%s.*.after' % section, single = True) return { 'success': True, } def getProperty(self, identifier): from couchpotato import get_session db = get_session() prop = None try: propert = db.query(Properties).filter_by(identifier = identifier).first() prop = propert.value except: pass return prop def setProperty(self, identifier, value = ''): from couchpotato import get_session try: db = get_session() p = db.query(Properties).filter_by(identifier = identifier).first() if not p: p = Properties() db.add(p) p.identifier = identifier p.value = toUnicode(value) db.commit() except: self.log.error('Failed: %s', traceback.format_exc()) db.rollback() finally: db.close()
gpl-3.0
chouseknecht/ansible
lib/ansible/modules/cloud/univention/udm_user.py
37
18102
#!/usr/bin/python # -*- coding: UTF-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: udm_user version_added: "2.2" author: - Tobias Rüetschi (@keachi) short_description: Manage posix users on a univention corporate server description: - "This module allows to manage posix users on a univention corporate server (UCS). It uses the python API of the UCS to create a new object or edit it." requirements: - Python >= 2.6 options: state: default: "present" choices: [ present, absent ] description: - Whether the user is present or not. username: required: true description: - User name aliases: ['name'] firstname: description: - First name. Required if C(state=present). lastname: description: - Last name. Required if C(state=present). password: description: - Password. Required if C(state=present). birthday: description: - Birthday city: description: - City of users business address. country: description: - Country of users business address. department_number: description: - Department number of users business address. aliases: [ departmentNumber ] description: description: - Description (not gecos) display_name: description: - Display name (not gecos) aliases: [ displayName ] email: default: [] description: - A list of e-mail addresses. employee_number: description: - Employee number aliases: [ employeeNumber ] employee_type: description: - Employee type aliases: [ employeeType ] gecos: description: - GECOS groups: default: [] description: - "POSIX groups, the LDAP DNs of the groups will be found with the LDAP filter for each group as $GROUP: C((&(objectClass=posixGroup)(cn=$GROUP)))." 
home_share: description: - "Home NFS share. Must be a LDAP DN, e.g. C(cn=home,cn=shares,ou=school,dc=example,dc=com)." aliases: [ homeShare ] home_share_path: description: - Path to home NFS share, inside the homeShare. aliases: [ homeSharePath ] home_telephone_number: default: [] description: - List of private telephone numbers. aliases: [ homeTelephoneNumber ] homedrive: description: - Windows home drive, e.g. C("H:"). mail_alternative_address: default: [] description: - List of alternative e-mail addresses. aliases: [ mailAlternativeAddress ] mail_home_server: description: - FQDN of mail server aliases: [ mailHomeServer ] mail_primary_address: description: - Primary e-mail address aliases: [ mailPrimaryAddress ] mobile_telephone_number: default: [] description: - Mobile phone number aliases: [ mobileTelephoneNumber ] organisation: description: - Organisation aliases: [ organization ] override_pw_history: type: bool default: 'no' description: - Override password history aliases: [ overridePWHistory ] override_pw_length: type: bool default: 'no' description: - Override password check aliases: [ overridePWLength ] pager_telephonenumber: default: [] description: - List of pager telephone numbers. aliases: [ pagerTelephonenumber ] phone: description: - List of telephone numbers. postcode: description: - Postal code of users business address. primary_group: default: cn=Domain Users,cn=groups,$LDAP_BASE_DN description: - Primary group. This must be the group LDAP DN. aliases: [ primaryGroup ] profilepath: description: - Windows profile directory pwd_change_next_login: choices: [ '0', '1' ] description: - Change password on next login. aliases: [ pwdChangeNextLogin ] room_number: description: - Room number of users business address. aliases: [ roomNumber ] samba_privileges: description: - "Samba privilege, like allow printer administration, do domain join." 
aliases: [ sambaPrivileges ] samba_user_workstations: description: - Allow the authentication only on this Microsoft Windows host. aliases: [ sambaUserWorkstations ] sambahome: description: - Windows home path, e.g. C('\\\\$FQDN\\$USERNAME'). scriptpath: description: - Windows logon script. secretary: default: [] description: - A list of superiors as LDAP DNs. serviceprovider: default: [] description: - Enable user for the following service providers. shell: default: '/bin/bash' description: - Login shell street: description: - Street of users business address. title: description: - Title, e.g. C(Prof.). unixhome: default: '/home/$USERNAME' description: - Unix home directory userexpiry: default: Today + 1 year description: - Account expiry date, e.g. C(1999-12-31). position: default: '' description: - "Define the whole position of users object inside the LDAP tree, e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)." update_password: default: always description: - "C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users." version_added: "2.3" ou: default: '' description: - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for LDAP OU C(ou=school,dc=example,dc=com)." subpath: default: 'cn=users' description: - "LDAP subpath inside the organizational unit, e.g. C(cn=teachers,cn=users) for LDAP container C(cn=teachers,cn=users,dc=example,dc=com)." 
'''

EXAMPLES = '''
# Create a user on a UCS
- udm_user:
    name: FooBar
    password: secure_password
    firstname: Foo
    lastname: Bar

# Create a user with the DN
# C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
- udm_user:
    name: foo
    password: secure_password
    firstname: Foo
    lastname: Bar
    ou: school
    subpath: 'cn=teachers,cn=users'

# or define the position
- udm_user:
    name: foo
    password: secure_password
    firstname: Foo
    lastname: Bar
    position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
'''

RETURN = '''# '''

import crypt
from datetime import date, timedelta

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
    umc_module_for_add,
    umc_module_for_edit,
    ldap_search,
    base_dn,
)


def main():
    """Create, update or remove a user object via Univention's UMC modules.

    Looks the user up by uid in LDAP, then either creates/edits the
    'users/user' UMC object (state=present) or removes it (state=absent),
    and finally adds the user to any requested posix groups.
    Exits through module.exit_json()/fail_json(); returns nothing.
    """
    # Default account expiry: one year from today (see 'userexpiry' option).
    expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
    module = AnsibleModule(
        argument_spec=dict(
            birthday=dict(default=None,
                          type='str'),
            city=dict(default=None,
                      type='str'),
            country=dict(default=None,
                         type='str'),
            department_number=dict(default=None,
                                   type='str',
                                   aliases=['departmentNumber']),
            description=dict(default=None,
                             type='str'),
            display_name=dict(default=None,
                              type='str',
                              aliases=['displayName']),
            email=dict(default=[''],
                       type='list'),
            employee_number=dict(default=None,
                                 type='str',
                                 aliases=['employeeNumber']),
            employee_type=dict(default=None,
                               type='str',
                               aliases=['employeeType']),
            firstname=dict(default=None,
                           type='str'),
            gecos=dict(default=None,
                       type='str'),
            groups=dict(default=[],
                        type='list'),
            home_share=dict(default=None,
                            type='str',
                            aliases=['homeShare']),
            home_share_path=dict(default=None,
                                 type='str',
                                 aliases=['homeSharePath']),
            home_telephone_number=dict(default=[],
                                       type='list',
                                       aliases=['homeTelephoneNumber']),
            homedrive=dict(default=None,
                           type='str'),
            lastname=dict(default=None,
                          type='str'),
            mail_alternative_address=dict(default=[],
                                          type='list',
                                          aliases=['mailAlternativeAddress']),
            mail_home_server=dict(default=None,
                                  type='str',
                                  aliases=['mailHomeServer']),
            mail_primary_address=dict(default=None,
                                      type='str',
                                      aliases=['mailPrimaryAddress']),
            mobile_telephone_number=dict(default=[],
                                        type='list',
                                        aliases=['mobileTelephoneNumber']),
            organisation=dict(default=None,
                              type='str',
                              aliases=['organization']),
            # NOTE(review): canonical name and alias are swapped relative to
            # the DOCUMENTATION block (which documents 'override_pw_history'
            # with alias 'overridePWHistory'). Both spellings are accepted
            # either way, but the internal params key is 'overridePWHistory'
            # — confirm before "fixing".
            overridePWHistory=dict(default=False,
                                   type='bool',
                                   aliases=['override_pw_history']),
            overridePWLength=dict(default=False,
                                  type='bool',
                                  aliases=['override_pw_length']),
            pager_telephonenumber=dict(default=[],
                                       type='list',
                                       aliases=['pagerTelephonenumber']),
            password=dict(default=None,
                          type='str',
                          no_log=True),
            phone=dict(default=[],
                       type='list'),
            postcode=dict(default=None,
                          type='str'),
            primary_group=dict(default=None,
                               type='str',
                               aliases=['primaryGroup']),
            profilepath=dict(default=None,
                             type='str'),
            pwd_change_next_login=dict(default=None,
                                       type='str',
                                       choices=['0', '1'],
                                       aliases=['pwdChangeNextLogin']),
            room_number=dict(default=None,
                             type='str',
                             aliases=['roomNumber']),
            samba_privileges=dict(default=[],
                                  type='list',
                                  aliases=['sambaPrivileges']),
            samba_user_workstations=dict(default=[],
                                         type='list',
                                         aliases=['sambaUserWorkstations']),
            sambahome=dict(default=None,
                           type='str'),
            scriptpath=dict(default=None,
                            type='str'),
            secretary=dict(default=[],
                           type='list'),
            serviceprovider=dict(default=[''],
                                 type='list'),
            shell=dict(default='/bin/bash',
                       type='str'),
            street=dict(default=None,
                        type='str'),
            title=dict(default=None,
                       type='str'),
            unixhome=dict(default=None,
                          type='str'),
            userexpiry=dict(default=expiry,
                            type='str'),
            username=dict(required=True,
                          aliases=['name'],
                          type='str'),
            position=dict(default='',
                          type='str'),
            update_password=dict(default='always',
                                 choices=['always', 'on_create'],
                                 type='str'),
            ou=dict(default='',
                    type='str'),
            subpath=dict(default='cn=users',
                         type='str'),
            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str')
        ),
        supports_check_mode=True,
        required_if=([
            ('state', 'present', ['firstname', 'lastname', 'password'])
        ])
    )
    username = module.params['username']
    position = module.params['position']
    ou = module.params['ou']
    subpath = module.params['subpath']
    state = module.params['state']
    changed = False
    diff = None

    # Does a posixAccount with this uid already exist anywhere in the tree?
    users = list(ldap_search(
        '(&(objectClass=posixAccount)(uid={0}))'.format(username),
        attr=['uid']
    ))
    # 'position' (a full DN) wins over the ou/subpath building blocks.
    if position != '':
        container = position
    else:
        if ou != '':
            ou = 'ou={0},'.format(ou)
        if subpath != '':
            subpath = '{0},'.format(subpath)
        container = '{0}{1}{2}'.format(subpath, ou, base_dn())
    user_dn = 'uid={0},{1}'.format(username, container)

    exists = bool(len(users))

    if state == 'present':
        try:
            if not exists:
                obj = umc_module_for_add('users/user', container)
            else:
                obj = umc_module_for_edit('users/user', user_dn)

            # NOTE(review): 'displayName' is the *alias* of display_name;
            # accessing module.params['displayName'] relies on Ansible
            # exposing alias keys in params — verify, the canonical key
            # is 'display_name'.
            if module.params['displayName'] is None:
                module.params['displayName'] = '{0} {1}'.format(
                    module.params['firstname'],
                    module.params['lastname']
                )
            if module.params['unixhome'] is None:
                module.params['unixhome'] = '/home/{0}'.format(
                    module.params['username']
                )
            # Copy every UMC attribute that has a matching, non-None module
            # parameter; password/groups/overridePWHistory get special
            # handling below.
            for k in obj.keys():
                if (k != 'password' and
                        k != 'groups' and
                        k != 'overridePWHistory' and
                        k in module.params and
                        module.params[k] is not None):
                    obj[k] = module.params[k]
            # handle some special values
            obj['e-mail'] = module.params['email']
            password = module.params['password']
            if obj['password'] is None:
                obj['password'] = password
            if module.params['update_password'] == 'always':
                # NOTE(review): assumes the stored password looks like
                # '{crypt}$...' so split('}', 2)[1] yields the crypt hash;
                # for a newly set plaintext password this would raise
                # (and be swallowed by the broad except below) — confirm.
                old_password = obj['password'].split('}', 2)[1]
                if crypt.crypt(password, old_password) != old_password:
                    obj['overridePWHistory'] = module.params['overridePWHistory']
                    obj['overridePWLength'] = module.params['overridePWLength']
                    obj['password'] = password

            diff = obj.diff()
            if exists:
                for k in obj.keys():
                    if obj.hasChanged(k):
                        changed = True
            else:
                changed = True
            if not module.check_mode:
                if not exists:
                    obj.create()
                elif changed:
                    obj.modify()
        except Exception:
            # NOTE(review): broad except discards the underlying error
            # message, making failures hard to diagnose.
            module.fail_json(
                msg="Creating/editing user {0} in {1} failed".format(
                    username,
                    container
                )
            )
        try:
            groups = module.params['groups']
            if groups:
                # NOTE(review): 'filter' shadows the builtin of that name.
                filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format(
                    ')(cn='.join(groups)
                )
                group_dns = list(ldap_search(filter, attr=['dn']))
                for dn in group_dns:
                    grp = umc_module_for_edit('groups/group', dn[0])
                    if user_dn not in grp['users']:
                        grp['users'].append(user_dn)
                        if not module.check_mode:
                            grp.modify()
                        changed = True
        except Exception:
            module.fail_json(
                msg="Adding groups to user {0} failed".format(username)
            )

    if state == 'absent' and exists:
        try:
            obj = umc_module_for_edit('users/user', user_dn)
            if not module.check_mode:
                obj.remove()
            changed = True
        except Exception:
            module.fail_json(
                msg="Removing user {0} failed".format(username)
            )

    module.exit_json(
        changed=changed,
        username=username,
        diff=diff,
        container=container
    )


if __name__ == '__main__':
    main()
gpl-3.0
cubieboard/openbox_external_chromium
testing/gtest/test/gtest_xml_test_utils.py
398
8029
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit test utilities for gtest_xml_output"""

__author__ = 'eefacm@gmail.com (Sean Mcafee)'

import re
from xml.dom import minidom, Node

import gtest_test_utils

GTEST_OUTPUT_FLAG         = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"


# NOTE(review): this class uses Python-2-era APIs (dict.iteritems,
# unittest's assertEquals/assert_); it predates Python 3 support.
class GTestXMLTestCase(gtest_test_utils.TestCase):
  """
  Base class for tests of Google Test's XML output functionality.
  """

  def AssertEquivalentNodes(self, expected_node, actual_node):
    """
    Asserts that actual_node (a DOM node object) is equivalent to
    expected_node (another DOM node object), in that either both of
    them are CDATA nodes and have the same value, or both are DOM
    elements and actual_node meets all of the following conditions:

    *  It has the same tag name as expected_node.
    *  It has the same set of attributes as expected_node, each with
       the same value as the corresponding attribute of expected_node.
       Exceptions are any attribute named "time", which needs only be
       convertible to a floating-point number and any attribute named
       "type_param" which only has to be non-empty.
    *  It has an equivalent set of child nodes (including elements and
       CDATA sections) as expected_node.  Note that we ignore the
       order of the children as they are not guaranteed to be in any
       particular order.
    """
    # CDATA nodes: compare node type and value, nothing else applies.
    if expected_node.nodeType == Node.CDATA_SECTION_NODE:
      self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
      self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
      return

    self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
    self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
    self.assertEquals(expected_node.tagName, actual_node.tagName)

    # Same attribute count, then each expected attribute must be present
    # with an equal value (attribute order is irrelevant in XML).
    expected_attributes = expected_node.attributes
    actual_attributes = actual_node .attributes
    self.assertEquals(
        expected_attributes.length, actual_attributes.length,
        "attribute numbers differ in element " + actual_node.tagName)
    for i in range(expected_attributes.length):
      expected_attr = expected_attributes.item(i)
      actual_attr = actual_attributes.get(expected_attr.name)
      self.assert_(
          actual_attr is not None,
          "expected attribute %s not found in element %s" %
          (expected_attr.name, actual_node.tagName))
      self.assertEquals(expected_attr.value, actual_attr.value,
                        " values of attribute %s in element %s differ" %
                        (expected_attr.name, actual_node.tagName))

    # Compare children by their identifying ID (see _GetChildren), so
    # the comparison is order-insensitive; recurse on each match.
    expected_children = self._GetChildren(expected_node)
    actual_children = self._GetChildren(actual_node)
    self.assertEquals(
        len(expected_children), len(actual_children),
        "number of child elements differ in element " + actual_node.tagName)
    for child_id, child in expected_children.iteritems():
      self.assert_(child_id in actual_children,
                   '<%s> is not in <%s> (in element %s)' %
                   (child_id, actual_children, actual_node.tagName))
      self.AssertEquivalentNodes(child, actual_children[child_id])

  # Maps an element tag name to the attribute whose value identifies a
  # child of that element uniquely among its siblings.
  identifying_attribute = {
    "testsuites": "name",
    "testsuite": "name",
    "testcase":  "name",
    "failure":   "message",
    }

  def _GetChildren(self, element):
    """
    Fetches all of the child nodes of element, a DOM Element object.
    Returns them as the values of a dictionary keyed by the IDs of the
    children.  For <testsuites>, <testsuite> and <testcase> elements, the ID
    is the value of their "name" attribute; for <failure> elements, it is
    the value of the "message" attribute; CDATA sections and non-whitespace
    text nodes are concatenated into a single CDATA section with ID
    "detail".  An exception is raised if any element other than the above
    four is encountered, if two child elements with the same identifying
    attributes are encountered, or if any other type of node is encountered.
    """
    children = {}
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.assert_(child.tagName in self.identifying_attribute,
                     "Encountered unknown element <%s>" % child.tagName)
        childID = child.getAttribute(self.identifying_attribute[child.tagName])
        self.assert_(childID not in children)
        children[childID] = child
      elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
        # Fold all text/CDATA children into one synthetic "detail" CDATA
        # node; whitespace-only text before the first real chunk is dropped.
        if "detail" not in children:
          if (child.nodeType == Node.CDATA_SECTION_NODE or
              not child.nodeValue.isspace()):
            children["detail"] = child.ownerDocument.createCDATASection(
                child.nodeValue)
        else:
          children["detail"].nodeValue += child.nodeValue
      else:
        self.fail("Encountered unexpected node type %d" % child.nodeType)
    return children

  def NormalizeXml(self, element):
    """
    Normalizes Google Test's XML output to eliminate references to transient
    information that may change from run to run.

    *  The "time" attribute of <testsuites>, <testsuite> and <testcase>
       elements is replaced with a single asterisk, if it contains
       only digit characters.
    *  The "type_param" attribute of <testcase> elements is replaced with a
       single asterisk (if it is non-empty) as it is the type name returned
       by the compiler and is platform dependent.
    *  The line number reported in the first line of the "message"
       attribute of <failure> elements is replaced with a single asterisk.
    *  The directory names in file paths are removed.
    *  The stack traces are removed.
    """
    if element.tagName in ("testsuites", "testsuite", "testcase"):
      time = element.getAttributeNode("time")
      time.value = re.sub(r"^\d+(\.\d+)?$", "*", time.value)
      type_param = element.getAttributeNode("type_param")
      if type_param and type_param.value:
        type_param.value = "*"
    elif element.tagName == "failure":
      for child in element.childNodes:
        if child.nodeType == Node.CDATA_SECTION_NODE:
          # Removes the source line number.
          cdata = re.sub(r"^.*[/\\](.*:)\d+\n", "\\1*\n", child.nodeValue)
          # Removes the actual stack trace.
          child.nodeValue = re.sub(r"\nStack trace:\n(.|\n)*",
                                   "", cdata)
    # Recurse into all element children.
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.NormalizeXml(child)
bsd-3-clause
JizhouZhang/SDR
gr-filter/python/filter/design/api_object.py
45
2638
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#


class ApiObject(object):
    """Holds the results of one or more filter designs.

    ``filtcount`` allows the filter design tool to return multiple filter
    parameter sets in the future (e.g. cascaded filters); today it is
    normally 1.  Each filter slot stores a response type, a parameter
    dictionary, and taps.  Filter numbers are 1-based; updates with an
    out-of-range filter number are silently ignored and out-of-range
    getters return None (preserved from the original behavior).
    """

    def __init__(self, filtcount=1):
        self.filtercount = filtcount
        self.restype = [''] * self.filtercount
        self.params = [''] * self.filtercount
        self.taps = [''] * self.filtercount

    def update_params(self, params, filtno):
        """Update the params dictionary for the given filter number."""
        if filtno <= self.filtercount:
            self.params[filtno - 1] = params

    def update_filttype(self, filttype, filtno):
        """Update the filter (response) type for the given filter number.

        Bug fix: this previously assigned to ``self.filttype``, an
        attribute that is never created (AttributeError at runtime).
        The response type is stored in ``self.restype``, consistent
        with update_all() and get_restype().
        """
        if filtno <= self.filtercount:
            self.restype[filtno - 1] = filttype

    def update_taps(self, taps, filtno):
        """Update taps for the given filter number.

        ``taps`` is a list of coefficients for FIR designs and a
        ``(b, a)`` tuple for IIR designs.
        """
        if filtno <= self.filtercount:
            self.taps[filtno - 1] = taps

    def update_all(self, filttype, params, taps, filtno):
        """Update type, params and taps for the given filter number in one call."""
        if filtno <= self.filtercount:
            self.taps[filtno - 1] = taps
            self.params[filtno - 1] = params
            self.restype[filtno - 1] = filttype

    def get_filtercount(self):
        """Return how many filter parameter sets this object holds."""
        return self.filtercount

    def get_restype(self, filtno=1):
        """Return the response type of the given filter (None if out of range)."""
        if filtno <= self.filtercount:
            return self.restype[filtno - 1]

    def get_params(self, filtno=1):
        """Return the params dictionary of the given filter (None if out of range)."""
        if filtno <= self.filtercount:
            return self.params[filtno - 1]

    def get_taps(self, filtno=1):
        """Return the taps of the given filter (None if out of range)."""
        if filtno <= self.filtercount:
            return self.taps[filtno - 1]
gpl-3.0
levilucio/SyVOLT
GM2AUTOSAR_MM/transformation/HMapPartition.py
1
3685
from core.himesis import Himesis
import uuid


class HMapPartition(Himesis):

    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule MapPartition.
        """
        # Mark this rule instance as already compiled.
        self.is_compiled = True

        super(HMapPartition, self).__init__(name='HMapPartition', num_nodes=0, edges=[])

        # Graph-level attributes.
        self["mm__"] = ['HimesisMM']
        self["name"] = """MapPartition"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, 'MapPartition')

        # One entry per vertex, in creation order: (metamodel type, extra
        # vertex attributes).  Indices in the comments are the vertex ids
        # referenced by the edge list below.
        vertex_specs = [
            ("""MatchModel""", {}),                           # 0: match model (only one supported)
            ("""ApplyModel""", {}),                           # 1: apply model
            ("""paired_with""", {}),                          # 2: relation between match and apply models
            ("""Partition""", {"attr1": """+"""}),            # 3: match class Partition()
            ("""match_contains""", {}),                       # 4: match_contains for Partition()
            ("""PhysicalNode""", {"attr1": """1"""}),         # 5: match class PhysicalNode()
            ("""match_contains""", {}),                       # 6: match_contains for PhysicalNode()
            ("""Module""", {"attr1": """1"""}),               # 7: match class Module()
            ("""match_contains""", {}),                       # 8: match_contains for Module()
            ("""SwcToEcuMapping""", {"attr1": """1"""}),      # 9: apply class SwcToEcuMapping()
            ("""apply_contains""", {}),                       # 10: apply_contains for SwcToEcuMapping()
            ("""directLink_S""", {"attr1": """partition"""}), # 11: PhysicalNode --partition--> Partition
            ("""directLink_S""", {"attr1": """module"""}),    # 12: Partition --module--> Module
        ]
        for index, (mm_type, extra_attrs) in enumerate(vertex_specs):
            self.add_node()
            self.vs[index]["mm__"] = mm_type
            for attr_name, attr_value in extra_attrs.items():
                self.vs[index][attr_name] = attr_value

        # Wire the graph together.
        self.add_edges([
            (0, 4),   # matchmodel -> match_contains
            (4, 3),   # match_contains -> match_class Partition()
            (0, 6),   # matchmodel -> match_contains
            (6, 5),   # match_contains -> match_class PhysicalNode()
            (0, 8),   # matchmodel -> match_contains
            (8, 7),   # match_contains -> match_class Module()
            (1, 10),  # applymodel -> apply_contains
            (10, 9),  # apply_contains -> apply_class SwcToEcuMapping()
            (5, 11),  # match_class PhysicalNode() -> association partition
            (11, 3),  # association partition -> match_class Partition()
            (3, 12),  # match_class Partition() -> association module
            (12, 7),  # association module -> match_class Module()
            (0, 2),   # matchmodel -> pairedwith
            (2, 1),   # pairedwith -> applyModel
        ])

        # Attribute equations: SwcToEcuMapping.shortName =
        # 'Swc2EcuMapping_' + Partition.name
        self["equations"] = [((9, 'shortName'), ('concat', (('constant', 'Swc2EcuMapping_'), (3, 'name')))), ]
mit
aselle/tensorflow
tensorflow/python/keras/layers/wrappers.py
3
24939
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Wrapper layers: layers that augment the functionality of another layer. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.base_layer import InputSpec from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.layers.recurrent import _standardize_args from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.util.tf_export import tf_export @tf_export('keras.layers.Wrapper') class Wrapper(Layer): """Abstract wrapper base class. Wrappers take another layer and augment it in various ways. Do not use this class as a layer, it is only an abstract base class. Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers. Arguments: layer: The layer to be wrapped. """ def __init__(self, layer, **kwargs): assert isinstance(layer, Layer) self.layer = layer # Tracks mapping of Wrapper inputs to inner layer inputs. 
Useful when # the inner layer has update ops that depend on its inputs (as opposed # to the inputs to the Wrapper layer). self._input_map = {} super(Wrapper, self).__init__(**kwargs) def build(self, input_shape=None): self.built = True @property def activity_regularizer(self): if hasattr(self.layer, 'activity_regularizer'): return self.layer.activity_regularizer else: return None @property def trainable(self): return self.layer.trainable @trainable.setter def trainable(self, value): self.layer.trainable = value @property def trainable_weights(self): return self.layer.trainable_weights @property def non_trainable_weights(self): return self.layer.non_trainable_weights @property def updates(self): return self.layer.updates + self._updates @property def losses(self): return self.layer.losses + self._losses def get_weights(self): return self.layer.get_weights() def set_weights(self, weights): self.layer.set_weights(weights) def get_config(self): config = { 'layer': { 'class_name': self.layer.__class__.__name__, 'config': self.layer.get_config() } } base_config = super(Wrapper, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top layer = deserialize_layer( config.pop('layer'), custom_objects=custom_objects) return cls(layer, **config) @tf_export('keras.layers.TimeDistributed') class TimeDistributed(Wrapper): """This wrapper allows to apply a layer to every temporal slice of an input. The input should be at least 3D, and the dimension of index one will be considered to be the temporal dimension. Consider a batch of 32 samples, where each sample is a sequence of 10 vectors of 16 dimensions. The batch input shape of the layer is then `(32, 10, 16)`, and the `input_shape`, not including the samples dimension, is `(10, 16)`. 
You can then use `TimeDistributed` to apply a `Dense` layer to each of the 10 timesteps, independently: ```python # as the first layer in a model model = Sequential() model.add(TimeDistributed(Dense(8), input_shape=(10, 16))) # now model.output_shape == (None, 10, 8) ``` The output will then have shape `(32, 10, 8)`. In subsequent layers, there is no need for the `input_shape`: ```python model.add(TimeDistributed(Dense(32))) # now model.output_shape == (None, 10, 32) ``` The output will then have shape `(32, 10, 32)`. `TimeDistributed` can be used with arbitrary layers, not just `Dense`, for instance with a `Conv2D` layer: ```python model = Sequential() model.add(TimeDistributed(Conv2D(64, (3, 3)), input_shape=(10, 299, 299, 3))) ``` Arguments: layer: a layer instance. Raises: ValueError: If not initialized with a `Layer` instance. """ def __init__(self, layer, **kwargs): if not isinstance(layer, Layer): raise ValueError( 'Please initialize `TimeDistributed` layer with a ' '`Layer` instance. You passed: {input}'.format(input=layer)) super(TimeDistributed, self).__init__(layer, **kwargs) self.supports_masking = True self._track_checkpointable(layer, name='layer') def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None): """Finds non-specific dimensions in the static shapes. The static shapes are replaced with the corresponding dynamic shapes of the tensor. Arguments: init_tuple: a tuple, the first part of the output shape tensor: the tensor from which to get the (static and dynamic) shapes as the last part of the output shape start_idx: int, which indicate the first dimension to take from the static shape of the tensor int_shape: an alternative static shape to take as the last part of the output shape Returns: The new int_shape with the first part from init_tuple and the last part from either `int_shape` (if provided) or `tensor.shape`, where every `None` is replaced by the corresponding dimension from `tf.shape(tensor)`. 
""" # replace all None in int_shape by K.shape if int_shape is None: int_shape = K.int_shape(tensor)[start_idx:] if not any(not s for s in int_shape): return init_tuple + tuple(int_shape) shape = K.shape(tensor) int_shape = list(int_shape) for i, s in enumerate(int_shape): if not s: int_shape[i] = shape[start_idx + i] return init_tuple + tuple(int_shape) def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() assert len(input_shape) >= 3 self.input_spec = InputSpec(shape=input_shape) child_input_shape = [input_shape[0]] + input_shape[2:] if not self.layer.built: # The base layer class calls a conversion function on the input shape to # convert it to a TensorShape. The conversion function requires a # tuple which is why we cast the shape. self.layer.build(tuple(child_input_shape)) self.layer.built = True super(TimeDistributed, self).build() self.built = True def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() child_input_shape = tensor_shape.TensorShape([input_shape[0]] + input_shape[2:]) child_output_shape = self.layer.compute_output_shape( child_input_shape).as_list() timesteps = input_shape[1] return tensor_shape.TensorShape([child_output_shape[0], timesteps] + child_output_shape[1:]) def call(self, inputs, training=None, mask=None): kwargs = {} if generic_utils.has_arg(self.layer.call, 'training'): kwargs['training'] = training uses_learning_phase = False # pylint: disable=redefined-outer-name input_shape = K.int_shape(inputs) if input_shape[0]: # batch size matters, use rnn-based implementation def step(x, _): global uses_learning_phase # pylint: disable=global-variable-undefined output = self.layer.call(x, **kwargs) if hasattr(output, '_uses_learning_phase'): uses_learning_phase = (output._uses_learning_phase or uses_learning_phase) return output, [] _, outputs, _ = K.rnn( step, inputs, initial_states=[], input_length=input_shape[1], unroll=False) y = outputs else: # No 
batch size specified, therefore the layer will be able # to process batches of any size. # We can go with reshape-based implementation for performance. input_length = input_shape[1] if not input_length: input_length = array_ops.shape(inputs)[1] inner_input_shape = self._get_shape_tuple((-1,), inputs, 2) # Shape: (num_samples * timesteps, ...). And track the # transformation in self._input_map. input_uid = generic_utils.object_list_uid(inputs) inputs = array_ops.reshape(inputs, inner_input_shape) self._input_map[input_uid] = inputs # (num_samples * timesteps, ...) if generic_utils.has_arg(self.layer.call, 'mask') and mask is not None: inner_mask_shape = self._get_shape_tuple((-1,), mask, 2) kwargs['mask'] = K.reshape(mask, inner_mask_shape) y = self.layer.call(inputs, **kwargs) if hasattr(y, '_uses_learning_phase'): uses_learning_phase = y._uses_learning_phase # Shape: (num_samples, timesteps, ...) output_shape = self.compute_output_shape(input_shape).as_list() output_shape = self._get_shape_tuple( (-1, input_length), y, 1, output_shape[2:]) y = array_ops.reshape(y, output_shape) # Apply activity regularizer if any: if (hasattr(self.layer, 'activity_regularizer') and self.layer.activity_regularizer is not None): regularization_loss = self.layer.activity_regularizer(y) self.add_loss(regularization_loss, inputs) if uses_learning_phase: y._uses_learning_phase = True return y def compute_mask(self, inputs, mask=None): """Computes an output mask tensor for Embedding layer. This is based on the inputs, mask, and the inner layer. If batch size is specified: Simply return the input `mask`. (An rnn-based implementation with more than one rnn inputs is required but not supported in tf.keras yet.) Otherwise we call `compute_mask` of the inner layer at each time step. If the output mask at each time step is not `None`: (E.g., inner layer is Masking or RNN) Concatenate all of them and return the concatenation. 
    If the output mask at each time step is `None` and the input mask is not
    `None`: (E.g., inner layer is Dense) Reduce the input_mask to 2 dimensions
    and return it. Otherwise (both the output mask and the input mask are
    `None`): (E.g., `mask` is not used at all) Return `None`.

    Arguments:
      inputs: Tensor with shape [batch size, timesteps, ...] indicating the
        input to TimeDistributed. If static shape information is available for
        "batch size", `mask` is returned unmodified.
      mask: Either None (indicating no masking) or a Tensor indicating the
        input mask for TimeDistributed. The shape can be static or dynamic.

    Returns:
      Either None (no masking), or a [batch size, timesteps, ...] Tensor with
      an output mask for the TimeDistributed layer with the shape beyond the
      second dimension being the value of the input mask shape (if the computed
      output mask is none), an output mask with the shape beyond the first
      dimension being the value of the mask shape (if mask is not None) or
      output mask with the shape beyond the first dimension being the
      value of the computed output shape.
    """
    # cases need to call the layer.compute_mask when input_mask is None:
    # Masking layer and Embedding layer with mask_zero
    input_shape = K.int_shape(inputs)
    if input_shape[0]:
      # batch size matters, we currently do not handle mask explicitly
      return mask
    inner_mask = mask
    if inner_mask is not None:
      # Fold (batch, time) into one leading axis so the wrapped layer sees a
      # per-sample mask.
      inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
      inner_mask = K.reshape(inner_mask, inner_mask_shape)
    input_uid = generic_utils.object_list_uid(inputs)
    inner_inputs = self._input_map[input_uid]
    output_mask = self.layer.compute_mask(inner_inputs, inner_mask)
    if output_mask is None:
      if mask is None:
        return None
      # input_mask is not None, and output_mask is None:
      # we should return a not-None mask
      output_mask = mask
      # Reduce the input mask down to 2 dimensions (batch, time) by OR-ing
      # away any trailing feature axes.
      for _ in range(2, len(K.int_shape(mask))):
        output_mask = K.any(output_mask, axis=-1)
    else:
      # output_mask is not None. We need to reshape it back to
      # (batch, time, ...) since the inner layer saw folded samples.
      input_length = input_shape[1]
      if not input_length:
        input_length = K.shape(inputs)[1]
      output_mask_int_shape = K.int_shape(output_mask)
      if output_mask_int_shape is None:
        # if the output_mask does not have a static shape,
        # its shape must be the same as mask's
        if mask is not None:
          output_mask_int_shape = K.int_shape(mask)
        else:
          output_mask_int_shape = K.compute_output_shape(input_shape)[:-1]
      output_mask_shape = self._get_shape_tuple(
          (-1, input_length), output_mask, 1, output_mask_int_shape[1:])
      output_mask = K.reshape(output_mask, output_mask_shape)
    return output_mask


@tf_export('keras.layers.Bidirectional')
class Bidirectional(Wrapper):
  """Bidirectional wrapper for RNNs.

  Arguments:
    layer: `Recurrent` instance.
    merge_mode: Mode by which outputs of the forward and backward RNNs will be
      combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the
      outputs will not be combined, they will be returned as a list.

  Raises:
    ValueError: If not initialized with a `Layer` instance or
      In case of invalid `merge_mode` argument.

  Examples:

  ```python
  model = Sequential()
  model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5,
  10)))
  model.add(Bidirectional(LSTM(10)))
  model.add(Dense(5))
  model.add(Activation('softmax'))
  model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
  ```
  """

  def __init__(self, layer, merge_mode='concat', weights=None, **kwargs):
    if not isinstance(layer, Layer):
      raise ValueError(
          'Please initialize `Bidirectional` layer with a '
          '`Layer` instance. You passed: {input}'.format(input=layer))
    if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:
      raise ValueError('Invalid merge mode. '
                       'Merge mode should be one of '
                       '{"sum", "mul", "ave", "concat", None}')
    # The backward copy is identical to `layer` except that it consumes the
    # sequence in reverse (go_backwards flipped).
    self.forward_layer = copy.copy(layer)
    config = layer.get_config()
    config['go_backwards'] = not config['go_backwards']
    self.backward_layer = layer.__class__.from_config(config)
    self.forward_layer._name = 'forward_' + self.forward_layer.name
    self.backward_layer._name = 'backward_' + self.backward_layer.name
    self.merge_mode = merge_mode
    if weights:
      # First half of the weight list goes to the forward copy, second half
      # to the backward copy.
      nw = len(weights)
      self.forward_layer.initial_weights = weights[:nw // 2]
      self.backward_layer.initial_weights = weights[nw // 2:]
    self.stateful = layer.stateful
    self.return_sequences = layer.return_sequences
    self.return_state = layer.return_state
    self.supports_masking = True
    self._trainable = True
    self._num_constants = None
    super(Bidirectional, self).__init__(layer, **kwargs)
    self.input_spec = layer.input_spec
    self._track_checkpointable(self.forward_layer, name='forward_layer')
    self._track_checkpointable(self.backward_layer, name='backward_layer')

  @property
  def trainable(self):
    return self._trainable

  @trainable.setter
  def trainable(self, value):
    # Keep both wrapped copies in sync with the wrapper's trainable flag.
    self._trainable = value
    self.forward_layer.trainable = value
    self.backward_layer.trainable = value

  def get_weights(self):
    # Forward weights first, then backward — mirrored by set_weights below.
    return self.forward_layer.get_weights() + self.backward_layer.get_weights()

  def set_weights(self, weights):
    nw = len(weights)
    self.forward_layer.set_weights(weights[:nw // 2])
    self.backward_layer.set_weights(weights[nw // 2:])

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    output_shape = tuple(self.forward_layer.compute_output_shape(
        input_shape).as_list())
    if self.return_state:
      # First entry is the output shape; the rest are per-direction states.
      state_shape = output_shape[1:]
      output_shape = output_shape[0]
    if self.merge_mode == 'concat':
      # Concatenation doubles the feature dimension.
      output_shape = list(output_shape)
      output_shape[-1] *= 2
      output_shape = tuple(output_shape)
    elif self.merge_mode is None:
      # Unmerged: one output per direction.
      output_shape = [output_shape, copy.copy(output_shape)]
    if self.return_state:
      if self.merge_mode is None:
        return output_shape + state_shape + copy.copy(state_shape)
      return [output_shape] + state_shape + copy.copy(state_shape)
    return output_shape

  def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
    """`Bidirectional.__call__` implements the same API as the wrapped `RNN`."""
    inputs, initial_state, constants = _standardize_args(
        inputs, initial_state, constants, self._num_constants)

    if isinstance(inputs, list):
      if len(inputs) > 1:
        initial_state = inputs[1:]
      inputs = inputs[0]

    if initial_state is None and constants is None:
      return super(Bidirectional, self).__call__(inputs, **kwargs)

    # Applies the same workaround as in `RNN.__call__`
    additional_inputs = []
    additional_specs = []
    if initial_state is not None:
      # Check if `initial_state` can be split into halves (one per direction)
      num_states = len(initial_state)
      if num_states % 2 > 0:
        raise ValueError(
            'When passing `initial_state` to a Bidirectional RNN, '
            'the state should be a list containing the states of '
            'the underlying RNNs. '
            'Found: ' + str(initial_state))

      kwargs['initial_state'] = initial_state
      additional_inputs += initial_state
      state_specs = [InputSpec(shape=K.int_shape(state))
                     for state in initial_state]
      self.forward_layer.state_spec = state_specs[:num_states // 2]
      self.backward_layer.state_spec = state_specs[num_states // 2:]
      additional_specs += state_specs
    if constants is not None:
      kwargs['constants'] = constants
      additional_inputs += constants
      constants_spec = [InputSpec(shape=K.int_shape(constant))
                        for constant in constants]
      self.forward_layer.constants_spec = constants_spec
      self.backward_layer.constants_spec = constants_spec
      additional_specs += constants_spec

      self._num_constants = len(constants)
      self.forward_layer._num_constants = self._num_constants
      self.backward_layer._num_constants = self._num_constants

    # All extra inputs must be of the same nature (symbolic vs. concrete).
    is_keras_tensor = K.is_keras_tensor(additional_inputs[0])
    for tensor in additional_inputs:
      if K.is_keras_tensor(tensor) != is_keras_tensor:
        raise ValueError('The initial state of a Bidirectional'
                         ' layer cannot be specified with a mix of'
                         ' Keras tensors and non-Keras tensors'
                         ' (a "Keras tensor" is a tensor that was'
                         ' returned by a Keras layer, or by `Input`)')

    if is_keras_tensor:
      # Compute the full input spec, including state
      full_input = [inputs] + additional_inputs
      full_input_spec = self.input_spec + additional_specs

      # Perform the call with temporarily replaced input_spec
      original_input_spec = self.input_spec
      self.input_spec = full_input_spec
      output = super(Bidirectional, self).__call__(full_input, **kwargs)
      self.input_spec = original_input_spec
      return output
    else:
      return super(Bidirectional, self).__call__(inputs, **kwargs)

  def call(self,
           inputs,
           training=None,
           mask=None,
           initial_state=None,
           constants=None):
    """`Bidirectional.call` implements the same API as the wrapped `RNN`."""
    # Only forward the keyword arguments the wrapped layer actually accepts.
    kwargs = {}
    if generic_utils.has_arg(self.layer.call, 'training'):
      kwargs['training'] = training
    if generic_utils.has_arg(self.layer.call, 'mask'):
      kwargs['mask'] = mask
    if generic_utils.has_arg(self.layer.call, 'constants'):
      kwargs['constants'] = constants

    if initial_state is not None and generic_utils.has_arg(
        self.layer.call, 'initial_state'):
      # Split the states between the forward and backward copies.
      forward_state = initial_state[:len(initial_state) // 2]
      backward_state = initial_state[len(initial_state) // 2:]
      y = self.forward_layer.call(inputs, initial_state=forward_state, **kwargs)
      y_rev = self.backward_layer.call(
          inputs, initial_state=backward_state, **kwargs)
    else:
      y = self.forward_layer.call(inputs, **kwargs)
      y_rev = self.backward_layer.call(inputs, **kwargs)

    if self.return_state:
      states = y[1:] + y_rev[1:]
      y = y[0]
      y_rev = y_rev[0]

    if self.return_sequences:
      # Re-reverse the backward output along time so it aligns with `y`.
      y_rev = K.reverse(y_rev, 1)
    if self.merge_mode == 'concat':
      output = K.concatenate([y, y_rev])
    elif self.merge_mode == 'sum':
      output = y + y_rev
    elif self.merge_mode == 'ave':
      output = (y + y_rev) / 2
    elif self.merge_mode == 'mul':
      output = y * y_rev
    elif self.merge_mode is None:
      output = [y, y_rev]

    # Properly set learning phase
    if (getattr(y, '_uses_learning_phase', False) or
        getattr(y_rev, '_uses_learning_phase', False)):
      if self.merge_mode is None:
        for out in output:
          out._uses_learning_phase = True
      else:
        output._uses_learning_phase = True

    if self.return_state:
      if self.merge_mode is None:
        return output + states
      return [output] + states
    return output

  def reset_states(self):
    # Reset the recurrent state of both directions.
    self.forward_layer.reset_states()
    self.backward_layer.reset_states()

  def build(self, input_shape):
    with K.name_scope(self.forward_layer.name):
      self.forward_layer.build(input_shape)
    with K.name_scope(self.backward_layer.name):
      self.backward_layer.build(input_shape)
    self.built = True

  def compute_mask(self, inputs, mask):
    if isinstance(mask, list):
      mask = mask[0]
    if self.return_sequences:
      if not self.merge_mode:
        # Unmerged outputs: one mask per direction.
        output_mask = [mask, mask]
      else:
        output_mask = mask
    else:
      output_mask = [None, None] if not self.merge_mode else None

    if self.return_state:
      # States are never masked; append one None per state of each direction.
      states = self.forward_layer.states
      state_mask = [None for _ in states]
      if isinstance(output_mask, list):
        return output_mask + state_mask * 2
      return [output_mask] + state_mask * 2
    return output_mask

  @property
  def trainable_weights(self):
    if hasattr(self.forward_layer, 'trainable_weights'):
      return (self.forward_layer.trainable_weights +
              self.backward_layer.trainable_weights)
    return []

  @property
  def non_trainable_weights(self):
    if hasattr(self.forward_layer, 'non_trainable_weights'):
      return (self.forward_layer.non_trainable_weights +
              self.backward_layer.non_trainable_weights)
    return []

  @property
  def updates(self):
    if hasattr(self.forward_layer, 'updates'):
      return self.forward_layer.updates + self.backward_layer.updates
    return []

  @property
  def losses(self):
    if hasattr(self.forward_layer, 'losses'):
      return self.forward_layer.losses + self.backward_layer.losses
    return []

  @property
  def constraints(self):
    constraints = {}
    if hasattr(self.forward_layer, 'constraints'):
      constraints.update(self.forward_layer.constraints)
      constraints.update(self.backward_layer.constraints)
    return constraints

  def get_config(self):
    config = {'merge_mode': self.merge_mode}
    if self._num_constants is not None:
      config['num_constants'] = self._num_constants
    base_config = super(Bidirectional, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    # `num_constants` is popped before the base from_config sees the dict,
    # then restored on the instance.
    num_constants = config.pop('num_constants', None)
    layer = super(Bidirectional, cls).from_config(config,
                                                  custom_objects=custom_objects)
    layer._num_constants = num_constants
    return layer
apache-2.0
Teamxrtc/webrtc-streaming-node
third_party/depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/python-gflags/gflags.py
448
104236
#!/usr/bin/env python # # Copyright (c) 2002, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # --- # Author: Chad Lester # Design and style contributions by: # Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann, # Eric Veach, Laurence Gonsalves, Matthew Springer # Code reorganized a bit by Craig Silverstein """This module is used to define and parse command line flags. 
This module defines a *distributed* flag-definition policy: rather than an application having to define all flags in or near main(), each python module defines flags that are useful to it. When one python module imports another, it gains access to the other's flags. (This is implemented by having all modules share a common, global registry object containing all the flag information.) Flags are defined through the use of one of the DEFINE_xxx functions. The specific function used determines how the flag is parsed, checked, and optionally type-converted, when it's seen on the command line. IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a 'FlagValues' object (typically the global FlagValues FLAGS, defined here). The 'FlagValues' object can scan the command line arguments and pass flag arguments to the corresponding 'Flag' objects for value-checking and type conversion. The converted flag values are available as attributes of the 'FlagValues' object. Code can access the flag through a FlagValues object, for instance gflags.FLAGS.myflag. Typically, the __main__ module passes the command line arguments to gflags.FLAGS for parsing. At bottom, this module calls getopt(), so getopt functionality is supported, including short- and long-style flags, and the use of -- to terminate flags. Methods defined by the flag module will throw 'FlagsError' exceptions. The exception argument will be a human-readable string. FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags take a name, default value, help-string, and optional 'short' name (one-letter name). Some flags have other arguments, which are described with the flag. DEFINE_string: takes any input, and interprets it as a string. DEFINE_bool or DEFINE_boolean: typically does not take an argument: say --myflag to set FLAGS.myflag to true, or --nomyflag to set FLAGS.myflag to false. 
Alternately, you can say --myflag=true or --myflag=t or --myflag=1 or --myflag=false or --myflag=f or --myflag=0 DEFINE_float: takes an input and interprets it as a floating point number. Takes optional args lower_bound and upper_bound; if the number specified on the command line is out of range, it will raise a FlagError. DEFINE_integer: takes an input and interprets it as an integer. Takes optional args lower_bound and upper_bound as for floats. DEFINE_enum: takes a list of strings which represents legal values. If the command-line value is not in this list, raise a flag error. Otherwise, assign to FLAGS.flag as a string. DEFINE_list: Takes a comma-separated list of strings on the commandline. Stores them in a python list object. DEFINE_spaceseplist: Takes a space-separated list of strings on the commandline. Stores them in a python list object. Example: --myspacesepflag "foo bar baz" DEFINE_multistring: The same as DEFINE_string, except the flag can be specified more than once on the commandline. The result is a python list object (list of strings), even if the flag is only on the command line once. DEFINE_multi_int: The same as DEFINE_integer, except the flag can be specified more than once on the commandline. The result is a python list object (list of ints), even if the flag is only on the command line once. SPECIAL FLAGS: There are a few flags that have special meaning: --help prints a list of all the flags in a human-readable fashion --helpshort prints a list of all key flags (see below). --helpxml prints a list of all flags, in XML format. DO NOT parse the output of --help and --helpshort. Instead, parse the output of --helpxml. For more info, see "OUTPUT FOR --helpxml" below. --flagfile=foo read flags from file foo. --undefok=f1,f2 ignore unrecognized option errors for f1,f2. For boolean flags, you should use --undefok=boolflag, and --boolflag and --noboolflag will be accepted. Do not use --undefok=noboolflag. 
-- as in getopt(), terminates flag-processing FLAGS VALIDATORS: If your program: - requires flag X to be specified - needs flag Y to match a regular expression - or requires any more general constraint to be satisfied then validators are for you! Each validator represents a constraint over one flag, which is enforced starting from the initial parsing of the flags and until the program terminates. Also, lower_bound and upper_bound for numerical flags are enforced using flag validators. Howto: If you want to enforce a constraint over one flag, use gflags.RegisterValidator(flag_name, checker, message='Flag validation failed', flag_values=FLAGS) After flag values are initially parsed, and after any change to the specified flag, method checker(flag_value) will be executed. If constraint is not satisfied, an IllegalFlagValue exception will be raised. See RegisterValidator's docstring for a detailed explanation on how to construct your own checker. EXAMPLE USAGE: FLAGS = gflags.FLAGS gflags.DEFINE_integer('my_version', 0, 'Version number.') gflags.DEFINE_string('filename', None, 'Input file name', short_name='f') gflags.RegisterValidator('my_version', lambda value: value % 2 == 0, message='--my_version must be divisible by 2') gflags.MarkFlagAsRequired('filename') NOTE ON --flagfile: Flags may be loaded from text files in addition to being specified on the commandline. Any flags you don't feel like typing, throw them in a file, one flag per line, for instance: --myflag=myvalue --nomyboolean_flag You then specify your file with the special flag '--flagfile=somefile'. You CAN recursively nest flagfile= tokens OR use multiple files on the command line. Lines beginning with a single hash '#' or a double slash '//' are comments in your flagfile. 
Any flagfile=<file> will be interpreted as having a relative path from the current working directory rather than from the place the file was included from: myPythonScript.py --flagfile=config/somefile.cfg If somefile.cfg includes further --flagfile= directives, these will be referenced relative to the original CWD, not from the directory the including flagfile was found in! The caveat applies to people who are including a series of nested files in a different dir than they are executing out of. Relative path names are always from CWD, not from the directory of the parent include flagfile. We do now support '~' expanded directory names. Absolute path names ALWAYS work! EXAMPLE USAGE: FLAGS = gflags.FLAGS # Flag names are globally defined! So in general, we need to be # careful to pick names that are unlikely to be used by other libraries. # If there is a conflict, we'll get an error at import time. gflags.DEFINE_string('name', 'Mr. President', 'your name') gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0) gflags.DEFINE_boolean('debug', False, 'produces debugging output') gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender') def main(argv): try: argv = FLAGS(argv) # parse flags except gflags.FlagsError, e: print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS) sys.exit(1) if FLAGS.debug: print 'non-flag arguments:', argv print 'Happy Birthday', FLAGS.name if FLAGS.age is not None: print 'You are a %d year old %s' % (FLAGS.age, FLAGS.gender) if __name__ == '__main__': main(sys.argv) KEY FLAGS: As we already explained, each module gains access to all flags defined by all the other modules it transitively imports. In the case of non-trivial scripts, this means a lot of flags ... For documentation purposes, it is good to identify the flags that are key (i.e., really important) to a module. Clearly, the concept of "key flag" is a subjective one. 
When trying to determine whether a flag is key to a module or not, assume that you are trying to explain your module to a potential user: which flags would you really like to mention first? We'll describe shortly how to declare which flags are key to a module. For the moment, assume we know the set of key flags for each module. Then, if you use the app.py module, you can use the --helpshort flag to print only the help for the flags that are key to the main module, in a human-readable format. NOTE: If you need to parse the flag help, do NOT use the output of --help / --helpshort. That output is meant for human consumption, and may be changed in the future. Instead, use --helpxml; flags that are key for the main module are marked there with a <key>yes</key> element. The set of key flags for a module M is composed of: 1. Flags defined by module M by calling a DEFINE_* function. 2. Flags that module M explicitly declares as key by using the function DECLARE_key_flag(<flag_name>) 3. Key flags of other modules that M specifies by using the function ADOPT_module_key_flags(<other_module>) This is a "bulk" declaration of key flags: each flag that is key for <other_module> becomes key for the current module too. Notice that if you do not use the functions described at points 2 and 3 above, then --helpshort prints information only about the flags defined by the main module of our script. In many cases, this behavior is good enough. But if you move part of the main module code (together with the related flags) into a different module, then it is nice to use DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort lists all relevant flags (otherwise, your code refactoring may confuse your users). Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own pluses and minuses: DECLARE_key_flag is more targeted and may lead to a more focused --helpshort documentation. 
ADOPT_module_key_flags is good for cases when an entire module is considered key to the current script. Also, it does not require updates to client scripts when a new flag is added to the module. EXAMPLE USAGE 2 (WITH KEY FLAGS): Consider an application that contains the following three files (two auxiliary modules and a main module) File libfoo.py: import gflags gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start') gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.') ... some code ... File libbar.py: import gflags gflags.DEFINE_string('bar_gfs_path', '/gfs/path', 'Path to the GFS files for libbar.') gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com', 'Email address for bug reports about module libbar.') gflags.DEFINE_boolean('bar_risky_hack', False, 'Turn on an experimental and buggy optimization.') ... some code ... File myscript.py: import gflags import libfoo import libbar gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.') # Declare that all flags that are key for libfoo are # key for this module too. gflags.ADOPT_module_key_flags(libfoo) # Declare that the flag --bar_gfs_path (defined in libbar) is key # for this module. gflags.DECLARE_key_flag('bar_gfs_path') ... some code ... When myscript is invoked with the flag --helpshort, the resulted help message lists information about all the key flags for myscript: --num_iterations, --num_replicas, --rpc2, and --bar_gfs_path. Of course, myscript uses all the flags declared by it (in this case, just --num_replicas) or by any of the modules it transitively imports (e.g., the modules libfoo, libbar). E.g., it can access the value of FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key flag for myscript. 
OUTPUT FOR --helpxml: The --helpxml flag generates output with the following structure: <?xml version="1.0"?> <AllFlags> <program>PROGRAM_BASENAME</program> <usage>MAIN_MODULE_DOCSTRING</usage> (<flag> [<key>yes</key>] <file>DECLARING_MODULE</file> <name>FLAG_NAME</name> <meaning>FLAG_HELP_MESSAGE</meaning> <default>DEFAULT_FLAG_VALUE</default> <current>CURRENT_FLAG_VALUE</current> <type>FLAG_TYPE</type> [OPTIONAL_ELEMENTS] </flag>)* </AllFlags> Notes: 1. The output is intentionally similar to the output generated by the C++ command-line flag library. The few differences are due to the Python flags that do not have a C++ equivalent (at least not yet), e.g., DEFINE_list. 2. New XML elements may be added in the future. 3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can pass for this flag on the command-line. E.g., for a flag defined using DEFINE_list, this field may be foo,bar, not ['foo', 'bar']. 4. CURRENT_FLAG_VALUE is produced using str(). This means that the string 'false' will be represented in the same way as the boolean False. Using repr() would have removed this ambiguity and simplified parsing, but would have broken the compatibility with the C++ command-line flags. 5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of flags: lower_bound, upper_bound (for flags that specify bounds), enum_value (for enum flags), list_separator (for flags that consist of a list of values, separated by a special token). 6. We do not provide any example here: please use --helpxml instead. This module requires at least python 2.2.1 to run. """ import cgi import getopt import os import re import string import struct import sys # pylint: disable-msg=C6204 try: import fcntl except ImportError: fcntl = None try: # Importing termios will fail on non-unix platforms. import termios except ImportError: termios = None import gflags_validators # pylint: enable-msg=C6204 # Are we running under pychecker? 
# Are we running under pychecker?
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules


def _GetCallingModuleObjectAndName():
  """Returns the module that's calling into this module.

  We generally use this function to get the name of the module calling a
  DEFINE_foo... function.

  Returns:
    A (module object, module name) pair for the first caller whose globals
    are not this module's globals.

  Raises:
    AssertionError: if no calling module could be identified.
  """
  # Walk down the stack to find the first globals dict that's not ours.
  for depth in range(1, sys.getrecursionlimit()):
    # Fetch the frame's globals once; compare identity, not equality.
    globals_for_frame = sys._getframe(depth).f_globals
    if globals_for_frame is not globals():
      module, module_name = _GetModuleObjectAndName(globals_for_frame)
      if module_name is not None:
        return module, module_name
  raise AssertionError("No module was found")


def _GetCallingModule():
  """Returns the name of the module that's calling into this module."""
  return _GetCallingModuleObjectAndName()[1]


def _GetThisModuleObjectAndName():
  """Returns: (module object, module name) for this module."""
  return _GetModuleObjectAndName(globals())


# module exceptions:
class FlagsError(Exception):
  """The base class for all flags errors."""
  pass


class DuplicateFlag(FlagsError):
  """Raised if there is a flag naming conflict."""
  pass


class CantOpenFlagFileError(FlagsError):
  """Raised if flagfile fails to open: doesn't exist, wrong permissions, etc."""
  pass


class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
  """Special case of DuplicateFlag -- SWIG flag value can't be set to None.

  This can be raised when a duplicate flag is created. Even if allow_override is
  True, we still abort if the new value is None, because it's currently
  impossible to pass None default value back to SWIG. See FlagValues.SetDefault
  for details.
  """
  pass


class DuplicateFlagError(DuplicateFlag):
  """A DuplicateFlag whose message cites the conflicting definitions.

  A DuplicateFlagError conveys more information than a DuplicateFlag,
  namely the modules where the conflicting definitions occur. This class
  was created to avoid breaking external modules which depend on the
  existing DuplicateFlags interface.
  """

  def __init__(self, flagname, flag_values, other_flag_values=None):
    """Create a DuplicateFlagError.

    Args:
      flagname: Name of the flag being redefined.
      flag_values: FlagValues object containing the first definition of
          flagname.
      other_flag_values: If this argument is not None, it should be the
          FlagValues object where the second definition of flagname occurs.
          If it is None, we assume that we're being called when attempting
          to create the flag a second time, and we use the module calling
          this one as the source of the second definition.
    """
    self.flagname = flagname
    first_module = flag_values.FindModuleDefiningFlag(
        flagname, default='<unknown>')
    if other_flag_values is None:
      # Second definition is being created right now by our caller.
      second_module = _GetCallingModule()
    else:
      second_module = other_flag_values.FindModuleDefiningFlag(
          flagname, default='<unknown>')
    msg = "The flag '%s' is defined twice. First from %s, Second from %s" % (
        self.flagname, first_module, second_module)
    DuplicateFlag.__init__(self, msg)


class IllegalFlagValue(FlagsError):
  """The flag command line argument is illegal."""
  pass


class UnrecognizedFlag(FlagsError):
  """Raised if a flag is unrecognized."""
  pass


# An UnrecognizedFlagError conveys more information than an UnrecognizedFlag.
# Since there are external modules that create DuplicateFlags, the interface to
# DuplicateFlag shouldn't change.  The flagvalue will be assigned the full value
# of the flag and its argument, if any, allowing handling of unrecognized flags
# in an exception handler.
# If flagvalue is the empty string, then this exception is an due to a
# reference to a flag that was not already defined.
class UnrecognizedFlagError(UnrecognizedFlag):

  def __init__(self, flagname, flagvalue=''):
    # flagvalue carries the full "--flag=value" text when available so an
    # exception handler can re-process unrecognized flags; it is '' when
    # the error comes from referencing a flag that was never defined.
    self.flagname = flagname
    self.flagvalue = flagvalue
    UnrecognizedFlag.__init__(
        self, "Unknown command line flag '%s'" % flagname)

# Global variable used by expvar
_exported_flags = {}
_help_width = 80  # width of help output


def GetHelpWidth():
  """Returns: an integer, the width of help lines that is used in TextWrap."""
  # Fall back to the fixed default when not writing to a terminal or when
  # the termios/fcntl modules are unavailable (e.g. on Windows).
  if (not sys.stdout.isatty()) or (termios is None) or (fcntl is None):
    return _help_width
  try:
    # TIOCGWINSZ fills a struct winsize; the first two shorts are
    # (rows, columns), so index 1 is the column count.
    data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
    columns = struct.unpack('hh', data)[1]
    # Emacs mode returns 0.
    # Here we assume that any value below 40 is unreasonable.
    if columns >= 40:
      return columns
    # Returning an int as default is fine, int(int) just returns the int.
    return int(os.getenv('COLUMNS', _help_width))
  except (TypeError, IOError, struct.error):
    return _help_width


def CutCommonSpacePrefix(text):
  """Removes a common space prefix from the lines of a multiline text.

  If the first line does not start with a space, it is left as it is and
  only in the remaining lines a common space prefix is being searched
  for. That means the first line will stay untouched. This is especially
  useful to turn doc strings into help texts. This is because some
  people prefer to have the doc comment start already after the
  apostrophe and then align the following lines while others have the
  apostrophes on a separate line.

  The function also drops trailing empty lines and ignores empty lines
  following the initial content line while calculating the initial
  common whitespace.

  Args:
    text: text to work on

  Returns:
    the resulting text
  """
  text_lines = text.splitlines()
  # Drop trailing empty lines
  while text_lines and not text_lines[-1]:
    text_lines = text_lines[:-1]
  if text_lines:
    # We got some content, is the first line starting with a space?
    if text_lines[0] and text_lines[0][0].isspace():
      text_first_line = []
    else:
      # First line has no leading space: keep it verbatim, out of the
      # common-prefix computation.
      text_first_line = [text_lines.pop(0)]
    # Calculate length of common leading whitespace (only over content lines)
    common_prefix = os.path.commonprefix([line for line in text_lines if line])
    space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
    # If we have a common space prefix, drop it from all lines
    if space_prefix_len:
      # NOTE: xrange marks this file as Python 2.
      for index in xrange(len(text_lines)):
        if text_lines[index]:
          text_lines[index] = text_lines[index][space_prefix_len:]
    return '\n'.join(text_first_line + text_lines)
  return ''


def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
  """Wraps a given text to a maximum line length and returns it.

  We turn lines that only contain whitespace into empty lines.  We keep
  new lines and tabs (e.g., we do not treat tabs as spaces).

  Args:
    text: text to wrap
    length: maximum length of a line, includes indentation
        if this is None then use GetHelpWidth()
    indent: indent for all but first line
    firstline_indent: indent for first line; if None, fall back to indent
    tabs: replacement for tabs

  Returns:
    wrapped text

  Raises:
    FlagsError: if indent not shorter than length
    FlagsError: if firstline_indent not shorter than length
  """
  # Get defaults where callee used None
  if length is None:
    length = GetHelpWidth()
  if indent is None:
    indent = ''
  if len(indent) >= length:
    raise FlagsError('Indent must be shorter than length')
  # In line we will be holding the current line which is to be started
  # with indent (or firstline_indent if available) and then appended
  # with words.
  if firstline_indent is None:
    firstline_indent = ''
    line = indent
  else:
    line = firstline_indent
    if len(firstline_indent) >= length:
      raise FlagsError('First line indent must be shorter than length')

  # If the callee does not care about tabs we simply convert them to
  # spaces.  If callee wanted tabs to be single space then we do that
  # already here.
  if not tabs or tabs == ' ':
    text = text.replace('\t', ' ')
  else:
    tabs_are_whitespace = not tabs.strip()

  # Groups: (leading spaces)(tabs)(next run of non-whitespace).
  line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)

  # Split the text into lines and the lines with the regex above. The
  # resulting lines are collected in result[]. For each split we get the
  # spaces, the tabs and the next non white space (e.g. next word).
  result = []
  for text_line in text.splitlines():
    # Store result length so we can find out whether processing the next
    # line gave any new content
    old_result_len = len(result)
    # Process next line with line_regex. For optimization we do an rstrip().
    # - process tabs (changes either line or word, see below)
    # - process word (first try to squeeze on line, then wrap or force wrap)
    # Spaces found on the line are ignored, they get added while wrapping as
    # needed.
    for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
      # If tabs weren't converted to spaces, handle them now
      if current_tabs:
        # If the last thing we added was a space anyway then drop
        # it. But let's not get rid of the indentation.
        if (((result and line != indent) or
             (not result and line != firstline_indent)) and line[-1] == ' '):
          line = line[:-1]
        # Add the tabs, if that means adding whitespace, just add it at
        # the line, the rstrip() code will shorten the line down if
        # necessary
        if tabs_are_whitespace:
          line += tabs * len(current_tabs)
        else:
          # if not all tab replacement is whitespace we prepend it to the word
          word = tabs * len(current_tabs) + word
      # Handle the case where word cannot be squeezed onto current last line
      if len(line) + len(word) > length and len(indent) + len(word) <= length:
        result.append(line.rstrip())
        line = indent + word
        word = ''
        # No space left on line or can we append a space?
        if len(line) + 1 >= length:
          result.append(line.rstrip())
          line = indent
        else:
          line += ' '
      # Add word and shorten it up to allowed line length. Restart next
      # line with indent and repeat, or add a space if we're done (word
      # finished) This deals with words that cannot fit on one line
      # (e.g. indent + word longer than allowed line length).
      while len(line) + len(word) >= length:
        line += word
        result.append(line[:length])
        word = line[length:]
        line = indent
      # Default case, simply append the word and a space
      if word:
        line += word + ' '
    # End of input line. If we have content we finish the line. If the
    # current line is just the indent but we had content in during this
    # original line then we need to add an empty line.
    if (result and line != indent) or (not result and line != firstline_indent):
      result.append(line.rstrip())
    elif len(result) == old_result_len:
      result.append('')
    line = indent

  return '\n'.join(result)


def DocToHelp(doc):
  """Takes a __doc__ string and reformats it as help."""

  # Get rid of starting and ending white space. Using lstrip() or even
  # strip() could drop more than maximum of first line and right space
  # of last line.
  doc = doc.strip()

  # Get rid of all empty lines
  whitespace_only_line = re.compile('^[ \t]+$', re.M)
  doc = whitespace_only_line.sub('', doc)

  # Cut out common space at line beginnings
  doc = CutCommonSpacePrefix(doc)

  # Just like this module's comment, comments tend to be aligned somehow.
  # In other words they all start with the same amount of white space
  # 1) keep double new lines
  # 2) keep ws after new lines if not empty line
  # 3) all other new lines shall be changed to a space
  # Solution: Match new lines between non white space and replace with space.
  # NOTE(review): the 4th positional argument of re.sub is *count*, not
  # *flags* -- passing re.M (== 8) here silently caps substitutions at 8.
  # Likely a latent bug; left unchanged in this documentation-only pass.
  doc = re.sub('(?<=\S)\n(?=\S)', ' ', doc, re.M)

  return doc


def _GetModuleObjectAndName(globals_dict):
  """Returns the module that defines a global environment, and its name.

  Args:
    globals_dict: A dictionary that should correspond to an environment
      providing the values of the globals.

  Returns:
    A pair consisting of (1) module object and (2) module name (a
    string).  Returns (None, None) if the module could not be
    identified.
  """
  # The use of .items() (instead of .iteritems()) is NOT a mistake: if
  # a parallel thread imports a module while we iterate over
  # .iteritems() (not nice, but possible), we get a RuntimeError ...
  # Hence, we use the slightly slower but safer .items().
  for name, module in sys.modules.items():
    if getattr(module, '__dict__', None) is globals_dict:
      if name == '__main__':
        # Pick a more informative name for the main module.
        name = sys.argv[0]
      return (module, name)
  return (None, None)


def _GetMainModule():
  """Returns: string, name of the module from which execution started."""
  # First, try to use the same logic used by _GetCallingModuleObjectAndName(),
  # i.e., call _GetModuleObjectAndName().  For that we first need to
  # find the dictionary that the main module uses to store the
  # globals.
  #
  # That's (normally) the same dictionary object that the deepest
  # (oldest) stack frame is using for globals.
  deepest_frame = sys._getframe(0)
  while deepest_frame.f_back is not None:
    deepest_frame = deepest_frame.f_back
  globals_for_main_module = deepest_frame.f_globals
  main_module_name = _GetModuleObjectAndName(globals_for_main_module)[1]
  # The above strategy fails in some cases (e.g., tools that compute
  # code coverage by redefining, among other things, the main module).
  # If so, just use sys.argv[0].  We can probably always do this, but
  # it's safest to try to use the same logic as
  # _GetCallingModuleObjectAndName().
  if main_module_name is None:
    main_module_name = sys.argv[0]

  return main_module_name


class FlagValues:
  """Registry of 'Flag' objects.

  A 'FlagValues' can then scan command line arguments, passing flag
  arguments through to the 'Flag' objects that it owns.  It also
  provides easy access to the flag values.
Typically only one 'FlagValues' object is needed by an application: gflags.FLAGS This class is heavily overloaded: 'Flag' objects are registered via __setitem__: FLAGS['longname'] = x # register a new flag The .value attribute of the registered 'Flag' objects can be accessed as attributes of this 'FlagValues' object, through __getattr__. Both the long and short name of the original 'Flag' objects can be used to access its value: FLAGS.longname # parsed flag value FLAGS.x # parsed flag value (short name) Command line arguments are scanned and passed to the registered 'Flag' objects through the __call__ method. Unparsed arguments, including argv[0] (e.g. the program name) are returned. argv = FLAGS(sys.argv) # scan command line arguments The original registered Flag objects can be retrieved through the use of the dictionary-like operator, __getitem__: x = FLAGS['longname'] # access the registered Flag object The str() operator of a 'FlagValues' object provides help for all of the registered 'Flag' objects. """ def __init__(self): # Since everything in this class is so heavily overloaded, the only # way of defining and using fields is to access __dict__ directly. # Dictionary: flag name (string) -> Flag object. self.__dict__['__flags'] = {} # Dictionary: module name (string) -> list of Flag objects that are defined # by that module. self.__dict__['__flags_by_module'] = {} # Dictionary: module id (int) -> list of Flag objects that are defined by # that module. self.__dict__['__flags_by_module_id'] = {} # Dictionary: module name (string) -> list of Flag objects that are # key for that module. self.__dict__['__key_flags_by_module'] = {} # Set if we should use new style gnu_getopt rather than getopt when parsing # the args. Only possible with Python 2.3+ self.UseGnuGetOpt(False) def UseGnuGetOpt(self, use_gnu_getopt=True): """Use GNU-style scanning. Allows mixing of flag and non-flag arguments. 
See http://docs.python.org/library/getopt.html#getopt.gnu_getopt Args: use_gnu_getopt: wether or not to use GNU style scanning. """ self.__dict__['__use_gnu_getopt'] = use_gnu_getopt def IsGnuGetOpt(self): return self.__dict__['__use_gnu_getopt'] def FlagDict(self): return self.__dict__['__flags'] def FlagsByModuleDict(self): """Returns the dictionary of module_name -> list of defined flags. Returns: A dictionary. Its keys are module names (strings). Its values are lists of Flag objects. """ return self.__dict__['__flags_by_module'] def FlagsByModuleIdDict(self): """Returns the dictionary of module_id -> list of defined flags. Returns: A dictionary. Its keys are module IDs (ints). Its values are lists of Flag objects. """ return self.__dict__['__flags_by_module_id'] def KeyFlagsByModuleDict(self): """Returns the dictionary of module_name -> list of key flags. Returns: A dictionary. Its keys are module names (strings). Its values are lists of Flag objects. """ return self.__dict__['__key_flags_by_module'] def _RegisterFlagByModule(self, module_name, flag): """Records the module that defines a specific flag. We keep track of which flag is defined by which module so that we can later sort the flags by module. Args: module_name: A string, the name of a Python module. flag: A Flag object, a flag that is key to the module. """ flags_by_module = self.FlagsByModuleDict() flags_by_module.setdefault(module_name, []).append(flag) def _RegisterFlagByModuleId(self, module_id, flag): """Records the module that defines a specific flag. Args: module_id: An int, the ID of the Python module. flag: A Flag object, a flag that is key to the module. """ flags_by_module_id = self.FlagsByModuleIdDict() flags_by_module_id.setdefault(module_id, []).append(flag) def _RegisterKeyFlagForModule(self, module_name, flag): """Specifies that a flag is a key flag for a module. Args: module_name: A string, the name of a Python module. flag: A Flag object, a flag that is key to the module. 
""" key_flags_by_module = self.KeyFlagsByModuleDict() # The list of key flags for the module named module_name. key_flags = key_flags_by_module.setdefault(module_name, []) # Add flag, but avoid duplicates. if flag not in key_flags: key_flags.append(flag) def _GetFlagsDefinedByModule(self, module): """Returns the list of flags defined by a module. Args: module: A module object or a module name (a string). Returns: A new list of Flag objects. Caller may update this list as he wishes: none of those changes will affect the internals of this FlagValue object. """ if not isinstance(module, str): module = module.__name__ return list(self.FlagsByModuleDict().get(module, [])) def _GetKeyFlagsForModule(self, module): """Returns the list of key flags for a module. Args: module: A module object or a module name (a string) Returns: A new list of Flag objects. Caller may update this list as he wishes: none of those changes will affect the internals of this FlagValue object. """ if not isinstance(module, str): module = module.__name__ # Any flag is a key flag for the module that defined it. NOTE: # key_flags is a fresh list: we can update it without affecting the # internals of this FlagValues object. key_flags = self._GetFlagsDefinedByModule(module) # Take into account flags explicitly declared as key for a module. for flag in self.KeyFlagsByModuleDict().get(module, []): if flag not in key_flags: key_flags.append(flag) return key_flags def FindModuleDefiningFlag(self, flagname, default=None): """Return the name of the module defining this flag, or default. Args: flagname: Name of the flag to lookup. default: Value to return if flagname is not defined. Defaults to None. Returns: The name of the module which registered the flag with this name. If no such module exists (i.e. no flag with this name exists), we return default. 
""" for module, flags in self.FlagsByModuleDict().iteritems(): for flag in flags: if flag.name == flagname or flag.short_name == flagname: return module return default def FindModuleIdDefiningFlag(self, flagname, default=None): """Return the ID of the module defining this flag, or default. Args: flagname: Name of the flag to lookup. default: Value to return if flagname is not defined. Defaults to None. Returns: The ID of the module which registered the flag with this name. If no such module exists (i.e. no flag with this name exists), we return default. """ for module_id, flags in self.FlagsByModuleIdDict().iteritems(): for flag in flags: if flag.name == flagname or flag.short_name == flagname: return module_id return default def AppendFlagValues(self, flag_values): """Appends flags registered in another FlagValues instance. Args: flag_values: registry to copy from """ for flag_name, flag in flag_values.FlagDict().iteritems(): # Each flags with shortname appears here twice (once under its # normal name, and again with its short name). To prevent # problems (DuplicateFlagError) with double flag registration, we # perform a check to make sure that the entry we're looking at is # for its normal name. if flag_name == flag.name: try: self[flag_name] = flag except DuplicateFlagError: raise DuplicateFlagError(flag_name, self, other_flag_values=flag_values) def RemoveFlagValues(self, flag_values): """Remove flags that were previously appended from another FlagValues. Args: flag_values: registry containing flags to remove. """ for flag_name in flag_values.FlagDict(): self.__delattr__(flag_name) def __setitem__(self, name, flag): """Registers a new flag variable.""" fl = self.FlagDict() if not isinstance(flag, Flag): raise IllegalFlagValue(flag) if not isinstance(name, type("")): raise FlagsError("Flag name must be a string") if len(name) == 0: raise FlagsError("Flag name cannot be empty") # If running under pychecker, duplicate keys are likely to be # defined. 
Disable check for duplicate keys when pycheck'ing. if (name in fl and not flag.allow_override and not fl[name].allow_override and not _RUNNING_PYCHECKER): module, module_name = _GetCallingModuleObjectAndName() if (self.FindModuleDefiningFlag(name) == module_name and id(module) != self.FindModuleIdDefiningFlag(name)): # If the flag has already been defined by a module with the same name, # but a different ID, we can stop here because it indicates that the # module is simply being imported a subsequent time. return raise DuplicateFlagError(name, self) short_name = flag.short_name if short_name is not None: if (short_name in fl and not flag.allow_override and not fl[short_name].allow_override and not _RUNNING_PYCHECKER): raise DuplicateFlagError(short_name, self) fl[short_name] = flag fl[name] = flag global _exported_flags _exported_flags[name] = flag def __getitem__(self, name): """Retrieves the Flag object for the flag --name.""" return self.FlagDict()[name] def __getattr__(self, name): """Retrieves the 'value' attribute of the flag --name.""" fl = self.FlagDict() if name not in fl: raise AttributeError(name) return fl[name].value def __setattr__(self, name, value): """Sets the 'value' attribute of the flag --name.""" fl = self.FlagDict() fl[name].value = value self._AssertValidators(fl[name].validators) return value def _AssertAllValidators(self): all_validators = set() for flag in self.FlagDict().itervalues(): for validator in flag.validators: all_validators.add(validator) self._AssertValidators(all_validators) def _AssertValidators(self, validators): """Assert if all validators in the list are satisfied. Asserts validators in the order they were created. Args: validators: Iterable(gflags_validators.Validator), validators to be verified Raises: AttributeError: if validators work with a non-existing flag. 
IllegalFlagValue: if validation fails for at least one validator """ for validator in sorted( validators, key=lambda validator: validator.insertion_index): try: validator.Verify(self) except gflags_validators.Error, e: message = validator.PrintFlagsWithValues(self) raise IllegalFlagValue('%s: %s' % (message, str(e))) def _FlagIsRegistered(self, flag_obj): """Checks whether a Flag object is registered under some name. Note: this is non trivial: in addition to its normal name, a flag may have a short name too. In self.FlagDict(), both the normal and the short name are mapped to the same flag object. E.g., calling only "del FLAGS.short_name" is not unregistering the corresponding Flag object (it is still registered under the longer name). Args: flag_obj: A Flag object. Returns: A boolean: True iff flag_obj is registered under some name. """ flag_dict = self.FlagDict() # Check whether flag_obj is registered under its long name. name = flag_obj.name if flag_dict.get(name, None) == flag_obj: return True # Check whether flag_obj is registered under its short name. short_name = flag_obj.short_name if (short_name is not None and flag_dict.get(short_name, None) == flag_obj): return True # The flag cannot be registered under any other name, so we do not # need to do a full search through the values of self.FlagDict(). return False def __delattr__(self, flag_name): """Deletes a previously-defined flag from a flag object. This method makes sure we can delete a flag by using del flag_values_object.<flag_name> E.g., gflags.DEFINE_integer('foo', 1, 'Integer flag.') del gflags.FLAGS.foo Args: flag_name: A string, the name of the flag to be deleted. Raises: AttributeError: When there is no registered flag named flag_name. 
""" fl = self.FlagDict() if flag_name not in fl: raise AttributeError(flag_name) flag_obj = fl[flag_name] del fl[flag_name] if not self._FlagIsRegistered(flag_obj): # If the Flag object indicated by flag_name is no longer # registered (please see the docstring of _FlagIsRegistered), then # we delete the occurrences of the flag object in all our internal # dictionaries. self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj) self.__RemoveFlagFromDictByModule(self.FlagsByModuleIdDict(), flag_obj) self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj) def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj): """Removes a flag object from a module -> list of flags dictionary. Args: flags_by_module_dict: A dictionary that maps module names to lists of flags. flag_obj: A flag object. """ for unused_module, flags_in_module in flags_by_module_dict.iteritems(): # while (as opposed to if) takes care of multiple occurrences of a # flag in the list for the same module. while flag_obj in flags_in_module: flags_in_module.remove(flag_obj) def SetDefault(self, name, value): """Changes the default value of the named flag object.""" fl = self.FlagDict() if name not in fl: raise AttributeError(name) fl[name].SetDefault(value) self._AssertValidators(fl[name].validators) def __contains__(self, name): """Returns True if name is a value (flag) in the dict.""" return name in self.FlagDict() has_key = __contains__ # a synonym for __contains__() def __iter__(self): return iter(self.FlagDict()) def __call__(self, argv): """Parses flags from argv; stores parsed flags into this FlagValues object. All unparsed arguments are returned. Flags are parsed using the GNU Program Argument Syntax Conventions, using getopt: http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt Args: argv: argument list. Can be of any type that may be converted to a list. 
Returns: The list of arguments not parsed as options, including argv[0] Raises: FlagsError: on any parsing error """ # Support any sequence type that can be converted to a list argv = list(argv) shortopts = "" longopts = [] fl = self.FlagDict() # This pre parses the argv list for --flagfile=<> options. argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False) # Correct the argv to support the google style of passing boolean # parameters. Boolean parameters may be passed by using --mybool, # --nomybool, --mybool=(true|false|1|0). getopt does not support # having options that may or may not have a parameter. We replace # instances of the short form --mybool and --nomybool with their # full forms: --mybool=(true|false). original_argv = list(argv) # list() makes a copy shortest_matches = None for name, flag in fl.items(): if not flag.boolean: continue if shortest_matches is None: # Determine the smallest allowable prefix for all flag names shortest_matches = self.ShortestUniquePrefixes(fl) no_name = 'no' + name prefix = shortest_matches[name] no_prefix = shortest_matches[no_name] # Replace all occurrences of this boolean with extended forms for arg_idx in range(1, len(argv)): arg = argv[arg_idx] if arg.find('=') >= 0: continue if arg.startswith('--'+prefix) and ('--'+name).startswith(arg): argv[arg_idx] = ('--%s=true' % name) elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg): argv[arg_idx] = ('--%s=false' % name) # Loop over all of the flags, building up the lists of short options # and long options that will be passed to getopt. Short options are # specified as a string of letters, each letter followed by a colon # if it takes an argument. Long options are stored in an array of # strings. Each string ends with an '=' if it takes an argument. 
for name, flag in fl.items(): longopts.append(name + "=") if len(name) == 1: # one-letter option: allow short flag type also shortopts += name if not flag.boolean: shortopts += ":" longopts.append('undefok=') undefok_flags = [] # In case --undefok is specified, loop to pick up unrecognized # options one by one. unrecognized_opts = [] args = argv[1:] while True: try: if self.__dict__['__use_gnu_getopt']: optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts) else: optlist, unparsed_args = getopt.getopt(args, shortopts, longopts) break except getopt.GetoptError, e: if not e.opt or e.opt in fl: # Not an unrecognized option, re-raise the exception as a FlagsError raise FlagsError(e) # Remove offender from args and try again for arg_index in range(len(args)): if ((args[arg_index] == '--' + e.opt) or (args[arg_index] == '-' + e.opt) or (args[arg_index].startswith('--' + e.opt + '='))): unrecognized_opts.append((e.opt, args[arg_index])) args = args[0:arg_index] + args[arg_index+1:] break else: # We should have found the option, so we don't expect to get # here. We could assert, but raising the original exception # might work better. raise FlagsError(e) for name, arg in optlist: if name == '--undefok': flag_names = arg.split(',') undefok_flags.extend(flag_names) # For boolean flags, if --undefok=boolflag is specified, then we should # also accept --noboolflag, in addition to --boolflag. # Since we don't know the type of the undefok'd flag, this will affect # non-boolean flags as well. # NOTE: You shouldn't use --undefok=noboolflag, because then we will # accept --nonoboolflag here. We are choosing not to do the conversion # from noboolflag -> boolflag because of the ambiguity that flag names # can start with 'no'. 
undefok_flags.extend('no' + name for name in flag_names) continue if name.startswith('--'): # long option name = name[2:] short_option = 0 else: # short option name = name[1:] short_option = 1 if name in fl: flag = fl[name] if flag.boolean and short_option: arg = 1 flag.Parse(arg) # If there were unrecognized options, raise an exception unless # the options were named via --undefok. for opt, value in unrecognized_opts: if opt not in undefok_flags: raise UnrecognizedFlagError(opt, value) if unparsed_args: if self.__dict__['__use_gnu_getopt']: # if using gnu_getopt just return the program name + remainder of argv. ret_val = argv[:1] + unparsed_args else: # unparsed_args becomes the first non-flag detected by getopt to # the end of argv. Because argv may have been modified above, # return original_argv for this region. ret_val = argv[:1] + original_argv[-len(unparsed_args):] else: ret_val = argv[:1] self._AssertAllValidators() return ret_val def Reset(self): """Resets the values to the point before FLAGS(argv) was called.""" for f in self.FlagDict().values(): f.Unparse() def RegisteredFlags(self): """Returns: a list of the names and short names of all registered flags.""" return list(self.FlagDict()) def FlagValuesDict(self): """Returns: a dictionary that maps flag names to flag values.""" flag_values = {} for flag_name in self.RegisteredFlags(): flag = self.FlagDict()[flag_name] flag_values[flag_name] = flag.value return flag_values def __str__(self): """Generates a help string for all known flags.""" return self.GetHelp() def GetHelp(self, prefix=''): """Generates a help string for all known flags.""" helplist = [] flags_by_module = self.FlagsByModuleDict() if flags_by_module: modules = sorted(flags_by_module) # Print the help for the main module first, if possible. 
main_module = _GetMainModule() if main_module in modules: modules.remove(main_module) modules = [main_module] + modules for module in modules: self.__RenderOurModuleFlags(module, helplist) self.__RenderModuleFlags('gflags', _SPECIAL_FLAGS.FlagDict().values(), helplist) else: # Just print one long list of flags. self.__RenderFlagList( self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(), helplist, prefix) return '\n'.join(helplist) def __RenderModuleFlags(self, module, flags, output_lines, prefix=""): """Generates a help string for a given module.""" if not isinstance(module, str): module = module.__name__ output_lines.append('\n%s%s:' % (prefix, module)) self.__RenderFlagList(flags, output_lines, prefix + " ") def __RenderOurModuleFlags(self, module, output_lines, prefix=""): """Generates a help string for a given module.""" flags = self._GetFlagsDefinedByModule(module) if flags: self.__RenderModuleFlags(module, flags, output_lines, prefix) def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""): """Generates a help string for the key flags of a given module. Args: module: A module object or a module name (a string). output_lines: A list of strings. The generated help message lines will be appended to this list. prefix: A string that is prepended to each generated help line. """ key_flags = self._GetKeyFlagsForModule(module) if key_flags: self.__RenderModuleFlags(module, key_flags, output_lines, prefix) def ModuleHelp(self, module): """Describe the key flags of a module. Args: module: A module object or a module name (a string). Returns: string describing the key flags of a module. """ helplist = [] self.__RenderOurModuleKeyFlags(module, helplist) return '\n'.join(helplist) def MainModuleHelp(self): """Describe the key flags of the main module. Returns: string describing the key flags of a module. 
""" return self.ModuleHelp(_GetMainModule()) def __RenderFlagList(self, flaglist, output_lines, prefix=" "): fl = self.FlagDict() special_fl = _SPECIAL_FLAGS.FlagDict() flaglist = [(flag.name, flag) for flag in flaglist] flaglist.sort() flagset = {} for (name, flag) in flaglist: # It's possible this flag got deleted or overridden since being # registered in the per-module flaglist. Check now against the # canonical source of current flag information, the FlagDict. if fl.get(name, None) != flag and special_fl.get(name, None) != flag: # a different flag is using this name now continue # only print help once if flag in flagset: continue flagset[flag] = 1 flaghelp = "" if flag.short_name: flaghelp += "-%s," % flag.short_name if flag.boolean: flaghelp += "--[no]%s" % flag.name + ":" else: flaghelp += "--%s" % flag.name + ":" flaghelp += " " if flag.help: flaghelp += flag.help flaghelp = TextWrap(flaghelp, indent=prefix+" ", firstline_indent=prefix) if flag.default_as_str: flaghelp += "\n" flaghelp += TextWrap("(default: %s)" % flag.default_as_str, indent=prefix+" ") if flag.parser.syntactic_help: flaghelp += "\n" flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help, indent=prefix+" ") output_lines.append(flaghelp) def get(self, name, default): """Returns the value of a flag (if not None) or a default value. Args: name: A string, the name of a flag. default: Default value to use if the flag value is None. 
""" value = self.__getattr__(name) if value is not None: # Can't do if not value, b/c value might be '0' or "" return value else: return default def ShortestUniquePrefixes(self, fl): """Returns: dictionary; maps flag names to their shortest unique prefix.""" # Sort the list of flag names sorted_flags = [] for name, flag in fl.items(): sorted_flags.append(name) if flag.boolean: sorted_flags.append('no%s' % name) sorted_flags.sort() # For each name in the sorted list, determine the shortest unique # prefix by comparing itself to the next name and to the previous # name (the latter check uses cached info from the previous loop). shortest_matches = {} prev_idx = 0 for flag_idx in range(len(sorted_flags)): curr = sorted_flags[flag_idx] if flag_idx == (len(sorted_flags) - 1): next = None else: next = sorted_flags[flag_idx+1] next_len = len(next) for curr_idx in range(len(curr)): if (next is None or curr_idx >= next_len or curr[curr_idx] != next[curr_idx]): # curr longer than next or no more chars in common shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1] prev_idx = curr_idx break else: # curr shorter than (or equal to) next shortest_matches[curr] = curr prev_idx = curr_idx + 1 # next will need at least one more char return shortest_matches def __IsFlagFileDirective(self, flag_string): """Checks whether flag_string contain a --flagfile=<foo> directive.""" if isinstance(flag_string, type("")): if flag_string.startswith('--flagfile='): return 1 elif flag_string == '--flagfile': return 1 elif flag_string.startswith('-flagfile='): return 1 elif flag_string == '-flagfile': return 1 else: return 0 return 0 def ExtractFilename(self, flagfile_str): """Returns filename from a flagfile_str of form -[-]flagfile=filename. The cases of --flagfile foo and -flagfile foo shouldn't be hitting this function, as they are dealt with in the level above this function. 
    """
    # Accept both the GNU-style (--flagfile=) and single-dash
    # (-flagfile=) spellings of the directive.
    if flagfile_str.startswith('--flagfile='):
      return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
    elif flagfile_str.startswith('-flagfile='):
      return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
    else:
      raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)

  def __GetFlagFileLines(self, filename, parsed_file_list):
    """Returns the useful (!=comments, etc) lines from a file with flags.

    Args:
      filename: A string, the name of the flag file.
      parsed_file_list: A list of the names of the files we have already
        read.  MUTATED BY THIS FUNCTION.

    Returns:
      List of strings. See the note below.

    NOTE(springer): This function checks for a nested --flagfile=<foo>
    tag and handles the lower file recursively. It returns a list of
    all the lines that _could_ contain command flags. This is
    EVERYTHING except whitespace lines and comments (lines starting
    with '#' or '//').
    """
    line_list = []  # All line from flagfile.
    flag_line_list = []  # Subset of lines w/o comments, blanks, flagfile= tags.
    try:
      file_obj = open(filename, 'r')
    except IOError, e_msg:
      raise CantOpenFlagFileError('ERROR:: Unable to open flagfile: %s' % e_msg)

    line_list = file_obj.readlines()
    file_obj.close()
    # Record this file so that nested includes can detect cycles.
    parsed_file_list.append(filename)

    # This is where we check each line in the file we just read.
    for line in line_list:
      if line.isspace():
        pass
      # Checks for comment (a line that starts with '#').
      elif line.startswith('#') or line.startswith('//'):
        pass
      # Checks for a nested "--flagfile=<bar>" flag in the current file.
      # If we find one, recursively parse down into that file.
      elif self.__IsFlagFileDirective(line):
        sub_filename = self.ExtractFilename(line)
        # We do a little safety check for reparsing a file we've already done.
        if not sub_filename in parsed_file_list:
          included_flags = self.__GetFlagFileLines(sub_filename,
                                                   parsed_file_list)
          flag_line_list.extend(included_flags)
        else:  # Case of hitting a circularly included file.
sys.stderr.write('Warning: Hit circular flagfile dependency: %s\n' % (sub_filename,)) else: # Any line that's not a comment or a nested flagfile should get # copied into 2nd position. This leaves earlier arguments # further back in the list, thus giving them higher priority. flag_line_list.append(line.strip()) return flag_line_list def ReadFlagsFromFiles(self, argv, force_gnu=True): """Processes command line args, but also allow args to be read from file. Args: argv: A list of strings, usually sys.argv[1:], which may contain one or more flagfile directives of the form --flagfile="./filename". Note that the name of the program (sys.argv[0]) should be omitted. force_gnu: If False, --flagfile parsing obeys normal flag semantics. If True, --flagfile parsing instead follows gnu_getopt semantics. *** WARNING *** force_gnu=False may become the future default! Returns: A new list which has the original list combined with what we read from any flagfile(s). References: Global gflags.FLAG class instance. This function should be called before the normal FLAGS(argv) call. This function scans the input list for a flag that looks like: --flagfile=<somefile>. Then it opens <somefile>, reads all valid key and value pairs and inserts them into the input list between the first item of the list and any subsequent items in the list. Note that your application's flags are still defined the usual way using gflags DEFINE_flag() type functions. Notes (assuming we're getting a commandline of some sort as our input): --> Flags from the command line argv _should_ always take precedence! --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile. It will be processed after the parent flag file is done. --> For duplicate flags, first one we hit should "win". --> In a flagfile, a line beginning with # or // is a comment. --> Entirely blank lines _should_ be ignored. 
""" parsed_file_list = [] rest_of_args = argv new_argv = [] while rest_of_args: current_arg = rest_of_args[0] rest_of_args = rest_of_args[1:] if self.__IsFlagFileDirective(current_arg): # This handles the case of -(-)flagfile foo. In this case the # next arg really is part of this one. if current_arg == '--flagfile' or current_arg == '-flagfile': if not rest_of_args: raise IllegalFlagValue('--flagfile with no argument') flag_filename = os.path.expanduser(rest_of_args[0]) rest_of_args = rest_of_args[1:] else: # This handles the case of (-)-flagfile=foo. flag_filename = self.ExtractFilename(current_arg) new_argv.extend( self.__GetFlagFileLines(flag_filename, parsed_file_list)) else: new_argv.append(current_arg) # Stop parsing after '--', like getopt and gnu_getopt. if current_arg == '--': break # Stop parsing after a non-flag, like getopt. if not current_arg.startswith('-'): if not force_gnu and not self.__dict__['__use_gnu_getopt']: break if rest_of_args: new_argv.extend(rest_of_args) return new_argv def FlagsIntoString(self): """Returns a string with the flags assignments from this FlagValues object. This function ignores flags whose value is None. Each flag assignment is separated by a newline. NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString from http://code.google.com/p/google-gflags """ s = '' for flag in self.FlagDict().values(): if flag.value is not None: s += flag.Serialize() + '\n' return s def AppendFlagsIntoFile(self, filename): """Appends all flags assignments from this FlagInfo object to a file. Output will be in the format of a flagfile. NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile from http://code.google.com/p/google-gflags """ out_file = open(filename, 'a') out_file.write(self.FlagsIntoString()) out_file.close() def WriteHelpInXMLFormat(self, outfile=None): """Outputs flag documentation in XML format. 
NOTE: We use element names that are consistent with those used by the C++ command-line flag library, from http://code.google.com/p/google-gflags We also use a few new elements (e.g., <key>), but we do not interfere / overlap with existing XML elements used by the C++ library. Please maintain this consistency. Args: outfile: File object we write to. Default None means sys.stdout. """ outfile = outfile or sys.stdout outfile.write('<?xml version=\"1.0\"?>\n') outfile.write('<AllFlags>\n') indent = ' ' _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]), indent) usage_doc = sys.modules['__main__'].__doc__ if not usage_doc: usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0] else: usage_doc = usage_doc.replace('%s', sys.argv[0]) _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent) # Get list of key flags for the main module. key_flags = self._GetKeyFlagsForModule(_GetMainModule()) # Sort flags by declaring module name and next by flag name. flags_by_module = self.FlagsByModuleDict() all_module_names = list(flags_by_module.keys()) all_module_names.sort() for module_name in all_module_names: flag_list = [(f.name, f) for f in flags_by_module[module_name]] flag_list.sort() for unused_flag_name, flag in flag_list: is_key = flag in key_flags flag.WriteInfoInXMLFormat(outfile, module_name, is_key=is_key, indent=indent) outfile.write('</AllFlags>\n') outfile.flush() def AddValidator(self, validator): """Register new flags validator to be checked. Args: validator: gflags_validators.Validator Raises: AttributeError: if validators work with a non-existing flag. 
""" for flag_name in validator.GetFlagsNames(): flag = self.FlagDict()[flag_name] flag.validators.append(validator) # end of FlagValues definition # The global FlagValues instance FLAGS = FlagValues() def _StrOrUnicode(value): """Converts value to a python string or, if necessary, unicode-string.""" try: return str(value) except UnicodeEncodeError: return unicode(value) def _MakeXMLSafe(s): """Escapes <, >, and & from s, and removes XML 1.0-illegal chars.""" s = cgi.escape(s) # Escape <, >, and & # Remove characters that cannot appear in an XML 1.0 document # (http://www.w3.org/TR/REC-xml/#charsets). # # NOTE: if there are problems with current solution, one may move to # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;). s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s) # Convert non-ascii characters to entities. Note: requires python >=2.3 s = s.encode('ascii', 'xmlcharrefreplace') # u'\xce\x88' -> 'u&#904;' return s def _WriteSimpleXMLElement(outfile, name, value, indent): """Writes a simple XML element. Args: outfile: File object we write the XML element to. name: A string, the name of XML element. value: A Python object, whose string representation will be used as the value of the XML element. indent: A string, prepended to each line of generated output. """ value_str = _StrOrUnicode(value) if isinstance(value, bool): # Display boolean values as the C++ flag library does: no caps. value_str = value_str.lower() safe_value_str = _MakeXMLSafe(value_str) outfile.write('%s<%s>%s</%s>\n' % (indent, name, safe_value_str, name)) class Flag: """Information about a command-line flag. 
'Flag' objects define the following fields: .name - the name for this flag .default - the default value for this flag .default_as_str - default value as repr'd string, e.g., "'true'" (or None) .value - the most recent parsed value of this flag; set by Parse() .help - a help string or None if no help is available .short_name - the single letter alias for this flag (or None) .boolean - if 'true', this flag does not accept arguments .present - true if this flag was parsed from command line flags. .parser - an ArgumentParser object .serializer - an ArgumentSerializer object .allow_override - the flag may be redefined without raising an error The only public method of a 'Flag' object is Parse(), but it is typically only called by a 'FlagValues' object. The Parse() method is a thin wrapper around the 'ArgumentParser' Parse() method. The parsed value is saved in .value, and the .present attribute is updated. If this flag was already present, a FlagsError is raised. Parse() is also called during __init__ to parse the default value and initialize the .value attribute. This enables other python modules to safely use flags even if the __main__ module neglects to parse the command line arguments. The .present attribute is cleared after __init__ parsing. If the default value is set to None, then the __init__ parsing step is skipped and the .value attribute is initialized to None. Note: The default value is also presented to the user in the help string, so it is important that it be a legal value for this flag. 
    """

  def __init__(self, parser, serializer, name, default, help_string,
               short_name=None, boolean=0, allow_override=0):
    self.name = name

    if not help_string:
      help_string = '(no help available)'

    self.help = help_string
    self.short_name = short_name
    self.boolean = boolean
    self.present = 0
    self.parser = parser
    self.serializer = serializer
    self.allow_override = allow_override
    self.value = None
    self.validators = []

    # Parses the default so .value is usable even before FLAGS(argv).
    self.SetDefault(default)

  def __hash__(self):
    # Flags compare by identity (__eq__ below), so hash by identity too.
    return hash(id(self))

  def __eq__(self, other):
    return self is other

  def __lt__(self, other):
    # Arbitrary but stable ordering, used when flags are sorted together.
    if isinstance(other, Flag):
      return id(self) < id(other)
    return NotImplemented

  def __GetParsedValueAsString(self, value):
    # Renders a parsed value the way help text displays defaults
    # (repr'd, with booleans shown as 'true'/'false').
    if value is None:
      return None
    if self.serializer:
      return repr(self.serializer.Serialize(value))
    if self.boolean:
      if value:
        return repr('true')
      else:
        return repr('false')
    return repr(_StrOrUnicode(value))

  def Parse(self, argument):
    try:
      self.value = self.parser.Parse(argument)
    except ValueError, e:  # recast ValueError as IllegalFlagValue
      raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
    self.present += 1

  def Unparse(self):
    # Resets the flag back to its (parsed) default value.
    if self.default is None:
      self.value = None
    else:
      self.Parse(self.default)
    self.present = 0

  def Serialize(self):
    # Returns this flag as a command-line assignment ('' if unset).
    if self.value is None:
      return ''
    if self.boolean:
      if self.value:
        return "--%s" % self.name
      else:
        return "--no%s" % self.name
    else:
      if not self.serializer:
        raise FlagsError("Serializer not present for flag %s" % self.name)
      return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))

  def SetDefault(self, value):
    """Changes the default value (and current value too) for this Flag."""
    # We can't allow a None override because it may end up not being
    # passed to C++ code when we're overriding C++ flags.  So we
    # cowardly bail out until someone fixes the semantics of trying to
    # pass None to a C++ flag.  See swig_flags.Init() for details on
    # this behavior.
# TODO(olexiy): Users can directly call this method, bypassing all flags # validators (we don't have FlagValues here, so we can not check # validators). # The simplest solution I see is to make this method private. # Another approach would be to store reference to the corresponding # FlagValues with each flag, but this seems to be an overkill. if value is None and self.allow_override: raise DuplicateFlagCannotPropagateNoneToSwig(self.name) self.default = value self.Unparse() self.default_as_str = self.__GetParsedValueAsString(self.value) def Type(self): """Returns: a string that describes the type of this Flag.""" # NOTE: we use strings, and not the types.*Type constants because # our flags can have more exotic types, e.g., 'comma separated list # of strings', 'whitespace separated list of strings', etc. return self.parser.Type() def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''): """Writes common info about this flag, in XML format. This is information that is relevant to all flags (e.g., name, meaning, etc.). If you defined a flag that has some other pieces of info, then please override _WriteCustomInfoInXMLFormat. Please do NOT override this method. Args: outfile: File object we write to. module_name: A string, the name of the module that defines this flag. is_key: A boolean, True iff this flag is key for main module. indent: A string that is prepended to each generated line. """ outfile.write(indent + '<flag>\n') inner_indent = indent + ' ' if is_key: _WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent) _WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent) # Print flag features that are relevant for all flags. 
_WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent) if self.short_name: _WriteSimpleXMLElement(outfile, 'short_name', self.short_name, inner_indent) if self.help: _WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent) # The default flag value can either be represented as a string like on the # command line, or as a Python object. We serialize this value in the # latter case in order to remain consistent. if self.serializer and not isinstance(self.default, str): default_serialized = self.serializer.Serialize(self.default) else: default_serialized = self.default _WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent) _WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent) _WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent) # Print extra flag features this flag may have. self._WriteCustomInfoInXMLFormat(outfile, inner_indent) outfile.write(indent + '</flag>\n') def _WriteCustomInfoInXMLFormat(self, outfile, indent): """Writes extra info about this flag, in XML format. "Extra" means "not already printed by WriteInfoInXMLFormat above." Args: outfile: File object we write to. indent: A string that is prepended to each generated line. """ # Usually, the parser knows the extra details about the flag, so # we just forward the call to it. self.parser.WriteCustomInfoInXMLFormat(outfile, indent) # End of Flag definition class _ArgumentParserCache(type): """Metaclass used to cache and share argument parsers among flags.""" _instances = {} def __call__(mcs, *args, **kwargs): """Returns an instance of the argument parser cls. This method overrides behavior of the __new__ methods in all subclasses of ArgumentParser (inclusive). If an instance for mcs with the same set of arguments exists, this instance is returned, otherwise a new instance is created. If any keyword arguments are defined, or the values in args are not hashable, this method always returns a new instance of cls. 
    Args:
      args: Positional initializer arguments.
      kwargs: Initializer keyword arguments.

    Returns:
      An instance of cls, shared or new.
    """
    if kwargs:
      # Keyword arguments are not part of the cache key; always build fresh.
      return type.__call__(mcs, *args, **kwargs)
    else:
      instances = mcs._instances
      key = (mcs,) + tuple(args)
      try:
        return instances[key]
      except KeyError:
        # No cache entry for key exists, create a new one.
        return instances.setdefault(key, type.__call__(mcs, *args))
      except TypeError:
        # An object in args cannot be hashed, always return
        # a new instance.
        return type.__call__(mcs, *args)


class ArgumentParser(object):
  """Base class used to parse and convert arguments.

  The Parse() method checks to make sure that the string argument is a
  legal value and convert it to a native type.  If the value cannot be
  converted, it should throw a 'ValueError' exception with a human
  readable explanation of why the value is illegal.

  Subclasses should also define a syntactic_help string which may be
  presented to the user to describe the form of the legal values.

  Argument parser classes must be stateless, since instances are cached
  and shared between flags. Initializer arguments are allowed, but all
  member variables must be derived from initializer arguments only.
  """
  __metaclass__ = _ArgumentParserCache

  syntactic_help = ""

  def Parse(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument

  def Type(self):
    return 'string'

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    pass


class ArgumentSerializer:
  """Base class for generating string representations of a flag value."""

  def Serialize(self, value):
    return _StrOrUnicode(value)


class ListSerializer(ArgumentSerializer):
  """Serializes a list of values joined by a given separator string."""

  def __init__(self, list_sep):
    self.list_sep = list_sep

  def Serialize(self, value):
    return self.list_sep.join([_StrOrUnicode(x) for x in value])


# Flags validators


def RegisterValidator(flag_name,
                      checker,
                      message='Flag validation failed',
                      flag_values=FLAGS):
  """Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_name: string, name of the flag to be checked. checker: method to validate the flag. input - value of the corresponding flag (string, boolean, etc. This value will be passed to checker by the library). See file's docstring for examples. output - Boolean. Must return True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise gflags_validators.Error(desired_error_message). message: error text to be shown to the user if checker returns False. If checker raises gflags_validators.Error, message from the raised Error will be shown. flag_values: FlagValues Raises: AttributeError: if flag_name is not registered as a valid flag name. """ flag_values.AddValidator(gflags_validators.SimpleValidator(flag_name, checker, message)) def MarkFlagAsRequired(flag_name, flag_values=FLAGS): """Ensure that flag is not None during program execution. Registers a flag validator, which will follow usual validator rules. Args: flag_name: string, name of the flag flag_values: FlagValues Raises: AttributeError: if flag_name is not registered as a valid flag name. """ RegisterValidator(flag_name, lambda value: value is not None, message='Flag --%s must be specified.' % flag_name, flag_values=flag_values) def _RegisterBoundsValidatorIfNeeded(parser, name, flag_values): """Enforce lower and upper bounds for numeric flags. Args: parser: NumericParser (either FloatParser or IntegerParser). Provides lower and upper bounds, and help text to display. 
name: string, name of the flag flag_values: FlagValues """ if parser.lower_bound is not None or parser.upper_bound is not None: def Checker(value): if value is not None and parser.IsOutsideBounds(value): message = '%s is not %s' % (value, parser.syntactic_help) raise gflags_validators.Error(message) return True RegisterValidator(name, Checker, flag_values=flag_values) # The DEFINE functions are explained in mode details in the module doc string. def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None, **args): """Registers a generic Flag object. NOTE: in the docstrings of all DEFINE* functions, "registers" is short for "creates a new flag and registers it". Auxiliary function: clients should use the specialized DEFINE_<type> function instead. Args: parser: ArgumentParser that is used to parse the flag arguments. name: A string, the flag name. default: The default value of the flag. help: A help string. flag_values: FlagValues object the flag will be registered with. serializer: ArgumentSerializer that serializes the flag value. args: Dictionary with extra keyword args that are passes to the Flag __init__. """ DEFINE_flag(Flag(parser, serializer, name, default, help, **args), flag_values) def DEFINE_flag(flag, flag_values=FLAGS): """Registers a 'Flag' object with a 'FlagValues' object. By default, the global FLAGS 'FlagValue' object is used. Typical users will use one of the more specialized DEFINE_xxx functions, such as DEFINE_string or DEFINE_integer. But developers who need to create Flag objects themselves should use this function to register their flags. """ # copying the reference to flag_values prevents pychecker warnings fv = flag_values fv[flag.name] = flag # Tell flag_values who's defining the flag. 
if isinstance(flag_values, FlagValues): # Regarding the above isinstance test: some users pass funny # values of flag_values (e.g., {}) in order to avoid the flag # registration (in the past, there used to be a flag_values == # FLAGS test here) and redefine flags with the same name (e.g., # debug). To avoid breaking their code, we perform the # registration only if flag_values is a real FlagValues object. module, module_name = _GetCallingModuleObjectAndName() flag_values._RegisterFlagByModule(module_name, flag) flag_values._RegisterFlagByModuleId(id(module), flag) def _InternalDeclareKeyFlags(flag_names, flag_values=FLAGS, key_flag_values=None): """Declares a flag as key for the calling module. Internal function. User code should call DECLARE_key_flag or ADOPT_module_key_flags instead. Args: flag_names: A list of strings that are names of already-registered Flag objects. flag_values: A FlagValues object that the flags listed in flag_names have registered with (the value of the flag_values argument from the DEFINE_* calls that defined those flags). This should almost never need to be overridden. key_flag_values: A FlagValues object that (among possibly many other things) keeps track of the key flags for each module. Default None means "same as flag_values". This should almost never need to be overridden. Raises: UnrecognizedFlagError: when we refer to a flag that was not defined yet. """ key_flag_values = key_flag_values or flag_values module = _GetCallingModule() for flag_name in flag_names: if flag_name not in flag_values: raise UnrecognizedFlagError(flag_name) flag = flag_values.FlagDict()[flag_name] key_flag_values._RegisterKeyFlagForModule(module, flag) def DECLARE_key_flag(flag_name, flag_values=FLAGS): """Declares one flag as key to the current module. Key flags are flags that are deemed really important for a module. 
They are important when listing help messages; e.g., if the --helpshort command-line flag is used, then only the key flags of the main module are listed (instead of all flags, as in the case of --help). Sample usage: gflags.DECLARED_key_flag('flag_1') Args: flag_name: A string, the name of an already declared flag. (Redeclaring flags as key, including flags implicitly key because they were declared in this module, is a no-op.) flag_values: A FlagValues object. This should almost never need to be overridden. """ if flag_name in _SPECIAL_FLAGS: # Take care of the special flags, e.g., --flagfile, --undefok. # These flags are defined in _SPECIAL_FLAGS, and are treated # specially during flag parsing, taking precedence over the # user-defined flags. _InternalDeclareKeyFlags([flag_name], flag_values=_SPECIAL_FLAGS, key_flag_values=flag_values) return _InternalDeclareKeyFlags([flag_name], flag_values=flag_values) def ADOPT_module_key_flags(module, flag_values=FLAGS): """Declares that all flags key to a module are key to the current module. Args: module: A module object. flag_values: A FlagValues object. This should almost never need to be overridden. Raises: FlagsError: When given an argument that is a module name (a string), instead of a module object. """ # NOTE(salcianu): an even better test would be if not # isinstance(module, types.ModuleType) but I didn't want to import # types for such a tiny use. if isinstance(module, str): raise FlagsError('Received module name %s; expected a module object.' % module) _InternalDeclareKeyFlags( [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)], flag_values=flag_values) # If module is this flag module, take _SPECIAL_FLAGS into account. if module == _GetThisModuleObjectAndName()[0]: _InternalDeclareKeyFlags( # As we associate flags with _GetCallingModuleObjectAndName(), the # special flags defined in this module are incorrectly registered with # a different module. So, we can't use _GetKeyFlagsForModule. 
# Instead, we take all flags from _SPECIAL_FLAGS (a private # FlagValues, where no other module should register flags). [f.name for f in _SPECIAL_FLAGS.FlagDict().values()], flag_values=_SPECIAL_FLAGS, key_flag_values=flag_values) # # STRING FLAGS # def DEFINE_string(name, default, help, flag_values=FLAGS, **args): """Registers a flag whose value can be any string.""" parser = ArgumentParser() serializer = ArgumentSerializer() DEFINE(parser, name, default, help, flag_values, serializer, **args) # # BOOLEAN FLAGS # class BooleanParser(ArgumentParser): """Parser of boolean values.""" def Convert(self, argument): """Converts the argument to a boolean; raise ValueError on errors.""" if type(argument) == str: if argument.lower() in ['true', 't', '1']: return True elif argument.lower() in ['false', 'f', '0']: return False bool_argument = bool(argument) if argument == bool_argument: # The argument is a valid boolean (True, False, 0, or 1), and not just # something that always converts to bool (list, string, int, etc.). return bool_argument raise ValueError('Non-boolean argument to boolean flag', argument) def Parse(self, argument): val = self.Convert(argument) return val def Type(self): return 'bool' class BooleanFlag(Flag): """Basic boolean flag. Boolean flags do not take any arguments, and their value is either True (1) or False (0). The false value is specified on the command line by prepending the word 'no' to either the long or the short flag name. For example, if a Boolean flag was created whose long name was 'update' and whose short name was 'x', then this flag could be explicitly unset through either --noupdate or --nox. """ def __init__(self, name, default, help, short_name=None, **args): p = BooleanParser() Flag.__init__(self, p, None, name, default, help, short_name, 1, **args) if not self.help: self.help = "a boolean value" def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args): """Registers a boolean flag. 
  Such a boolean flag does not take an argument.  If a user wants to
  specify a false value explicitly, the long option beginning with 'no'
  must be used: i.e. --noflag

  This flag will have a value of None, True or False.  None is possible
  if default=None and the user does not specify the flag on the command
  line.
  """
  DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)


# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean


class HelpFlag(BooleanFlag):
  """
  HelpFlag is a special boolean flag that prints usage information and
  raises a SystemExit exception if it is ever found in the command
  line arguments.  Note this is called with allow_override=1, so other
  apps can define their own --help flag, replacing this one, if they want.
  """
  def __init__(self):
    BooleanFlag.__init__(self, "help", 0, "show this help",
                         short_name="?", allow_override=1)
  def Parse(self, arg):
    if arg:
      doc = sys.modules["__main__"].__doc__
      flags = str(FLAGS)
      print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
      if flags:
        print "flags:"
        print flags
      # Exit with a non-zero status, as --help is not a normal run.
      sys.exit(1)


class HelpXMLFlag(BooleanFlag):
  """Similar to HelpFlag, but generates output in XML format."""
  def __init__(self):
    BooleanFlag.__init__(self, 'helpxml', False,
                         'like --help, but generates XML output',
                         allow_override=1)
  def Parse(self, arg):
    if arg:
      FLAGS.WriteHelpInXMLFormat(sys.stdout)
      sys.exit(1)


class HelpshortFlag(BooleanFlag):
  """
  HelpshortFlag is a special boolean flag that prints usage
  information for the "main" module, and raises a SystemExit exception
  if it is ever found in the command line arguments.  Note this is
  called with allow_override=1, so other apps can define their own
  --helpshort flag, replacing this one, if they want.
""" def __init__(self): BooleanFlag.__init__(self, "helpshort", 0, "show usage only for this module", allow_override=1) def Parse(self, arg): if arg: doc = sys.modules["__main__"].__doc__ flags = FLAGS.MainModuleHelp() print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0]) if flags: print "flags:" print flags sys.exit(1) # # Numeric parser - base class for Integer and Float parsers # class NumericParser(ArgumentParser): """Parser of numeric values. Parsed value may be bounded to a given upper and lower bound. """ def IsOutsideBounds(self, val): return ((self.lower_bound is not None and val < self.lower_bound) or (self.upper_bound is not None and val > self.upper_bound)) def Parse(self, argument): val = self.Convert(argument) if self.IsOutsideBounds(val): raise ValueError("%s is not %s" % (val, self.syntactic_help)) return val def WriteCustomInfoInXMLFormat(self, outfile, indent): if self.lower_bound is not None: _WriteSimpleXMLElement(outfile, 'lower_bound', self.lower_bound, indent) if self.upper_bound is not None: _WriteSimpleXMLElement(outfile, 'upper_bound', self.upper_bound, indent) def Convert(self, argument): """Default implementation: always returns its argument unmodified.""" return argument # End of Numeric Parser # # FLOAT FLAGS # class FloatParser(NumericParser): """Parser of floating point values. Parsed value may be bounded to a given upper and lower bound. 
""" number_article = "a" number_name = "number" syntactic_help = " ".join((number_article, number_name)) def __init__(self, lower_bound=None, upper_bound=None): super(FloatParser, self).__init__() self.lower_bound = lower_bound self.upper_bound = upper_bound sh = self.syntactic_help if lower_bound is not None and upper_bound is not None: sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound)) elif lower_bound == 0: sh = "a non-negative %s" % self.number_name elif upper_bound == 0: sh = "a non-positive %s" % self.number_name elif upper_bound is not None: sh = "%s <= %s" % (self.number_name, upper_bound) elif lower_bound is not None: sh = "%s >= %s" % (self.number_name, lower_bound) self.syntactic_help = sh def Convert(self, argument): """Converts argument to a float; raises ValueError on errors.""" return float(argument) def Type(self): return 'float' # End of FloatParser def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None, flag_values=FLAGS, **args): """Registers a flag whose value must be a float. If lower_bound or upper_bound are set, then this flag must be within the given range. """ parser = FloatParser(lower_bound, upper_bound) serializer = ArgumentSerializer() DEFINE(parser, name, default, help, flag_values, serializer, **args) _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values) # # INTEGER FLAGS # class IntegerParser(NumericParser): """Parser of an integer value. Parsed value may be bounded to a given upper and lower bound. 
""" number_article = "an" number_name = "integer" syntactic_help = " ".join((number_article, number_name)) def __init__(self, lower_bound=None, upper_bound=None): super(IntegerParser, self).__init__() self.lower_bound = lower_bound self.upper_bound = upper_bound sh = self.syntactic_help if lower_bound is not None and upper_bound is not None: sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound)) elif lower_bound == 1: sh = "a positive %s" % self.number_name elif upper_bound == -1: sh = "a negative %s" % self.number_name elif lower_bound == 0: sh = "a non-negative %s" % self.number_name elif upper_bound == 0: sh = "a non-positive %s" % self.number_name elif upper_bound is not None: sh = "%s <= %s" % (self.number_name, upper_bound) elif lower_bound is not None: sh = "%s >= %s" % (self.number_name, lower_bound) self.syntactic_help = sh def Convert(self, argument): __pychecker__ = 'no-returnvalues' if type(argument) == str: base = 10 if len(argument) > 2 and argument[0] == "0" and argument[1] == "x": base = 16 return int(argument, base) else: return int(argument) def Type(self): return 'int' def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None, flag_values=FLAGS, **args): """Registers a flag whose value must be an integer. If lower_bound, or upper_bound are set, then this flag must be within the given range. """ parser = IntegerParser(lower_bound, upper_bound) serializer = ArgumentSerializer() DEFINE(parser, name, default, help, flag_values, serializer, **args) _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values) # # ENUM FLAGS # class EnumParser(ArgumentParser): """Parser of a string enum value (a string value from a given set). If enum_values (see below) is not specified, any string is allowed. 
""" def __init__(self, enum_values=None): super(EnumParser, self).__init__() self.enum_values = enum_values def Parse(self, argument): if self.enum_values and argument not in self.enum_values: raise ValueError("value should be one of <%s>" % "|".join(self.enum_values)) return argument def Type(self): return 'string enum' class EnumFlag(Flag): """Basic enum flag; its value can be any string from list of enum_values.""" def __init__(self, name, default, help, enum_values=None, short_name=None, **args): enum_values = enum_values or [] p = EnumParser(enum_values) g = ArgumentSerializer() Flag.__init__(self, p, g, name, default, help, short_name, **args) if not self.help: self.help = "an enum string" self.help = "<%s>: %s" % ("|".join(enum_values), self.help) def _WriteCustomInfoInXMLFormat(self, outfile, indent): for enum_value in self.parser.enum_values: _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent) def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS, **args): """Registers a flag whose value can be any string from enum_values.""" DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args), flag_values) # # LIST FLAGS # class BaseListParser(ArgumentParser): """Base class for a parser of lists of strings. To extend, inherit from this class; from the subclass __init__, call BaseListParser.__init__(self, token, name) where token is a character used to tokenize, and name is a description of the separator. 
""" def __init__(self, token=None, name=None): assert name super(BaseListParser, self).__init__() self._token = token self._name = name self.syntactic_help = "a %s separated list" % self._name def Parse(self, argument): if isinstance(argument, list): return argument elif argument == '': return [] else: return [s.strip() for s in argument.split(self._token)] def Type(self): return '%s separated list of strings' % self._name class ListParser(BaseListParser): """Parser for a comma-separated list of strings.""" def __init__(self): BaseListParser.__init__(self, ',', 'comma') def WriteCustomInfoInXMLFormat(self, outfile, indent): BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent) _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent) class WhitespaceSeparatedListParser(BaseListParser): """Parser for a whitespace-separated list of strings.""" def __init__(self): BaseListParser.__init__(self, None, 'whitespace') def WriteCustomInfoInXMLFormat(self, outfile, indent): BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent) separators = list(string.whitespace) separators.sort() for ws_char in string.whitespace: _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent) def DEFINE_list(name, default, help, flag_values=FLAGS, **args): """Registers a flag whose value is a comma-separated list of strings.""" parser = ListParser() serializer = ListSerializer(',') DEFINE(parser, name, default, help, flag_values, serializer, **args) def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args): """Registers a flag whose value is a whitespace-separated list of strings. Any whitespace can be used as a separator. """ parser = WhitespaceSeparatedListParser() serializer = ListSerializer(' ') DEFINE(parser, name, default, help, flag_values, serializer, **args) # # MULTI FLAGS # class MultiFlag(Flag): """A flag that can appear multiple time on the command-line. 
The value of such a flag is a list that contains the individual values from all the appearances of that flag on the command-line. See the __doc__ for Flag for most behavior of this class. Only differences in behavior are described here: * The default value may be either a single value or a list of values. A single value is interpreted as the [value] singleton list. * The value of the flag is always a list, even if the option was only supplied once, and even if the default value is a single value """ def __init__(self, *args, **kwargs): Flag.__init__(self, *args, **kwargs) self.help += ';\n repeat this option to specify a list of values' def Parse(self, arguments): """Parses one or more arguments with the installed parser. Args: arguments: a single argument or a list of arguments (typically a list of default values); a single argument is converted internally into a list containing one item. """ if not isinstance(arguments, list): # Default value may be a list of values. Most other arguments # will not be, so convert them into a single-item list to make # processing simpler below. 
arguments = [arguments] if self.present: # keep a backup reference to list of previously supplied option values values = self.value else: # "erase" the defaults with an empty list values = [] for item in arguments: # have Flag superclass parse argument, overwriting self.value reference Flag.Parse(self, item) # also increments self.present values.append(self.value) # put list of option values back in the 'value' attribute self.value = values def Serialize(self): if not self.serializer: raise FlagsError("Serializer not present for flag %s" % self.name) if self.value is None: return '' s = '' multi_value = self.value for self.value in multi_value: if s: s += ' ' s += Flag.Serialize(self) self.value = multi_value return s def Type(self): return 'multi ' + self.parser.Type() def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS, **args): """Registers a generic MultiFlag that parses its args with a given parser. Auxiliary function. Normal users should NOT use it directly. Developers who need to create their own 'Parser' classes for options which can appear multiple times can call this module function to register their flags. """ DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args), flag_values) def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args): """Registers a flag whose value can be a list of any strings. Use the flag on the command line multiple times to place multiple string values into the list. The 'default' may be a single string (which will be converted into a single-element list) or a list of strings. """ parser = ArgumentParser() serializer = ArgumentSerializer() DEFINE_multi(parser, serializer, name, default, help, flag_values, **args) def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None, flag_values=FLAGS, **args): """Registers a flag whose value can be a list of arbitrary integers. Use the flag on the command line multiple times to place multiple integer values into the list. 
The 'default' may be a single integer (which will be converted into a single-element list) or a list of integers. """ parser = IntegerParser(lower_bound, upper_bound) serializer = ArgumentSerializer() DEFINE_multi(parser, serializer, name, default, help, flag_values, **args) def DEFINE_multi_float(name, default, help, lower_bound=None, upper_bound=None, flag_values=FLAGS, **args): """Registers a flag whose value can be a list of arbitrary floats. Use the flag on the command line multiple times to place multiple float values into the list. The 'default' may be a single float (which will be converted into a single-element list) or a list of floats. """ parser = FloatParser(lower_bound, upper_bound) serializer = ArgumentSerializer() DEFINE_multi(parser, serializer, name, default, help, flag_values, **args) # Now register the flags that we want to exist in all applications. # These are all defined with allow_override=1, so user-apps can use # these flagnames for their own purposes, if they want. DEFINE_flag(HelpFlag()) DEFINE_flag(HelpshortFlag()) DEFINE_flag(HelpXMLFlag()) # Define special flags here so that help may be generated for them. # NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module. _SPECIAL_FLAGS = FlagValues() DEFINE_string( 'flagfile', "", "Insert flag definitions from the given file into the command line.", _SPECIAL_FLAGS) DEFINE_string( 'undefok', "", "comma-separated list of flag names that it is okay to specify " "on the command line even if the program does not define a flag " "with that name. IMPORTANT: flags in this list that have " "arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
mit
plxaye/chromium
src/native_client_sdk/src/tools/run.py
34
2917
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Launch a local server on an ephemeral port, then launch a executable that points to that server. """ import copy import getos import optparse import os import subprocess import sys import httpd if sys.version_info < (2, 6, 0): sys.stderr.write("python 2.6 or later is required run this script\n") sys.exit(1) def main(args): usage = """usage: %prog [options] -- executable args... This command creates a local server on an ephemeral port, then runs: <executable> <args..> http://localhost:<port>/<page>. Where <page> can be set by -P, or uses index.html by default.""" parser = optparse.OptionParser(usage) parser.add_option('-C', '--serve-dir', help='Serve files out of this directory.', dest='serve_dir', default=os.path.abspath('.')) parser.add_option('-P', '--path', help='Path to load from local server.', dest='path', default='index.html') parser.add_option('-D', help='Add debug command-line when launching the chrome debug.', dest='debug', action='append', default=[]) parser.add_option('-E', help='Add environment variables when launching the executable.', dest='environ', action='append', default=[]) parser.add_option('--test-mode', help='Listen for posts to /ok or /fail and shut down the server with ' ' errorcodes 0 and 1 respectively.', dest='test_mode', action='store_true') parser.add_option('-p', '--port', help='Port to run server on. Default is 5103, ephemeral is 0.', default=5103) options, args = parser.parse_args(args) if not args: parser.error('No executable given.') # 0 means use an ephemeral port. server = httpd.LocalHTTPServer(options.serve_dir, options.port, options.test_mode) print 'Serving %s on %s...' 
% (options.serve_dir, server.GetURL('')) env = copy.copy(os.environ) for e in options.environ: key, value = map(str.strip, e.split('=')) env[key] = value cmd = args + [server.GetURL(options.path)] print 'Running: %s...' % (' '.join(cmd),) process = subprocess.Popen(cmd, env=env) # If any debug args are passed in, assume we want to debug if options.debug: if getos.GetPlatform() != 'win': cmd = ['xterm', '-title', 'NaCl Debugger', '-e'] else: cmd = [] cmd += options.debug print 'Starting debugger: ' + ' '.join(cmd) debug_process = subprocess.Popen(cmd, env=env) else: debug_process = False try: return server.ServeUntilSubprocessDies(process) finally: if process.returncode is None: process.kill() if debug_process and debug_process.returncode is None: debug_process.kill() if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
apache-2.0
Weicong-Lin/pymo-global
android/pgs4a-0.9.6/python-install/lib/python2.7/test/test_ast.py
13
16348
import sys, itertools, unittest from test import test_support import ast def to_tuple(t): if t is None or isinstance(t, (basestring, int, long, complex)): return t elif isinstance(t, list): return [to_tuple(e) for e in t] result = [t.__class__.__name__] if hasattr(t, 'lineno') and hasattr(t, 'col_offset'): result.append((t.lineno, t.col_offset)) if t._fields is None: return tuple(result) for f in t._fields: result.append(to_tuple(getattr(t, f))) return tuple(result) # These tests are compiled through "exec" # There should be atleast one test per statement exec_tests = [ # FunctionDef "def f(): pass", # ClassDef "class C:pass", # Return "def f():return 1", # Delete "del v", # Assign "v = 1", # AugAssign "v += 1", # Print "print >>f, 1, ", # For "for v in v:pass", # While "while v:pass", # If "if v:pass", # Raise "raise Exception, 'string'", # TryExcept "try:\n pass\nexcept Exception:\n pass", # TryFinally "try:\n pass\nfinally:\n pass", # Assert "assert v", # Import "import sys", # ImportFrom "from sys import v", # Exec "exec 'v'", # Global "global v", # Expr "1", # Pass, "pass", # Break "break", # Continue "continue", # for statements with naked tuples (see http://bugs.python.org/issue6704) "for a,b in c: pass", "[(a,b) for a,b in c]", "((a,b) for a,b in c)", ] # These are compiled through "single" # because of overlap with "eval", it just tests what # can't be tested with "eval" single_tests = [ "1+2" ] # These are compiled through "eval" # It should test all expressions eval_tests = [ # BoolOp "a and b", # BinOp "a + b", # UnaryOp "not v", # Lambda "lambda:None", # Dict "{ 1:2 }", # ListComp "[a for b in c if d]", # GeneratorExp "(a for b in c if d)", # Yield - yield expressions can't work outside a function # # Compare "1 < 2 < 3", # Call "f(1,2,c=3,*d,**e)", # Repr "`v`", # Num "10L", # Str "'string'", # Attribute "a.b", # Subscript "a[b:c]", # Name "v", # List "[1,2,3]", # Tuple "1,2,3", # Combination "a.b.c.d(a.b[1:2])", ] # TODO: expr_context, slice, boolop, 
operator, unaryop, cmpop, comprehension # excepthandler, arguments, keywords, alias class AST_Tests(unittest.TestCase): def _assertTrueorder(self, ast_node, parent_pos): if not isinstance(ast_node, ast.AST) or ast_node._fields is None: return if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)): node_pos = (ast_node.lineno, ast_node.col_offset) self.assertTrue(node_pos >= parent_pos) parent_pos = (ast_node.lineno, ast_node.col_offset) for name in ast_node._fields: value = getattr(ast_node, name) if isinstance(value, list): for child in value: self._assertTrueorder(child, parent_pos) elif value is not None: self._assertTrueorder(value, parent_pos) def test_snippets(self): for input, output, kind in ((exec_tests, exec_results, "exec"), (single_tests, single_results, "single"), (eval_tests, eval_results, "eval")): for i, o in itertools.izip(input, output): ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST) self.assertEqual(to_tuple(ast_tree), o) self._assertTrueorder(ast_tree, (0, 0)) def test_slice(self): slc = ast.parse("x[::]").body[0].value.slice self.assertIsNone(slc.upper) self.assertIsNone(slc.lower) self.assertIsInstance(slc.step, ast.Name) self.assertEqual(slc.step.id, "None") def test_from_import(self): im = ast.parse("from . 
import y").body[0] self.assertIsNone(im.module) def test_base_classes(self): self.assertTrue(issubclass(ast.For, ast.stmt)) self.assertTrue(issubclass(ast.Name, ast.expr)) self.assertTrue(issubclass(ast.stmt, ast.AST)) self.assertTrue(issubclass(ast.expr, ast.AST)) self.assertTrue(issubclass(ast.comprehension, ast.AST)) self.assertTrue(issubclass(ast.Gt, ast.AST)) def test_nodeclasses(self): x = ast.BinOp(1, 2, 3, lineno=0) self.assertEqual(x.left, 1) self.assertEqual(x.op, 2) self.assertEqual(x.right, 3) self.assertEqual(x.lineno, 0) # node raises exception when not given enough arguments self.assertRaises(TypeError, ast.BinOp, 1, 2) # can set attributes through kwargs too x = ast.BinOp(left=1, op=2, right=3, lineno=0) self.assertEqual(x.left, 1) self.assertEqual(x.op, 2) self.assertEqual(x.right, 3) self.assertEqual(x.lineno, 0) # this used to fail because Sub._fields was None x = ast.Sub() def test_pickling(self): import pickle mods = [pickle] try: import cPickle mods.append(cPickle) except ImportError: pass protocols = [0, 1, 2] for mod in mods: for protocol in protocols: for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests): ast2 = mod.loads(mod.dumps(ast, protocol)) self.assertEqual(to_tuple(ast2), to_tuple(ast)) class ASTHelpers_Test(unittest.TestCase): def test_parse(self): a = ast.parse('foo(1 + 1)') b = compile('foo(1 + 1)', '<unknown>', 'exec', ast.PyCF_ONLY_AST) self.assertEqual(ast.dump(a), ast.dump(b)) def test_dump(self): node = ast.parse('spam(eggs, "and cheese")') self.assertEqual(ast.dump(node), "Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), " "args=[Name(id='eggs', ctx=Load()), Str(s='and cheese')], " "keywords=[], starargs=None, kwargs=None))])" ) self.assertEqual(ast.dump(node, annotate_fields=False), "Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), " "Str('and cheese')], [], None, None))])" ) self.assertEqual(ast.dump(node, include_attributes=True), "Module(body=[Expr(value=Call(func=Name(id='spam', 
ctx=Load(), " "lineno=1, col_offset=0), args=[Name(id='eggs', ctx=Load(), " "lineno=1, col_offset=5), Str(s='and cheese', lineno=1, " "col_offset=11)], keywords=[], starargs=None, kwargs=None, " "lineno=1, col_offset=0), lineno=1, col_offset=0)])" ) def test_copy_location(self): src = ast.parse('1 + 1', mode='eval') src.body.right = ast.copy_location(ast.Num(2), src.body.right) self.assertEqual(ast.dump(src, include_attributes=True), 'Expression(body=BinOp(left=Num(n=1, lineno=1, col_offset=0), ' 'op=Add(), right=Num(n=2, lineno=1, col_offset=4), lineno=1, ' 'col_offset=0))' ) def test_fix_missing_locations(self): src = ast.parse('write("spam")') src.body.append(ast.Expr(ast.Call(ast.Name('spam', ast.Load()), [ast.Str('eggs')], [], None, None))) self.assertEqual(src, ast.fix_missing_locations(src)) self.assertEqual(ast.dump(src, include_attributes=True), "Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), " "lineno=1, col_offset=0), args=[Str(s='spam', lineno=1, " "col_offset=6)], keywords=[], starargs=None, kwargs=None, " "lineno=1, col_offset=0), lineno=1, col_offset=0), " "Expr(value=Call(func=Name(id='spam', ctx=Load(), lineno=1, " "col_offset=0), args=[Str(s='eggs', lineno=1, col_offset=0)], " "keywords=[], starargs=None, kwargs=None, lineno=1, " "col_offset=0), lineno=1, col_offset=0)])" ) def test_increment_lineno(self): src = ast.parse('1 + 1', mode='eval') self.assertEqual(ast.increment_lineno(src, n=3), src) self.assertEqual(ast.dump(src, include_attributes=True), 'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), ' 'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, ' 'col_offset=0))' ) # issue10869: do not increment lineno of root twice src = ast.parse('1 + 1', mode='eval') self.assertEqual(ast.increment_lineno(src.body, n=3), src.body) self.assertEqual(ast.dump(src, include_attributes=True), 'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), ' 'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, ' 
'col_offset=0))' ) def test_iter_fields(self): node = ast.parse('foo()', mode='eval') d = dict(ast.iter_fields(node.body)) self.assertEqual(d.pop('func').id, 'foo') self.assertEqual(d, {'keywords': [], 'kwargs': None, 'args': [], 'starargs': None}) def test_iter_child_nodes(self): node = ast.parse("spam(23, 42, eggs='leek')", mode='eval') self.assertEqual(len(list(ast.iter_child_nodes(node.body))), 4) iterator = ast.iter_child_nodes(node.body) self.assertEqual(next(iterator).id, 'spam') self.assertEqual(next(iterator).n, 23) self.assertEqual(next(iterator).n, 42) self.assertEqual(ast.dump(next(iterator)), "keyword(arg='eggs', value=Str(s='leek'))" ) def test_get_docstring(self): node = ast.parse('def foo():\n """line one\n line two"""') self.assertEqual(ast.get_docstring(node.body[0]), 'line one\nline two') def test_literal_eval(self): self.assertEqual(ast.literal_eval('[1, 2, 3]'), [1, 2, 3]) self.assertEqual(ast.literal_eval('{"foo": 42}'), {"foo": 42}) self.assertEqual(ast.literal_eval('(True, False, None)'), (True, False, None)) self.assertRaises(ValueError, ast.literal_eval, 'foo()') def test_literal_eval_issue4907(self): self.assertEqual(ast.literal_eval('2j'), 2j) self.assertEqual(ast.literal_eval('10 + 2j'), 10 + 2j) self.assertEqual(ast.literal_eval('1.5 - 2j'), 1.5 - 2j) self.assertRaises(ValueError, ast.literal_eval, '2 + (3 + 4j)') def test_main(): with test_support.check_py3k_warnings(("backquote not supported", SyntaxWarning)): test_support.run_unittest(AST_Tests, ASTHelpers_Test) def main(): if __name__ != '__main__': return if sys.argv[1:] == ['-g']: for statements, kind in ((exec_tests, "exec"), (single_tests, "single"), (eval_tests, "eval")): print kind+"_results = [" for s in statements: print repr(to_tuple(compile(s, "?", kind, 0x400)))+"," print "]" print "main()" raise SystemExit test_main() #### EVERYTHING BELOW IS GENERATED ##### exec_results = [ ('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Pass', (1, 9))], 
[])]), ('Module', [('ClassDef', (1, 0), 'C', [], [('Pass', (1, 8))], [])]), ('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Return', (1, 8), ('Num', (1, 15), 1))], [])]), ('Module', [('Delete', (1, 0), [('Name', (1, 4), 'v', ('Del',))])]), ('Module', [('Assign', (1, 0), [('Name', (1, 0), 'v', ('Store',))], ('Num', (1, 4), 1))]), ('Module', [('AugAssign', (1, 0), ('Name', (1, 0), 'v', ('Store',)), ('Add',), ('Num', (1, 5), 1))]), ('Module', [('Print', (1, 0), ('Name', (1, 8), 'f', ('Load',)), [('Num', (1, 11), 1)], False)]), ('Module', [('For', (1, 0), ('Name', (1, 4), 'v', ('Store',)), ('Name', (1, 9), 'v', ('Load',)), [('Pass', (1, 11))], [])]), ('Module', [('While', (1, 0), ('Name', (1, 6), 'v', ('Load',)), [('Pass', (1, 8))], [])]), ('Module', [('If', (1, 0), ('Name', (1, 3), 'v', ('Load',)), [('Pass', (1, 5))], [])]), ('Module', [('Raise', (1, 0), ('Name', (1, 6), 'Exception', ('Load',)), ('Str', (1, 17), 'string'), None)]), ('Module', [('TryExcept', (1, 0), [('Pass', (2, 2))], [('ExceptHandler', (3, 0), ('Name', (3, 7), 'Exception', ('Load',)), None, [('Pass', (4, 2))])], [])]), ('Module', [('TryFinally', (1, 0), [('Pass', (2, 2))], [('Pass', (4, 2))])]), ('Module', [('Assert', (1, 0), ('Name', (1, 7), 'v', ('Load',)), None)]), ('Module', [('Import', (1, 0), [('alias', 'sys', None)])]), ('Module', [('ImportFrom', (1, 0), 'sys', [('alias', 'v', None)], 0)]), ('Module', [('Exec', (1, 0), ('Str', (1, 5), 'v'), None, None)]), ('Module', [('Global', (1, 0), ['v'])]), ('Module', [('Expr', (1, 0), ('Num', (1, 0), 1))]), ('Module', [('Pass', (1, 0))]), ('Module', [('Break', (1, 0))]), ('Module', [('Continue', (1, 0))]), ('Module', [('For', (1, 0), ('Tuple', (1, 4), [('Name', (1, 4), 'a', ('Store',)), ('Name', (1, 6), 'b', ('Store',))], ('Store',)), ('Name', (1, 11), 'c', ('Load',)), [('Pass', (1, 14))], [])]), ('Module', [('Expr', (1, 0), ('ListComp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', 
('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]), ('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]), ] single_results = [ ('Interactive', [('Expr', (1, 0), ('BinOp', (1, 0), ('Num', (1, 0), 1), ('Add',), ('Num', (1, 2), 2)))]), ] eval_results = [ ('Expression', ('BoolOp', (1, 0), ('And',), [('Name', (1, 0), 'a', ('Load',)), ('Name', (1, 6), 'b', ('Load',))])), ('Expression', ('BinOp', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Add',), ('Name', (1, 4), 'b', ('Load',)))), ('Expression', ('UnaryOp', (1, 0), ('Not',), ('Name', (1, 4), 'v', ('Load',)))), ('Expression', ('Lambda', (1, 0), ('arguments', [], None, None, []), ('Name', (1, 7), 'None', ('Load',)))), ('Expression', ('Dict', (1, 0), [('Num', (1, 2), 1)], [('Num', (1, 4), 2)])), ('Expression', ('ListComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])), ('Expression', ('GeneratorExp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])), ('Expression', ('Compare', (1, 0), ('Num', (1, 0), 1), [('Lt',), ('Lt',)], [('Num', (1, 4), 2), ('Num', (1, 8), 3)])), ('Expression', ('Call', (1, 0), ('Name', (1, 0), 'f', ('Load',)), [('Num', (1, 2), 1), ('Num', (1, 4), 2)], [('keyword', 'c', ('Num', (1, 8), 3))], ('Name', (1, 11), 'd', ('Load',)), ('Name', (1, 15), 'e', ('Load',)))), ('Expression', ('Repr', (1, 0), ('Name', (1, 1), 'v', ('Load',)))), ('Expression', ('Num', (1, 
0), 10L)), ('Expression', ('Str', (1, 0), 'string')), ('Expression', ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',))), ('Expression', ('Subscript', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Slice', ('Name', (1, 2), 'b', ('Load',)), ('Name', (1, 4), 'c', ('Load',)), None), ('Load',))), ('Expression', ('Name', (1, 0), 'v', ('Load',))), ('Expression', ('List', (1, 0), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))), ('Expression', ('Tuple', (1, 0), [('Num', (1, 0), 1), ('Num', (1, 2), 2), ('Num', (1, 4), 3)], ('Load',))), ('Expression', ('Call', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',)), 'd', ('Load',)), [('Subscript', (1, 8), ('Attribute', (1, 8), ('Name', (1, 8), 'a', ('Load',)), 'b', ('Load',)), ('Slice', ('Num', (1, 12), 1), ('Num', (1, 14), 2), None), ('Load',))], [], None, None)), ] main()
mit
hkhamm/django_rest_tutorial_2
env/lib/python2.7/site-packages/django/contrib/formtools/wizard/storage/base.py
216
3949
from django.core.files.uploadedfile import UploadedFile from django.utils.datastructures import MultiValueDict from django.utils.functional import lazy_property from django.utils import six from django.contrib.formtools.wizard.storage.exceptions import NoFileStorageConfigured class BaseStorage(object): step_key = 'step' step_data_key = 'step_data' step_files_key = 'step_files' extra_data_key = 'extra_data' def __init__(self, prefix, request=None, file_storage=None): self.prefix = 'wizard_%s' % prefix self.request = request self.file_storage = file_storage def init_data(self): self.data = { self.step_key: None, self.step_data_key: {}, self.step_files_key: {}, self.extra_data_key: {}, } def reset(self): self.init_data() def _get_current_step(self): return self.data[self.step_key] def _set_current_step(self, step): self.data[self.step_key] = step current_step = lazy_property(_get_current_step, _set_current_step) def _get_extra_data(self): return self.data[self.extra_data_key] def _set_extra_data(self, extra_data): self.data[self.extra_data_key] = extra_data extra_data = lazy_property(_get_extra_data, _set_extra_data) def get_step_data(self, step): # When reading the serialized data, upconvert it to a MultiValueDict, # some serializers (json) don't preserve the type of the object. values = self.data[self.step_data_key].get(step, None) if values is not None: values = MultiValueDict(values) return values def set_step_data(self, step, cleaned_data): # If the value is a MultiValueDict, convert it to a regular dict of the # underlying contents. Some serializers call the public API on it (as # opposed to the underlying dict methods), in which case the content # can be truncated (__getitem__ returns only the first item). 
if isinstance(cleaned_data, MultiValueDict): cleaned_data = dict(cleaned_data.lists()) self.data[self.step_data_key][step] = cleaned_data @property def current_step_data(self): return self.get_step_data(self.current_step) def get_step_files(self, step): wizard_files = self.data[self.step_files_key].get(step, {}) if wizard_files and not self.file_storage: raise NoFileStorageConfigured( "You need to define 'file_storage' in your " "wizard view in order to handle file uploads.") files = {} for field, field_dict in six.iteritems(wizard_files): field_dict = field_dict.copy() tmp_name = field_dict.pop('tmp_name') files[field] = UploadedFile( file=self.file_storage.open(tmp_name), **field_dict) return files or None def set_step_files(self, step, files): if files and not self.file_storage: raise NoFileStorageConfigured( "You need to define 'file_storage' in your " "wizard view in order to handle file uploads.") if step not in self.data[self.step_files_key]: self.data[self.step_files_key][step] = {} for field, field_file in six.iteritems(files or {}): tmp_filename = self.file_storage.save(field_file.name, field_file) file_dict = { 'tmp_name': tmp_filename, 'name': field_file.name, 'content_type': field_file.content_type, 'size': field_file.size, 'charset': field_file.charset } self.data[self.step_files_key][step][field] = file_dict @property def current_step_files(self): return self.get_step_files(self.current_step) def update_response(self, response): pass
mit
smasala/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/__init__.py
122
3810
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # FIXME: Is this the right way to do this? 
from webkitpy.tool.steps.addsvnmimetypeforpng import AddSvnMimetypeForPng from webkitpy.tool.steps.applypatch import ApplyPatch from webkitpy.tool.steps.applypatchwithlocalcommit import ApplyPatchWithLocalCommit from webkitpy.tool.steps.applywatchlist import ApplyWatchList from webkitpy.tool.steps.attachtobug import AttachToBug from webkitpy.tool.steps.build import Build from webkitpy.tool.steps.checkstyle import CheckStyle from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory from webkitpy.tool.steps.closebug import CloseBug from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff from webkitpy.tool.steps.closepatch import ClosePatch from webkitpy.tool.steps.commit import Commit from webkitpy.tool.steps.confirmdiff import ConfirmDiff from webkitpy.tool.steps.createbug import CreateBug from webkitpy.tool.steps.discardlocalchanges import DiscardLocalChanges from webkitpy.tool.steps.editchangelog import EditChangeLog from webkitpy.tool.steps.ensurebugisopenandassigned import EnsureBugIsOpenAndAssigned from webkitpy.tool.steps.ensurelocalcommitifneeded import EnsureLocalCommitIfNeeded from webkitpy.tool.steps.haslanded import HasLanded from webkitpy.tool.steps.obsoletepatches import ObsoletePatches from webkitpy.tool.steps.options import Options from webkitpy.tool.steps.postdiff import PostDiff from webkitpy.tool.steps.postdiffforcommit import PostDiffForCommit from webkitpy.tool.steps.postdiffforrevert import PostDiffForRevert from webkitpy.tool.steps.preparechangelog import PrepareChangeLog from webkitpy.tool.steps.preparechangelogforrevert import PrepareChangeLogForRevert from webkitpy.tool.steps.promptforbugortitle import PromptForBugOrTitle from webkitpy.tool.steps.reopenbugafterrollout import ReopenBugAfterRollout from webkitpy.tool.steps.revertrevision import RevertRevision from webkitpy.tool.steps.runtests import RunTests from webkitpy.tool.steps.suggestreviewers import SuggestReviewers from webkitpy.tool.steps.update 
import Update from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer from webkitpy.tool.steps.validatechangelogs import ValidateChangeLogs from webkitpy.tool.steps.validatereviewer import ValidateReviewer
bsd-3-clause
jean/pybossa
pybossa/model/category.py
4
1456
# -*- coding: utf8 -*- # This file is part of PyBossa. # # Copyright (C) 2013 SF Isle of Man Limited # # PyBossa is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PyBossa is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with PyBossa. If not, see <http://www.gnu.org/licenses/>. from sqlalchemy import Integer, Text from sqlalchemy.schema import Column, ForeignKey from pybossa.core import db from pybossa.model import DomainObject, make_timestamp class Category(db.Model, DomainObject): '''A Table with Categories for Projects.''' __tablename__ = 'category' #: Category ID id = Column(Integer, primary_key=True) #: Name of the Category name = Column(Text, nullable=False, unique=True) #: Slug for the Category URL short_name = Column(Text, nullable=False, unique=True) #: Description of the Category description = Column(Text, nullable=False) #: UTC timestamp when the Category was created created = Column(Text, default=make_timestamp)
agpl-3.0
cindyyu/kuma
vendor/packages/translate/storage/versioncontrol/bzr.py
26
4781
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2004-2008,2012 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. from translate.storage.versioncontrol import (GenericRevisionControlSystem, prepare_filelist, run_command, youngest_ancestor) def is_available(): """check if bzr is installed""" exitcode, output, error = run_command(["bzr", "version"]) return exitcode == 0 _version = None def get_version(): """return a tuple of (major, minor) for the installed bazaar client""" global _version if _version: return _version import re command = ["bzr", "--version"] exitcode, output, error = run_command(command) if exitcode == 0: version_line = output.splitlines()[0] version_match = re.search(r"\d+\.\d+", version_line) if version_match: major, minor = version_match.group().split(".") if (major.isdigit() and minor.isdigit()): _version = (int(major), int(minor)) return _version # if anything broke before, then we return the invalid version number return (0, 0) class bzr(GenericRevisionControlSystem): """Class to manage items under revision control of bzr.""" RCS_METADIR = ".bzr" SCAN_PARENTS = True def update(self, revision=None, needs_revert=True): """Does a clean update of the given path""" output_revert = "" if needs_revert: # bzr revert command = ["bzr", "revert", self.location_abs] exitcode, output_revert, error = run_command(command) 
if exitcode != 0: raise IOError("[BZR] revert of '%s' failed: %s" % ( self.location_abs, error)) # bzr pull command = ["bzr", "pull"] exitcode, output_pull, error = run_command(command) if exitcode != 0: raise IOError("[BZR] pull of '%s' failed: %s" % ( self.location_abs, error)) return output_revert + output_pull def add(self, files, message=None, author=None): """Add and commit files.""" files = prepare_filelist(files) command = ["bzr", "add"] + files exitcode, output, error = run_command(command) if exitcode != 0: raise IOError("[BZR] add in '%s' failed: %s" % ( self.location_abs, error)) # go down as deep as possible in the tree to avoid accidental commits # TODO: explicitly commit files by name ancestor = youngest_ancestor(files) return output + type(self)(ancestor).commit(message, author) def commit(self, message=None, author=None): """Commits the file and supplies the given commit message if present""" # bzr commit command = ["bzr", "commit"] if message: command.extend(["-m", message]) # the "--author" argument is supported since bzr v0.91rc1 if author and (get_version() >= (0, 91)): command.extend(["--author", author]) # the filename is the last argument command.append(self.location_abs) exitcode, output_commit, error = run_command(command) if exitcode != 0: raise IOError("[BZR] commit of '%s' failed: %s" % ( self.location_abs, error)) # bzr push command = ["bzr", "push"] exitcode, output_push, error = run_command(command) if exitcode != 0: raise IOError("[BZR] push of '%s' failed: %s" % ( self.location_abs, error)) return output_commit + output_push def getcleanfile(self, revision=None): """Get a clean version of a file from the bzr repository""" # bzr cat command = ["bzr", "cat", self.location_abs] exitcode, output, error = run_command(command) if exitcode != 0: raise IOError("[BZR] cat failed for '%s': %s" % ( self.location_abs, error)) return output
mpl-2.0
devs1991/test_edx_docmode
common/djangoapps/auth_exchange/tests/utils.py
30
3916
""" Test utilities for OAuth access token exchange """ from social.apps.django_app.default.models import UserSocialAuth from third_party_auth.tests.utils import ThirdPartyOAuthTestMixin class AccessTokenExchangeTestMixin(ThirdPartyOAuthTestMixin): """ A mixin to define test cases for access token exchange. The following methods must be implemented by subclasses: * _assert_error(data, expected_error, expected_error_description) * _assert_success(data, expected_scopes) """ def setUp(self): # pylint: disable=arguments-differ super(AccessTokenExchangeTestMixin, self).setUp() # Initialize to minimal data self.data = { "access_token": self.access_token, "client_id": self.client_id, } def _assert_error(self, _data, _expected_error, _expected_error_description): """ Given request data, execute a test and check that the expected error was returned (along with any other appropriate assertions). """ raise NotImplementedError() def _assert_success(self, data, expected_scopes): """ Given request data, execute a test and check that the expected scopes were returned (along with any other appropriate assertions). """ raise NotImplementedError() def _create_client(self): """ Create an oauth2 client application using class defaults. 
""" return self.create_public_client(self.user, self.client_id) def test_minimal(self): self._setup_provider_response(success=True) self._assert_success(self.data, expected_scopes=[]) def test_scopes(self): self._setup_provider_response(success=True) self.data["scope"] = "profile email" self._assert_success(self.data, expected_scopes=["profile", "email"]) def test_missing_fields(self): for field in ["access_token", "client_id"]: data = dict(self.data) del data[field] self._assert_error(data, "invalid_request", "{} is required".format(field)) def test_invalid_client(self): self.data["client_id"] = "nonexistent_client" self._assert_error( self.data, "invalid_client", "nonexistent_client is not a valid client_id" ) def test_confidential_client(self): self.data['client_id'] += '_confidential' self.oauth_client = self.create_confidential_client(self.user, self.data['client_id']) self._assert_error( self.data, "invalid_client", "{}_confidential is not a public client".format(self.client_id), ) def test_inactive_user(self): self.user.is_active = False self.user.save() # pylint: disable=no-member self._setup_provider_response(success=True) self._assert_success(self.data, expected_scopes=[]) def test_invalid_acess_token(self): self._setup_provider_response(success=False) self._assert_error(self.data, "invalid_grant", "access_token is not valid") def test_no_linked_user(self): UserSocialAuth.objects.all().delete() self._setup_provider_response(success=True) self._assert_error(self.data, "invalid_grant", "access_token is not valid") def test_user_automatically_linked_by_email(self): UserSocialAuth.objects.all().delete() self._setup_provider_response(success=True, email=self.user.email) self._assert_success(self.data, expected_scopes=[]) def test_inactive_user_not_automatically_linked(self): UserSocialAuth.objects.all().delete() self._setup_provider_response(success=True, email=self.user.email) self.user.is_active = False self.user.save() # pylint: disable=no-member 
self._assert_error(self.data, "invalid_grant", "access_token is not valid")
agpl-3.0
ap3h3ad/roll20-character-sheets
Ars_Magica_5th/arm5_py_integration/__init__.py
8
22058
"""Module for providing the parts in the template.html file""" import csv from pathlib import Path import markdown from bs4 import BeautifulSoup as soup from .helpers import ( CHARACTERISTICS, FORMS, TECHNIQUES, enumerate_helper, repeat_template, ) from .translations import translation_attrs, translation_attrs_setup # Xp helper def xp( name: str, *, suffix="_exp", adv_suffix="_advancementExp", tot_suffix="_totalExp" ) -> str: """ Generate the HTML for the Xp parts of arts & abilities """ return f"""[<input type="text" class="number_3" name="attr_{name}{suffix}" value="0"/>/<input type="text" class="number_3 advance" name="attr_{name}{adv_suffix}" value="0" readonly/>/<input type="text" class="number_3 total" name="attr_{name}{tot_suffix}" value="0" readonly/>]""" def alert(title: str, text: str, *, level: str = "warning", ID: str = None): """ Generate the HTML to display a banner that can be permanently hidden This is used to inform player of important changes in updates. Arguments: text: Main text of the banner title: Title of the banner type: On of "warning", "info". The aspect of the banner ID: optional string ID of this banner, if you need to check if it is open/closed somewhere. 
Do NOT use numbers """ if not level in ("info", "warning"): raise ValueError("Level must be among 'info', 'warning'") if ID is None: alert_id = alert.numid alert.numid += 1 else: alert_id = str(ID) alert.strid.append(alert_id) return f"""<input type="hidden" class="alert-hidder" name="attr_alert-{alert_id}" value="0"/> <div class="alert alert-{level}"> <div> <h3> {level.title()} - {title}</h3> {text} </div> <label class="fakebutton"> <input type="checkbox" name="attr_alert-{alert_id}" value="1" /> × </label> </div>""" # python supports attributes on function # we use that to store the internal global variable used by the function alert.numid = 0 alert.strid = [] def disable_old_alerts(marker: str): lines = ",\n ".join( f'"alert-{i}": 1' for i in list(range(alert.numid)) + alert.strid ) return f"""setAttrs({{ "{marker}": 1, {lines} }}); """ # Add new parts to this dictionary # parts can be defined in other modules and imported if the generating # code is long GLOBALS = { # makes the module available "markdown": markdown, # makes those function available in the HTML "xp": xp, "alert": alert, "disable_old_alerts": disable_old_alerts, # Makes those values available in the HTML "translation_attrs": translation_attrs, "translation_attrs_setup": translation_attrs_setup, "html_header": "<!-- DO NOT MODIFY !\nThis file is automatically generated from a template. Any change will be overwritten\n-->", "css_header": "/* DO NOT MODIFY !\nThis file is automatically generated from a tempalte. 
Any change will be overwritten\n*/", } # Personality traits GLOBALS["personality_trait_rows"] = repeat_template( """<tr> <td><input type="text" class="heading_2" style="width:245px" name="attr_Personality_Trait%%"/></td> <td><input type="text" class="number_1" style="width:70px;" name="attr_Personality_Trait%%_score"/></td> <td><div class="flex-container"> <button type="roll" class="button simple-roll" name="roll_personality%%_simple" value="&{template:generic} {{Banner=^{personality} ^{roll}}} {{Label=@{Personality_Trait%%}}} {{Result=[[@{simple-die} + [[@{Personality_Trait%%_Score}]] [@{Personality_Trait%%}] + (?{@{circumstantial_i18n}|0}) [@{circumstances_i18n}] ]]}} "></button> <button type="roll" class="button stress-roll" name="roll_personality%%_stress" value="&{template:generic} {{Banner=^{personality} ^{roll}}} {{Label=@{Personality_Trait%%}}} {{Result=[[@{stress-die} + [[@{Personality_Trait%%_Score}]] [@{Personality_Trait%%}] + (?{@{circumstantial_i18n}|0}) [@{circumstances_i18n}] ]]}} {{stress=1}} {{botch-button=[@{botch_i18n}!](~@{character_name}|botch)}} {{crit-button=[@{critical_i18n}!](~@{character_name}|critical)}}"></button> </div></td> </tr>""", range(1, 7), ) # Reputations GLOBALS["reputation_rows"] = repeat_template( """<tr> <td><input type="text" class="heading_2" name="attr_Reputations%%"/></td> <td><input type="text" class="heading_2a" name="attr_Reputations%%_type"/></td> <td><input type="text" class="number_1" style="width:50px;" name="attr_Reputations%%_score"/></td> <td><div class="flex-container"> <button type="roll" class="button simple-roll" name="roll_reputation%%_simple" value="&{template:generic} {{Banner=^{reputation} ^{roll}}} {{Label=@{Reputations%%}}} {{Result=[[@{simple-die} + [[@{Reputations%%_Score}]] [@{Reputations%%}] + (?{@{circumstantial_i18n}|0}) [@{circumstances_i18n}] ]] }}"></button> <button type="roll" class="button stress-roll" name="roll_reputation%%_stress" value="&{template:generic} {{Banner=^{reputation} 
^{roll}}} {{Label=@{Reputations%%}}} {{Result=[[@{stress-die} + [[@{Reputations%%_Score}]] [@{Reputations%%}] + (?{@{circumstantial_i18n}|0}) [@{circumstances_i18n}]]] }} {{stress=1}} {{botch-button=[@{botch_i18n}!](~@{character_name}|botch)}} {{crit-button=[@{critical_i18n}!](~@{character_name}|critical)}}"></button> </div></td> </tr>""", range(1, 7), ) # Characteristics definitions characteristic_roll = "(@{%(Char)s_Score}) [@{%(char)s_i18n}] + (@{wound_total}) [@{wounds_i18n}] + ([[floor(@{Fatigue})]]) [@{fatigue_i18n}] + (?{@{circumstantial_i18n}|0}) [@{circumstances_i18n}]" GLOBALS["characteristic_rows"] = repeat_template( """<tr> <th data-i18n="%(char)s" >%(Char)s</th> <td><input type="text" class="heading_2" name="attr_%(Char)s_Description"/></td> <td><input type="text" class="number_1" name="attr_%(Char)s_Score" value="0"/></td> <td><input type="text" class="number_1" name="attr_%(Char)s_Aging" value="0"/></td> <td><div class="flex-container"> <button type="roll" class="button simple-roll" name="roll_%(Char)s_simple" value="&{template:ability} {{name= @{character_name}}} {{label0=^{%(char)s}}} {{banner=@{%(Char)s_Description}}} {{label1=^{score}}} {{result1=@{%(Char)s_Score}}} {{label2=^{characteristic-m}}} {{label2=^{weakness-m}}} {{result2=[[[[floor(@{Fatigue})]][@{fatigue_i18n}] + @{wound_total}[@{wounds_i18n}]]]}} {{label3=^{circumstances-m}}} {{result3=[[(?{@{circumstantial_i18n}|0})]]}} {{result0=[[ @{simple-die} + $characteristic_roll$ ]]}}"></button> <button type="roll" class="button stress-roll" name="roll_%(Char)s_stress" value="&{template:ability} {{name= @{character_name}}} {{label0=^{%(char)s}}} {{banner=@{%(Char)s_Description}}} {{label1=^{score}}} {{result1=@{%(Char)s_Score}}} {{label2=^{characteristic-m}}} {{label2=^{weakness-m}}} {{result2=[[[[floor(@{Fatigue})]][@{fatigue_i18n}] + @{wound_total}[@{wounds_i18n}]]]}} {{label3=^{circumstances-m}}} {{result3=[[(?{@{circumstantial_i18n}|0})]]}} {{result0=[[ @{stress-die} + $characteristic_roll$ 
]]}} {{stress=1}} {{botch-button=[@{botch_i18n}!](~@{character_name}|botch)}} {{crit-button=[@{critical_i18n}!](~@{character_name}|critical)}}"></button> </div></td> </tr>""".replace( "$characteristic_roll$", characteristic_roll ), CHARACTERISTICS, str_key="char", ) # Characteristic options GLOBALS["characteristic_score_options"] = repeat_template( """<option value="@{%(Char)s_Score}" data-i18n="%(char)s" >%(Char)s</option>""", CHARACTERISTICS, str_key="char", ) GLOBALS["characteristic_score_ask"] = ( "?{@{characteristic_i18n}|" + "| ".join( "@{%(char)s_i18n}, @{%(Char)s_Score} [@{%(char)s_i18n}]" % {"char": char, "Char": char.capitalize()} for char in CHARACTERISTICS ) + "}" ) GLOBALS["characteristic_name_options"] = repeat_template( """<option value="%(Char)s" data-i18n="%(char)s" >%(Char)s</option>""", CHARACTERISTICS, str_key="char", ) GLOBALS["characteristic_name_ask_attr"] = ( "?{@{characteristic_i18n}|" + "| ".join( "@{%(char)s_i18n},@{%(char)s_Score} [@{%(char)s_i18n}]" % {"char": char} for char in CHARACTERISTICS ) + "}" ) # Abilities ability_roll_template = "&{template:ability} {{name=@{character_name}}} {{label0=@{Ability_name}}} {{banner=@{Ability_Speciality}}} {{label1=^{rank}}} {{result1= [[ @{Ability_Score} + @{Ability_Puissant} ]]}} {{label2=@{Ability_CharacName}}} {{result2=[[@{sys_at}@{character_name}@{sys_pipe}@{Ability_CharacName}_Score@{sys_rbk}]]}} {{label3=^{weakness-m}}} {{result3=[[ ([[floor(@{Fatigue})]]) [@{fatigue_i18n}] + (@{wound_total}) [@{wounds_i18n}] ]]}} {{label4=^{circumstances-m}}} {{result4=[[(?{@{circumstantial_i18n}|0})]]}} {{result0=%(roll)s}} {{botch-button=[@{botch_i18n}!](~@{character_name}|botch)}} {{crit-button=[@{critical_i18n}!](~@{character_name}|critical)}}" ability_roll = "[[ %(die)s + (@{Ability_Score} + @{Ability_Puissant}) [@{Ability_name}] + (@{sys_at}@{character_name}@{sys_pipe}@{Ability_CharacName}_Score@{sys_rbk}) [@{sys_at}@{character_name}@{sys_pipe}@{Ability_CharacName}_i18n@{sys_rbk}] + (@{wound_total}) 
[@{wounds_i18n}] + ([[floor(@{Fatigue})]]) [@{fatigue_i18n}] + (?{@{circumstantial_i18n}|0}) [@{circumstances_i18n}] ]]" GLOBALS["ability_roll_simple"] = ability_roll_template % { "roll": ability_roll % {"die": "@{simple-die}"} } GLOBALS["ability_roll_stress"] = ( ability_roll_template % {"roll": ability_roll % {"die": "@{stress-die}"}} ) + " {{stress=1}}" # Technique definitions GLOBALS["technique_definitions"] = repeat_template( """<tr> <td><input type="text" class="number_3" name="attr_%(Tech)s_Score" value="0"/></td> <td data-i18n="%(tech)s" >%(Tech)s</td> <td>""" + xp("%(Tech)s") + """</td> <td style="text-align: center"><input type="text" class="number_3 minor" name="attr_%(Tech)s_Puissant" value="0"/></td> </tr>""", TECHNIQUES, str_key="tech", ) # Technique options GLOBALS["technique_score_options"] = repeat_template( """<option value="(@{%(Tech)s_Score} + @{%(Tech)s_Puissant}) [@{%(tech)s_i18n}]" data-i18n="%(tech)s" >%(Tech)s</option>""", TECHNIQUES, str_key="tech", ) GLOBALS["technique_score_options_unlabeled"] = repeat_template( """<option value="@{%(Tech)s_Score} + @{%(Tech)s_Puissant}" data-i18n="%(tech)s" >%(Tech)s</option>""", TECHNIQUES, str_key="tech", ) GLOBALS["technique_name_options"] = repeat_template( """<option value="%(Tech)s" data-i18n="%(tech)s" >%(Tech)s</option>""", TECHNIQUES, str_key="tech", ) GLOBALS["technique_enumerated_options"] = repeat_template( """<option value="%(index)s" data-i18n="%(tech)s" >%(Tech)s</option>""", enumerate_helper(TECHNIQUES, [str.capitalize], start=1), tuple_keys=("index", "tech", "Tech"), ) # Form definitions form_template = ( """<tr> <td><input type="text" class="number_3" name="attr_%(Form)s_Score" value="0"/></td> <td data-i18n="%(form)s" >%(Form)s</td> <td>""" + xp("%(Form)s") + """</td> <td style="text-align: center"><input type="text" class="number_3 minor" name="attr_%(Form)s_Puissant" value="0"/></td> </tr>""" ) GLOBALS["form_definitions_1"] = repeat_template( form_template, FORMS[:5], str_key="form" 
) GLOBALS["form_definitions_2"] = repeat_template( form_template, FORMS[5:], str_key="form" ) # Form options GLOBALS["form_score_options"] = repeat_template( """<option value="(@{%(Form)s_Score} + @{%(Form)s_Puissant}) [@{%(form)s_i18n}]" data-i18n="%(form)s" >%(Form)s</option>""", FORMS, str_key="form", ) GLOBALS["form_score_options_unlabeled"] = repeat_template( """<option value="@{%(Form)s_Score} + @{%(Form)s_Puissant}" data-i18n="%(form)s" >%(Form)s</option>""", FORMS, str_key="form", ) GLOBALS["form_name_options"] = repeat_template( """<option value="%(Form)s" data-i18n="%(form)s" >%(Form)s</option>""", FORMS, str_key="form", ) GLOBALS["form_enumerated_options"] = repeat_template( """<option value="%(index)s" data-i18n="%(form)s" >%(Form)s</option>""", enumerate_helper(FORMS, [str.capitalize], start=1), tuple_keys=("index", "form", "Form"), ) # Casting rolls ## Magic tab spontaneous_roll_template = "&{template:arcane} {{label0=^{spontaneous} ^{casting}}} {{result0=%(roll)s}} {{label1=^{aura}}} {{result1=@{aura}}} {{label2=^{weakness-m}}} {{result2=[[ @{wound_total}[@{wounds_i18n}] + [[floor(@{fatigue})]][@{fatigue_i18n}] ]]}} {{label3=^{circumstances-m}}} {{result3=?{@{modifiers_i18n}|0}}} {{botch-button=[@{botch_i18n}!](~@{character_name}|botch)}} {{crit-button=[@{critical_i18n}!](~@{character_name}|critical-spontaneous)}}" spontaneous_roll = "[[(%(die)s + @{Spontaneous1_Technique} + @{Spontaneous1_Form} + ([[@{Spontaneous1_Focus}]]) [@{focus_i18n}] + (@{gestures}) + (@{words}) + (@{Stamina_Score}) [@{stamina_i18n}] + (@{aura}) [@{aura_i18n}] + ([[floor(@{Fatigue})]]) [@{fatigue_i18n}] + (@{wound_total}) [@{wounds_i18n}] + (?{@{modifiers_i18n}|0}) [@{modifiers_i18n}] )/2 ]]" GLOBALS["spontaneous_roll_simple"] = spontaneous_roll_template % { "roll": spontaneous_roll % {"die": "@{simple-die}"} } GLOBALS["spontaneous_roll_stress"] = ( spontaneous_roll_template % {"roll": spontaneous_roll % {"die": "@{stress-die}"}} ) + " {{stress=1}}" ceremonial_roll_template = 
"&{template:arcane} {{label0=^{ceremonial} ^{casting}}} {{result0= %(roll)s }} {{label1=^{aura}}} {{result1=@{aura}}} {{label2=^{weakness-m}}} {{result2=[[@{wound_total}[@{wounds_i18n}] + [[floor(@{fatigue})]][@{fatigue_i18n}] ]]}} {{label3=^{circumstances-m}}} {{result3=?{@{modifiers_i18n}|0}}} {{botch-button=[@{botch_i18n}!](~@{character_name}|botch)}} {{crit-button=[@{critical_i18n}!](~@{character_name}|critical-spontaneous)}}" ceremonial_roll = "[[(%(die)s+ @{Ceremonial_Technique} + @{Ceremonial_Form} + ([[@{Ceremonial_Focus}]]) [@{focus_i18n}] + (@{gestures}) + (@{words}) + (@{Stamina_Score}) [@{stamina_i18n}] + (@{aura}) [@{aura_i18n}] + ([[floor(@{Fatigue})]]) [@{fatigue_i18n}] + (@{wound_total}) [@{wounds_i18n}] + (@{Ceremonial_Artes_Lib}) [@{artes_i18n}] + (@{Ceremonial_Philos}) [@{philos_i18n}] + (?{@{modifiers_i18n}|0}) [@{modifiers_i18n}] )/2 ]]" GLOBALS["ceremonial_roll_simple"] = ceremonial_roll_template % { "roll": ceremonial_roll % {"die": "@{simple-die}"} } GLOBALS["ceremonial_roll_stress"] = ( ceremonial_roll_template % {"roll": ceremonial_roll % {"die": "@{stress-die}"}} ) + " {{stress=1}}" formulaic_roll_template = "&{template:arcane} {{label0=^{formulaic} ^{casting}}} {{result0= %(roll)s }} {{label1=^{aura}}} {{result1=@{aura}}} {{label2=^{weakness-m}}} {{result2=[[@{wound_total}[@{wounds_i18n}] + [[floor(@{fatigue})]][@{fatigue_i18n}] ]]}} {{label3=^{circumstances-m}}} {{result3=?{@{modifiers_i18n}|0}}} {{botch-button=[@{botch_i18n}!](~@{character_name}|botch)}} {{crit-button=[@{critical_i18n}!](~@{character_name}|critical)}}" formulaic_roll = "[[%(die)s + @{Formulaic_Technique} + @{Formulaic_Form} + ([[@{Formulaic_Focus}]]) [@{focus_i18n}] + (@{gestures}) + (@{words}) + (@{Stamina_Score}) [@{stamina_i18n}] + (@{aura}) [@{aura_i18n}] + ([[floor(@{Fatigue})]]) [@{fatigue_i18n}] + (@{wound_total}) [@{wounds_i18n}] + (?{@{modifiers_i18n}|0}) [@{modifiers_i18n}] ]]" GLOBALS["formulaic_roll_simple"] = formulaic_roll_template % { "roll": 
formulaic_roll % {"die": "@{simple-die}"} } GLOBALS["formulaic_roll_stress"] = ( formulaic_roll_template % {"roll": formulaic_roll % {"die": "@{stress-die}"}} ) + " {{stress=1}}" ritual_roll_template = "&{template:arcane} {{label0=^{ritual} ^{casting}}} {{result0= %(roll)s }} {{label1=^{aura}}} {{result1=@{aura}}} {{label2=^{weakness-m}}} {{result2=[[ @{wound_total}[@{wounds_i18n}] + [[floor(@{fatigue})]][@{fatigue_i18n}] ]]}} {{label3=^{circumstances-m}}} {{result3=?{@{modifiers_i18n}|0}}} {{botch-button=[@{botch_i18n}!](~@{character_name}|botch)}} {{crit-button=[@{critical_i18n}!](~@{character_name}|critical)}}" ritual_roll = "[[%(die)s + @{Ritual_Technique} + @{Ritual_Form} + ([[@{Ritual_Focus}]]) [@{focus_i18n}] + (@{Stamina_Score}) [@{stamina_i18n}] + (@{aura}) [@{aura_i18n}] + (@{Ritual_Artes_Lib}) [@{artes_i18n}] + (@{Ritual_Philos}) [@{philos_i18n}] + (@{wound_total}) [@{wounds_i18n}] + ([[floor(@{fatigue})]]) [@{fatigue_i18n}] + (?{@{modifiers_i18n}|0}) [@{modifiers_i18n}] ]]" GLOBALS["ritual_roll_simple"] = ritual_roll_template % { "roll": ritual_roll % {"die": "@{simple-die}"} } GLOBALS["ritual_roll_stress"] = ( ritual_roll_template % {"roll": ritual_roll % {"die": "@{stress-die}"}} ) + " {{stress=1}}" ## Spells # Deferred attribute access to get the spell's technique value spell_tech_value = "(@{sys_at}@{character_name}@{sys_pipe}@{spell_tech_name}_Score@{sys_rbk} + @{sys_at}@{character_name}@{sys_pipe}@{spell_tech_name}_Puissant@{sys_rbk}) [@{sys_at}@{character_name}@{sys_pipe}@{spell_tech_name}_i18n@{sys_rbk}]" spell_form_value = "(@{sys_at}@{character_name}@{sys_pipe}@{spell_form_name}_Score@{sys_rbk} + @{sys_at}@{character_name}@{sys_pipe}@{spell_form_name}_Puissant@{sys_rbk}) [@{sys_at}@{character_name}@{sys_pipe}@{spell_form_name}_i18n@{sys_rbk}]" # Export the deferred attribute access for use in the HTML since the focus depends on them GLOBALS["spell_tech_value"] = spell_tech_value GLOBALS["spell_form_value"] = spell_form_value 
spell_roll_template = "&{template:spell} {{spell= @{spell_name}}} {{character= @{character_name} }} {{sigil=@{sigil}}} {{roll= %(roll)s }} {{range= @{spell_range} }} {{duration= @{spell_duration} }} {{target= @{spell_target} }} {{effect= @{spell_note} }} {{mastery= @{spell_note-2} }} {{Technique= @{sys_at}@{character_name}@{sys_pipe}@{spell_tech_name}_i18n@{sys_rbk} }} {{Form= @{sys_at}@{character_name}@{sys_pipe}@{spell_form_name}_i18n@{sys_rbk} }} {{Level= @{spell_level} }} {{botch-button=[@{botch_i18n}!](~@{character_name}|botch)}} {{crit-button=[@{critical_i18n}!](~@{character_name}|critical)}}" spell_roll = ( "[[%(die)s + (@{Stamina_Score}) [@{stamina_i18n}] + " + spell_tech_value + " + " + spell_form_value + "+ ([[@{spell_Focus}]]) [@{focus_i18n}] + (@{spell_bonus}) [@{bonus_i18n}] + (@{gestures}) + (@{words}) + (@{aura}) [@{aura_i18n}] + ([[floor(@{Fatigue})]]) [@{fatigue_i18n}] + (@{wound_total}) [@{wounds_i18n}] + (?{@{modifiers_i18n}|0}) [@{modifiers_i18n}] ]]" ) GLOBALS["spell_roll_simple"] = spell_roll_template % { "roll": spell_roll % {"die": "@{simple-die}"} } GLOBALS["spell_roll_stress"] = ( spell_roll_template % {"roll": spell_roll % {"die": "@{stress-die}"}} ) + " {{stress=1}}" # Botch formula GLOBALS["botch_separate"] = ( "&{template:botch} {{roll= " + ( "?{@{botch_num_i18n} | " + "|".join( f"{n} {'Die' if n==1 else 'Dice'}," + " ".join(["[[1d10cf10cs0]]"] * n) for n in range(1, 9) ) + "}" ) + " }} {{type=Grouped}}" ) # Fatigue add_fatigue_lvl_num = 10 GLOBALS["fatigue_levels_options"] = repeat_template( """<option value="%%">%%</option>""", range(0, add_fatigue_lvl_num + 1) ) GLOBALS["additional_fatigue_levels"] = repeat_template( """<tr class="addfatigue-%(num)s"> <td><input type="radio" class="radio_1" name="attr_Fatigue" value="%(value)s"><span></span></td> <td style="text-align:center;">0</td> <td>2 min.</td> <td data-i18n="winded" >Winded</td> </tr>""", [(str(i), str(i / 1000)) for i in range(1, add_fatigue_lvl_num + 1)], tuple_keys=("num", 
"value"), ) GLOBALS["fatigue_level_css"] = "\n".join( ( # IF the fatigue selector is not on a value for which the level is visible "".join( ':not(.sheet-fatigue-proxy[value="%s"])' % val for val in range(lvl, add_fatigue_lvl_num + 1) ) # THEN hide the level + (" + table tr.sheet-addfatigue-%s" % lvl) + " {\n display: none;\n}" ) for lvl in range(1, add_fatigue_lvl_num + 1) ) # Documentation with open(Path(__file__).parents[1] / "documentation.md") as f: html = markdown.markdown("".join(f)) html = soup(html, "html.parser") for i in range(1, 10): for tag in html.find_all(f"h{i}"): tag.attrs["class"] = tag.get("class", "") + " heading_label" GLOBALS["documentation"] = html.prettify() # Rolltemplate ## Custom rolltemplate colors with open(Path(__file__).parent / "css_colors.csv", newline="") as f: reader = csv.DictReader(f) css_rules = [] for color_def in reader: # Base CSS rules lines_header = [ f".sheet-rolltemplate-custom .sheet-crt-container.sheet-crt-color-{color_def['color']} {{", f" --header-bg-color: {color_def['hex']};", ] lines_rolls = [ f".sheet-rolltemplate-custom .sheet-crt-container.sheet-crt-rlcolor-{color_def['color']} .inlinerollresult {{", f" --roll-bg-color: {color_def['hex']};", ] lines_buttons = [ f".sheet-rolltemplate-custom .sheet-crt-container.sheet-crt-btcolor-{color_def['color']} a {{", f" --button-bg-color: {color_def['hex']};", ] # Adapt text color to background color hex = color_def["hex"].lstrip("#") r, g, b = tuple(int(hex[2 * i : 2 * i + 2], 16) / 255 for i in range(3)) # Assuming sRGB -> Luma # may need fixing, color spaces are confusing luma = 0.2126 * r + 0.7152 * g + 0.0722 * b if luma > 0.5: # arbitrary threshold # switch to black text if luma is high enough lines_header.append(" --header-text-color: #000;") lines_buttons.append(" --button-text-color: #000;") if luma < 0.5: lines_rolls.append(" --roll-text-color: #FFF;") # Build the rules for lines in (lines_header, lines_rolls, lines_buttons): lines.append("}") 
css_rules.append("\n".join(lines)) GLOBALS["custom_rt_color_css"] = "\n".join(css_rules)
mit
rohitwaghchaure/vestasi-frappe
frappe/widgets/form/assign_to.py
17
4264
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals """assign/unassign to ToDo""" import frappe from frappe import _ from frappe.utils import cint from frappe.widgets.form.load import get_docinfo def get(args=None): """get assigned to""" if not args: args = frappe.local.form_dict get_docinfo(frappe.get_doc(args.get("doctype"), args.get("name"))) return frappe.db.sql("""select owner, description from `tabToDo` where reference_type=%(doctype)s and reference_name=%(name)s and status="Open" order by modified desc limit 5""", args, as_dict=True) @frappe.whitelist() def add(args=None): """add in someone's to do list args = { "assign_to": , "doctype": , "name": , "description": } """ if not args: args = frappe.local.form_dict if frappe.db.sql("""select owner from `tabToDo` where reference_type=%(doctype)s and reference_name=%(name)s and status="Open" and owner=%(assign_to)s""", args): frappe.msgprint(_("Already in user's To Do list"), raise_exception=True) return else: from frappe.utils import nowdate d = frappe.get_doc({ "doctype":"ToDo", "owner": args['assign_to'], "reference_type": args['doctype'], "reference_name": args['name'], "description": args.get('description'), "priority": args.get("priority", "Medium"), "status": "Open", "date": args.get('date', nowdate()), "assigned_by": args.get('assigned_by', frappe.user.name), }).insert(ignore_permissions=True) # set assigned_to if field exists if frappe.get_meta(args['doctype']).get_field("assigned_to"): frappe.db.set_value(args['doctype'], args['name'], "assigned_to", args['assign_to']) # notify if not args.get("no_notification"): notify_assignment(d.assigned_by, d.owner, d.reference_type, d.reference_name, action='ASSIGN', description=args.get("description"), notify=args.get('notify')) return get(args) @frappe.whitelist() def remove(doctype, name, assign_to): """remove from todo""" try: todo = frappe.get_doc("ToDo", 
{"reference_type":doctype, "reference_name":name, "owner":assign_to, "status":"Open"}) todo.status = "Closed" todo.save(ignore_permissions=True) notify_assignment(todo.assigned_by, todo.owner, todo.reference_type, todo.reference_name) except frappe.DoesNotExistError: pass # clear assigned_to if field exists if frappe.get_meta(doctype).get_field("assigned_to"): frappe.db.set_value(doctype, name, "assigned_to", None) return get({"doctype": doctype, "name": name}) def clear(doctype, name): for assign_to in frappe.db.sql_list("""select owner from `tabToDo` where reference_type=%(doctype)s and reference_name=%(name)s""", locals()): remove(doctype, name, assign_to) def notify_assignment(assigned_by, owner, doc_type, doc_name, action='CLOSE', description=None, notify=0): """ Notify assignee that there is a change in assignment """ if not (assigned_by and owner and doc_type and doc_name): return # self assignment / closing - no message if assigned_by==owner: return from frappe.boot import get_fullnames user_info = get_fullnames() # Search for email address in description -- i.e. assignee from frappe.utils import get_url_to_form assignment = get_url_to_form(doc_type, doc_name, label="%s: %s" % (doc_type, doc_name)) if action=='CLOSE': if owner == frappe.session.get('user'): arg = { 'contact': assigned_by, 'txt': _("The task %s, that you assigned to %s, has been closed.") % (assignment, user_info.get(owner, {}).get('fullname')) } else: arg = { 'contact': assigned_by, 'txt': _("The task %s, that you assigned to %s, has been closed by %s.") % (assignment, user_info.get(owner, {}).get('fullname'), user_info.get(frappe.session.get('user'), {}).get('fullname')) } else: arg = { 'contact': owner, 'txt': _("A new task, %s, has been assigned to you by %s. 
%s") % (assignment, user_info.get(frappe.session.get('user'), {}).get('fullname'), description and ("<p>" + _("Description") + ": " + description + "</p>") or ""), 'notify': notify } arg["parenttype"] = "Assignment" from frappe.core.page.messages import messages messages.post(**arg)
mit
benpetty/Code-Katas
katas/sort_cards/sort_cards.py
1
1347
"""Sort Cards. https://www.codewars.com/kata/56f399b59821793533000683 Write a function sort_cards() that sorts a shuffled list of cards, so that any given list of cards is sorted by rank, no matter the starting collection. All cards in the list are represented as strings, so that sorted list of cards looks like this: ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K'] Example: >>> sort_cards( ['3', '9', 'A', '5', 'T', '8', '2', '4', 'Q', '7', 'J', '6', 'K'] ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K'] Hint: Tests will have many occurrences of same rank cards, as well as vary in length. You can assume though, that input list is always going to have at least 1 element. """ def sort_cards(cards): """Input a list of strings representing cards and return them sorted.""" rank = { "A": 0, "2": 1, "3": 2, "4": 3, "5": 4, "6": 5, "7": 6, "8": 7, "9": 8, "T": 9, "J": 10, "Q": 11, "K": 12, } ranked = [] for card in cards: card = str(card).upper() if card in rank: card = (rank[card], card) ranked.append(card) ranked = sorted(ranked) result = [] for card in ranked: result.append(card[1]) return result
mit
sivaramakrishnansr/ryu
ryu/contrib/tinyrpc/protocols/jsonrpc.py
41
8589
#!/usr/bin/env python # -*- coding: utf-8 -*- from .. import RPCBatchProtocol, RPCRequest, RPCResponse, RPCErrorResponse,\ InvalidRequestError, MethodNotFoundError, ServerError,\ InvalidReplyError, RPCError, RPCBatchRequest, RPCBatchResponse import json class FixedErrorMessageMixin(object): def __init__(self, *args, **kwargs): if not args: args = [self.message] super(FixedErrorMessageMixin, self).__init__(*args, **kwargs) def error_respond(self): response = JSONRPCErrorResponse() response.error = self.message response.unique_id = None response._jsonrpc_error_code = self.jsonrpc_error_code return response class JSONRPCParseError(FixedErrorMessageMixin, InvalidRequestError): jsonrpc_error_code = -32700 message = 'Parse error' class JSONRPCInvalidRequestError(FixedErrorMessageMixin, InvalidRequestError): jsonrpc_error_code = -32600 message = 'Invalid Request' class JSONRPCMethodNotFoundError(FixedErrorMessageMixin, MethodNotFoundError): jsonrpc_error_code = -32601 message = 'Method not found' class JSONRPCInvalidParamsError(FixedErrorMessageMixin, InvalidRequestError): jsonrpc_error_code = -32602 message = 'Invalid params' class JSONRPCInternalError(FixedErrorMessageMixin, InvalidRequestError): jsonrpc_error_code = -32603 message = 'Internal error' class JSONRPCServerError(FixedErrorMessageMixin, InvalidRequestError): jsonrpc_error_code = -32000 message = '' class JSONRPCSuccessResponse(RPCResponse): def _to_dict(self): return { 'jsonrpc': JSONRPCProtocol.JSON_RPC_VERSION, 'id': self.unique_id, 'result': self.result, } def serialize(self): return json.dumps(self._to_dict()) class JSONRPCErrorResponse(RPCErrorResponse): def _to_dict(self): return { 'jsonrpc': JSONRPCProtocol.JSON_RPC_VERSION, 'id': self.unique_id, 'error': { 'message': str(self.error), 'code': self._jsonrpc_error_code, } } def serialize(self): return json.dumps(self._to_dict()) def _get_code_and_message(error): assert isinstance(error, (Exception, basestring)) if isinstance(error, Exception): if 
hasattr(error, 'jsonrpc_error_code'): code = error.jsonrpc_error_code msg = str(error) elif isinstance(error, InvalidRequestError): code = JSONRPCInvalidRequestError.jsonrpc_error_code msg = JSONRPCInvalidRequestError.message elif isinstance(error, MethodNotFoundError): code = JSONRPCMethodNotFoundError.jsonrpc_error_code msg = JSONRPCMethodNotFoundError.message else: # allow exception message to propagate code = JSONRPCServerError.jsonrpc_error_code msg = str(error) else: code = -32000 msg = error return code, msg class JSONRPCRequest(RPCRequest): def error_respond(self, error): if not self.unique_id: return None response = JSONRPCErrorResponse() code, msg = _get_code_and_message(error) response.error = msg response.unique_id = self.unique_id response._jsonrpc_error_code = code return response def respond(self, result): response = JSONRPCSuccessResponse() if not self.unique_id: return None response.result = result response.unique_id = self.unique_id return response def _to_dict(self): jdata = { 'jsonrpc': JSONRPCProtocol.JSON_RPC_VERSION, 'method': self.method, } if self.args: jdata['params'] = self.args if self.kwargs: jdata['params'] = self.kwargs if self.unique_id != None: jdata['id'] = self.unique_id return jdata def serialize(self): return json.dumps(self._to_dict()) class JSONRPCBatchRequest(RPCBatchRequest): def create_batch_response(self): if self._expects_response(): return JSONRPCBatchResponse() def _expects_response(self): for request in self: if isinstance(request, Exception): return True if request.unique_id != None: return True return False def serialize(self): return json.dumps([req._to_dict() for req in self]) class JSONRPCBatchResponse(RPCBatchResponse): def serialize(self): return json.dumps([resp._to_dict() for resp in self if resp != None]) class JSONRPCProtocol(RPCBatchProtocol): """JSONRPC protocol implementation. 
Currently, only version 2.0 is supported.""" JSON_RPC_VERSION = "2.0" _ALLOWED_REPLY_KEYS = sorted(['id', 'jsonrpc', 'error', 'result']) _ALLOWED_REQUEST_KEYS = sorted(['id', 'jsonrpc', 'method', 'params']) def __init__(self, *args, **kwargs): super(JSONRPCProtocol, self).__init__(*args, **kwargs) self._id_counter = 0 def _get_unique_id(self): self._id_counter += 1 return self._id_counter def create_batch_request(self, requests=None): return JSONRPCBatchRequest(requests or []) def create_request(self, method, args=None, kwargs=None, one_way=False): if args and kwargs: raise InvalidRequestError('Does not support args and kwargs at '\ 'the same time') request = JSONRPCRequest() if not one_way: request.unique_id = self._get_unique_id() request.method = method request.args = args request.kwargs = kwargs return request def parse_reply(self, data): try: rep = json.loads(data) except Exception as e: raise InvalidReplyError(e) for k in rep.iterkeys(): if not k in self._ALLOWED_REPLY_KEYS: raise InvalidReplyError('Key not allowed: %s' % k) if not 'jsonrpc' in rep: raise InvalidReplyError('Missing jsonrpc (version) in response.') if rep['jsonrpc'] != self.JSON_RPC_VERSION: raise InvalidReplyError('Wrong JSONRPC version') if not 'id' in rep: raise InvalidReplyError('Missing id in response') if ('error' in rep) == ('result' in rep): raise InvalidReplyError( 'Reply must contain exactly one of result and error.' 
) if 'error' in rep: response = JSONRPCErrorResponse() error = rep['error'] response.error = error['message'] response._jsonrpc_error_code = error['code'] else: response = JSONRPCSuccessResponse() response.result = rep.get('result', None) response.unique_id = rep['id'] return response def parse_request(self, data): try: req = json.loads(data) except Exception as e: raise JSONRPCParseError() if isinstance(req, list): # batch request requests = JSONRPCBatchRequest() for subreq in req: try: requests.append(self._parse_subrequest(subreq)) except RPCError as e: requests.append(e) except Exception as e: requests.append(JSONRPCInvalidRequestError()) if not requests: raise JSONRPCInvalidRequestError() return requests else: return self._parse_subrequest(req) def _parse_subrequest(self, req): for k in req.iterkeys(): if not k in self._ALLOWED_REQUEST_KEYS: raise JSONRPCInvalidRequestError() if req.get('jsonrpc', None) != self.JSON_RPC_VERSION: raise JSONRPCInvalidRequestError() if not isinstance(req['method'], basestring): raise JSONRPCInvalidRequestError() request = JSONRPCRequest() request.method = str(req['method']) request.unique_id = req.get('id', None) params = req.get('params', None) if params != None: if isinstance(params, list): request.args = req['params'] elif isinstance(params, dict): request.kwargs = req['params'] else: raise JSONRPCInvalidParamsError() return request
apache-2.0
mfazliazran/raft
thirdparty/pdfminer/pdfminer/runlength.py
67
1666
#!/usr/bin/env python2 # # RunLength decoder (Adobe version) implementation based on PDF Reference # version 1.4 section 3.3.4. # # * public domain * # import sys def rldecode(data): """ RunLength decoder (Adobe version) implementation based on PDF Reference version 1.4 section 3.3.4: The RunLengthDecode filter decodes data that has been encoded in a simple byte-oriented format based on run length. The encoded data is a sequence of runs, where each run consists of a length byte followed by 1 to 128 bytes of data. If the length byte is in the range 0 to 127, the following length + 1 (1 to 128) bytes are copied literally during decompression. If length is in the range 129 to 255, the following single byte is to be copied 257 - length (2 to 128) times during decompression. A length value of 128 denotes EOD. >>> s = "\x05123456\xfa7\x04abcde\x80junk" >>> rldecode(s) '1234567777777abcde' """ decoded = [] i=0 while i < len(data): #print "data[%d]=:%d:" % (i,ord(data[i])) length = ord(data[i]) if length == 128: break if length >= 0 and length < 128: run = data[i+1:(i+1)+(length+1)] #print "length=%d, run=%s" % (length+1,run) decoded.append(run) i = (i+1) + (length+1) if length > 128: run = data[i+1]*(257-length) #print "length=%d, run=%s" % (257-length,run) decoded.append(run) i = (i+1) + 1 return ''.join(decoded) if __name__ == '__main__': import doctest doctest.testmod()
gpl-3.0
spcui/avocado-vt
virttest/libvirt_xml/devices/watchdog.py
21
1026
""" watchdog device support class(es) http://libvirt.org/formatdomain.html#elementsWatchdog """ from virttest.libvirt_xml import accessors from virttest.libvirt_xml.devices import base class Watchdog(base.UntypedDeviceBase): __slots__ = ('model_type', 'action', 'address') def __init__(self, virsh_instance=base.base.virsh): accessors.XMLAttribute('model_type', self, parent_xpath='/', tag_name='watchdog', attribute='model') accessors.XMLAttribute('action', self, parent_xpath='/', tag_name='watchdog', attribute='action') accessors.XMLElementDict('address', self, parent_xpath='/', tag_name='address') super(Watchdog, self).__init__(device_tag='watchdog', virsh_instance=virsh_instance)
gpl-2.0
ns950/calibre
src/calibre/gui2/convert/look_and_feel.py
13
5251
#!/usr/bin/env python2 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import with_statement __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' from PyQt5.Qt import Qt from calibre.gui2.convert.look_and_feel_ui import Ui_Form from calibre.gui2.convert import Widget class LookAndFeelWidget(Widget, Ui_Form): TITLE = _('Look & Feel') ICON = I('lookfeel.png') HELP = _('Control the look and feel of the output') COMMIT_NAME = 'look_and_feel' FILTER_CSS = { 'fonts': {'font-family'}, 'margins': {'margin', 'margin-left', 'margin-right', 'margin-top', 'margin-bottom'}, 'padding': {'padding', 'padding-left', 'padding-right', 'padding-top', 'padding-bottom'}, 'floats': {'float'}, 'colors': {'color', 'background', 'background-color'}, } def __init__(self, parent, get_option, get_help, db=None, book_id=None): Widget.__init__(self, parent, ['change_justification', 'extra_css', 'base_font_size', 'font_size_mapping', 'line_height', 'minimum_line_height', 'embed_font_family', 'embed_all_fonts', 'subset_embedded_fonts', 'smarten_punctuation', 'unsmarten_punctuation', 'disable_font_rescaling', 'insert_blank_line', 'remove_paragraph_spacing', 'remove_paragraph_spacing_indent_size', 'insert_blank_line_size', 'input_encoding', 'filter_css', 'expand_css', 'asciiize', 'keep_ligatures', 'linearize_tables'] ) for val, text in [ ('original', _('Original')), ('left', _('Left align')), ('justify', _('Justify text')) ]: self.opt_change_justification.addItem(text, (val)) self.db, self.book_id = db, book_id self.initialize_options(get_option, get_help, db, book_id) self.opt_disable_font_rescaling.toggle() self.opt_disable_font_rescaling.toggle() self.button_font_key.clicked.connect(self.font_key_wizard) self.opt_remove_paragraph_spacing.toggle() self.opt_remove_paragraph_spacing.toggle() self.opt_smarten_punctuation.stateChanged.connect( lambda state: state != Qt.Unchecked and 
self.opt_unsmarten_punctuation.setCheckState(Qt.Unchecked)) self.opt_unsmarten_punctuation.stateChanged.connect( lambda state: state != Qt.Unchecked and self.opt_smarten_punctuation.setCheckState(Qt.Unchecked)) def get_value_handler(self, g): if g is self.opt_change_justification: ans = unicode(g.itemData(g.currentIndex()) or '') return ans if g is self.opt_filter_css: ans = set() for key, item in self.FILTER_CSS.iteritems(): w = getattr(self, 'filter_css_%s'%key) if w.isChecked(): ans = ans.union(item) ans = ans.union(set([x.strip().lower() for x in unicode(self.filter_css_others.text()).split(',')])) return ','.join(ans) if ans else None if g is self.opt_font_size_mapping: val = unicode(g.text()).strip() val = [x.strip() for x in val.split(',' if ',' in val else ' ') if x.strip()] return ', '.join(val) or None return Widget.get_value_handler(self, g) def set_value_handler(self, g, val): if g is self.opt_change_justification: for i in range(g.count()): c = unicode(g.itemData(i) or '') if val == c: g.setCurrentIndex(i) break return True if g is self.opt_filter_css: if not val: val = '' items = frozenset([x.strip().lower() for x in val.split(',')]) for key, vals in self.FILTER_CSS.iteritems(): w = getattr(self, 'filter_css_%s'%key) if not vals - items: items = items - vals w.setChecked(True) else: w.setChecked(False) self.filter_css_others.setText(', '.join(items)) return True def connect_gui_obj_handler(self, gui_obj, slot): if gui_obj is self.opt_filter_css: for key in self.FILTER_CSS: w = getattr(self, 'filter_css_%s'%key) w.stateChanged.connect(slot) self.filter_css_others.textChanged.connect(slot) return raise NotImplementedError() def font_key_wizard(self): from calibre.gui2.convert.font_key import FontKeyChooser d = FontKeyChooser(self, self.opt_base_font_size.value(), unicode(self.opt_font_size_mapping.text()).strip()) if d.exec_() == d.Accepted: self.opt_font_size_mapping.setText(', '.join(['%.1f'%x for x in d.fsizes])) 
self.opt_base_font_size.setValue(d.dbase)
gpl-3.0
kcpawan/django
tests/auth_tests/models/custom_permissions.py
295
1433
""" The CustomPermissionsUser users email as the identifier, but uses the normal Django permissions model. This allows us to check that the PermissionsMixin includes everything that is needed to interact with the ModelBackend. """ from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin from django.contrib.auth.tests.custom_user import ( CustomUserManager, RemoveGroupsAndPermissions, ) from django.db import models from django.utils.encoding import python_2_unicode_compatible class CustomPermissionsUserManager(CustomUserManager): def create_superuser(self, email, password, date_of_birth): u = self.create_user(email, password=password, date_of_birth=date_of_birth) u.is_superuser = True u.save(using=self._db) return u with RemoveGroupsAndPermissions(): @python_2_unicode_compatible class CustomPermissionsUser(AbstractBaseUser, PermissionsMixin): email = models.EmailField(verbose_name='email address', max_length=255, unique=True) date_of_birth = models.DateField() custom_objects = CustomPermissionsUserManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['date_of_birth'] class Meta: app_label = 'auth' def get_full_name(self): return self.email def get_short_name(self): return self.email def __str__(self): return self.email
bsd-3-clause
axbaretto/beam
sdks/python/.tox/lint/lib/python2.7/site-packages/nose/twistedtools.py
86
5525
""" Twisted integration ------------------- This module provides a very simple way to integrate your tests with the Twisted_ event loop. You must import this module *before* importing anything from Twisted itself! Example:: from nose.twistedtools import reactor, deferred @deferred() def test_resolve(): return reactor.resolve("www.python.org") Or, more realistically:: @deferred(timeout=5.0) def test_resolve(): d = reactor.resolve("www.python.org") def check_ip(ip): assert ip == "67.15.36.43" d.addCallback(check_ip) return d .. _Twisted: http://twistedmatrix.com/trac/ """ import sys from Queue import Queue, Empty from nose.tools import make_decorator, TimeExpired __all__ = [ 'threaded_reactor', 'reactor', 'deferred', 'TimeExpired', 'stop_reactor' ] _twisted_thread = None def threaded_reactor(): """ Start the Twisted reactor in a separate thread, if not already done. Returns the reactor. The thread will automatically be destroyed when all the tests are done. """ global _twisted_thread try: from twisted.internet import reactor except ImportError: return None, None if not _twisted_thread: from twisted.python import threadable from threading import Thread _twisted_thread = Thread(target=lambda: reactor.run( \ installSignalHandlers=False)) _twisted_thread.setDaemon(True) _twisted_thread.start() return reactor, _twisted_thread # Export global reactor variable, as Twisted does reactor, reactor_thread = threaded_reactor() def stop_reactor(): """Stop the reactor and join the reactor thread until it stops. Call this function in teardown at the module or package level to reset the twisted system after your tests. You *must* do this if you mix tests using these tools and tests using twisted.trial. 
""" global _twisted_thread def stop_reactor(): '''Helper for calling stop from withing the thread.''' reactor.stop() reactor.callFromThread(stop_reactor) reactor_thread.join() for p in reactor.getDelayedCalls(): if p.active(): p.cancel() _twisted_thread = None def deferred(timeout=None): """ By wrapping a test function with this decorator, you can return a twisted Deferred and the test will wait for the deferred to be triggered. The whole test function will run inside the Twisted event loop. The optional timeout parameter specifies the maximum duration of the test. The difference with timed() is that timed() will still wait for the test to end, while deferred() will stop the test when its timeout has expired. The latter is more desireable when dealing with network tests, because the result may actually never arrive. If the callback is triggered, the test has passed. If the errback is triggered or the timeout expires, the test has failed. Example:: @deferred(timeout=5.0) def test_resolve(): return reactor.resolve("www.python.org") Attention! If you combine this decorator with other decorators (like "raises"), deferred() must be called *first*! 
In other words, this is good:: @raises(DNSLookupError) @deferred() def test_error(): return reactor.resolve("xxxjhjhj.biz") and this is bad:: @deferred() @raises(DNSLookupError) def test_error(): return reactor.resolve("xxxjhjhj.biz") """ reactor, reactor_thread = threaded_reactor() if reactor is None: raise ImportError("twisted is not available or could not be imported") # Check for common syntax mistake # (otherwise, tests can be silently ignored # if one writes "@deferred" instead of "@deferred()") try: timeout is None or timeout + 0 except TypeError: raise TypeError("'timeout' argument must be a number or None") def decorate(func): def wrapper(*args, **kargs): q = Queue() def callback(value): q.put(None) def errback(failure): # Retrieve and save full exception info try: failure.raiseException() except: q.put(sys.exc_info()) def g(): try: d = func(*args, **kargs) try: d.addCallbacks(callback, errback) # Check for a common mistake and display a nice error # message except AttributeError: raise TypeError("you must return a twisted Deferred " "from your test case!") # Catch exceptions raised in the test body (from the # Twisted thread) except: q.put(sys.exc_info()) reactor.callFromThread(g) try: error = q.get(timeout=timeout) except Empty: raise TimeExpired("timeout expired before end of test (%f s.)" % timeout) # Re-raise all exceptions if error is not None: exc_type, exc_value, tb = error raise exc_type, exc_value, tb wrapper = make_decorator(func)(wrapper) return wrapper return decorate
apache-2.0
lwiecek/django
django/test/html.py
56
7943
""" Comparing two html documents. """ from __future__ import unicode_literals import re from django.utils import six from django.utils.encoding import force_text, python_2_unicode_compatible from django.utils.html_parser import HTMLParseError, HTMLParser WHITESPACE = re.compile('\s+') def normalize_whitespace(string): return WHITESPACE.sub(' ', string) @python_2_unicode_compatible class Element(object): def __init__(self, name, attributes): self.name = name self.attributes = sorted(attributes) self.children = [] def append(self, element): if isinstance(element, six.string_types): element = force_text(element) element = normalize_whitespace(element) if self.children: if isinstance(self.children[-1], six.string_types): self.children[-1] += element self.children[-1] = normalize_whitespace(self.children[-1]) return elif self.children: # removing last children if it is only whitespace # this can result in incorrect dom representations since # whitespace between inline tags like <span> is significant if isinstance(self.children[-1], six.string_types): if self.children[-1].isspace(): self.children.pop() if element: self.children.append(element) def finalize(self): def rstrip_last_element(children): if children: if isinstance(children[-1], six.string_types): children[-1] = children[-1].rstrip() if not children[-1]: children.pop() children = rstrip_last_element(children) return children rstrip_last_element(self.children) for i, child in enumerate(self.children): if isinstance(child, six.string_types): self.children[i] = child.strip() elif hasattr(child, 'finalize'): child.finalize() def __eq__(self, element): if not hasattr(element, 'name'): return False if hasattr(element, 'name') and self.name != element.name: return False if len(self.attributes) != len(element.attributes): return False if self.attributes != element.attributes: # attributes without a value is same as attribute with value that # equals the attributes name: # <input checked> == <input checked="checked"> for 
i in range(len(self.attributes)): attr, value = self.attributes[i] other_attr, other_value = element.attributes[i] if value is None: value = attr if other_value is None: other_value = other_attr if attr != other_attr or value != other_value: return False if self.children != element.children: return False return True def __hash__(self): return hash((self.name,) + tuple(a for a in self.attributes)) def __ne__(self, element): return not self.__eq__(element) def _count(self, element, count=True): if not isinstance(element, six.string_types): if self == element: return 1 i = 0 for child in self.children: # child is text content and element is also text content, then # make a simple "text" in "text" if isinstance(child, six.string_types): if isinstance(element, six.string_types): if count: i += child.count(element) elif element in child: return 1 else: i += child._count(element, count=count) if not count and i: return i return i def __contains__(self, element): return self._count(element, count=False) > 0 def count(self, element): return self._count(element, count=True) def __getitem__(self, key): return self.children[key] def __str__(self): output = '<%s' % self.name for key, value in self.attributes: if value: output += ' %s="%s"' % (key, value) else: output += ' %s' % key if self.children: output += '>\n' output += ''.join(six.text_type(c) for c in self.children) output += '\n</%s>' % self.name else: output += ' />' return output def __repr__(self): return six.text_type(self) @python_2_unicode_compatible class RootElement(Element): def __init__(self): super(RootElement, self).__init__(None, ()) def __str__(self): return ''.join(six.text_type(c) for c in self.children) class Parser(HTMLParser): SELF_CLOSING_TAGS = ( 'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base', 'col', ) def __init__(self): HTMLParser.__init__(self) self.root = RootElement() self.open_tags = [] self.element_positions = {} def error(self, msg): raise HTMLParseError(msg, 
self.getpos()) def format_position(self, position=None, element=None): if not position and element: position = self.element_positions[element] if position is None: position = self.getpos() if hasattr(position, 'lineno'): position = position.lineno, position.offset return 'Line %d, Column %d' % position @property def current(self): if self.open_tags: return self.open_tags[-1] else: return self.root def handle_startendtag(self, tag, attrs): self.handle_starttag(tag, attrs) if tag not in self.SELF_CLOSING_TAGS: self.handle_endtag(tag) def handle_starttag(self, tag, attrs): # Special case handling of 'class' attribute, so that comparisons of DOM # instances are not sensitive to ordering of classes. attrs = [ (name, " ".join(sorted(value.split(" ")))) if name == "class" else (name, value) for name, value in attrs ] element = Element(tag, attrs) self.current.append(element) if tag not in self.SELF_CLOSING_TAGS: self.open_tags.append(element) self.element_positions[element] = self.getpos() def handle_endtag(self, tag): if not self.open_tags: self.error("Unexpected end tag `%s` (%s)" % ( tag, self.format_position())) element = self.open_tags.pop() while element.name != tag: if not self.open_tags: self.error("Unexpected end tag `%s` (%s)" % ( tag, self.format_position())) element = self.open_tags.pop() def handle_data(self, data): self.current.append(data) def handle_charref(self, name): self.current.append('&%s;' % name) def handle_entityref(self, name): self.current.append('&%s;' % name) def parse_html(html): """ Takes a string that contains *valid* HTML and turns it into a Python object structure that can be easily compared against other HTML on semantic equivalence. Syntactical differences like which quotation is used on arguments will be ignored. 
""" parser = Parser() parser.feed(html) parser.close() document = parser.root document.finalize() # Removing ROOT element if it's not necessary if len(document.children) == 1: if not isinstance(document.children[0], six.string_types): document = document.children[0] return document
bsd-3-clause
Jaemu/haiku.py
yaml/events.py
985
2445
# Abstract classes. class Event(object): def __init__(self, start_mark=None, end_mark=None): self.start_mark = start_mark self.end_mark = end_mark def __repr__(self): attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] if hasattr(self, key)] arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes]) return '%s(%s)' % (self.__class__.__name__, arguments) class NodeEvent(Event): def __init__(self, anchor, start_mark=None, end_mark=None): self.anchor = anchor self.start_mark = start_mark self.end_mark = end_mark class CollectionStartEvent(NodeEvent): def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, flow_style=None): self.anchor = anchor self.tag = tag self.implicit = implicit self.start_mark = start_mark self.end_mark = end_mark self.flow_style = flow_style class CollectionEndEvent(Event): pass # Implementations. class StreamStartEvent(Event): def __init__(self, start_mark=None, end_mark=None, encoding=None): self.start_mark = start_mark self.end_mark = end_mark self.encoding = encoding class StreamEndEvent(Event): pass class DocumentStartEvent(Event): def __init__(self, start_mark=None, end_mark=None, explicit=None, version=None, tags=None): self.start_mark = start_mark self.end_mark = end_mark self.explicit = explicit self.version = version self.tags = tags class DocumentEndEvent(Event): def __init__(self, start_mark=None, end_mark=None, explicit=None): self.start_mark = start_mark self.end_mark = end_mark self.explicit = explicit class AliasEvent(NodeEvent): pass class ScalarEvent(NodeEvent): def __init__(self, anchor, tag, implicit, value, start_mark=None, end_mark=None, style=None): self.anchor = anchor self.tag = tag self.implicit = implicit self.value = value self.start_mark = start_mark self.end_mark = end_mark self.style = style class SequenceStartEvent(CollectionStartEvent): pass class SequenceEndEvent(CollectionEndEvent): pass class MappingStartEvent(CollectionStartEvent): pass class 
MappingEndEvent(CollectionEndEvent): pass
apache-2.0
NewpTone/stacklab-nova
nova/tests/compute/test_stats.py
8
7803
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for compute node stats""" from nova.compute import stats from nova.compute import task_states from nova.compute import vm_states from nova import db from nova import exception from nova import test class StatsTestCase(test.TestCase): def setUp(self): super(StatsTestCase, self).setUp() self.stats = stats.Stats() def _create_instance(self, values=None): instance = { "os_type": "Linux", "project_id": "1234", "task_state": None, "vm_state": vm_states.BUILDING, "vcpus": 1, "uuid": "12-34-56-78-90", } if values: instance.update(values) return instance def test_os_type_count(self): os_type = "Linux" self.assertEqual(0, self.stats.num_os_type(os_type)) self.stats._increment("num_os_type_" + os_type) self.stats._increment("num_os_type_" + os_type) self.stats._increment("num_os_type_Vax") self.assertEqual(2, self.stats.num_os_type(os_type)) self.stats["num_os_type_" + os_type] -= 1 self.assertEqual(1, self.stats.num_os_type(os_type)) def test_update_project_count(self): proj_id = "1234" def _get(): return self.stats.num_instances_for_project(proj_id) self.assertEqual(0, _get()) self.stats._increment("num_proj_" + proj_id) self.assertEqual(1, _get()) self.stats["num_proj_" + proj_id] -= 1 self.assertEqual(0, _get()) def test_instance_count(self): self.assertEqual(0, self.stats.num_instances) for i in range(5): 
self.stats._increment("num_instances") self.stats["num_instances"] -= 1 self.assertEqual(4, self.stats.num_instances) def test_add_stats_for_instance(self): instance = { "os_type": "Linux", "project_id": "1234", "task_state": None, "vm_state": vm_states.BUILDING, "vcpus": 3, "uuid": "12-34-56-78-90", } self.stats.update_stats_for_instance(instance) instance = { "os_type": "FreeBSD", "project_id": "1234", "task_state": task_states.SCHEDULING, "vm_state": None, "vcpus": 1, "uuid": "23-45-67-89-01", } self.stats.update_stats_for_instance(instance) instance = { "os_type": "Linux", "project_id": "2345", "task_state": task_states.SCHEDULING, "vm_state": vm_states.BUILDING, "vcpus": 2, "uuid": "34-56-78-90-12", } self.stats.update_stats_for_instance(instance) self.assertEqual(2, self.stats.num_os_type("Linux")) self.assertEqual(1, self.stats.num_os_type("FreeBSD")) self.assertEquals(2, self.stats.num_instances_for_project("1234")) self.assertEquals(1, self.stats.num_instances_for_project("2345")) self.assertEqual(1, self.stats["num_task_None"]) self.assertEqual(2, self.stats["num_task_" + task_states.SCHEDULING]) self.assertEqual(1, self.stats["num_vm_None"]) self.assertEqual(2, self.stats["num_vm_" + vm_states.BUILDING]) self.assertEqual(6, self.stats.num_vcpus_used) def test_calculate_workload(self): self.stats._increment("num_task_None") self.stats._increment("num_task_" + task_states.SCHEDULING) self.stats._increment("num_task_" + task_states.SCHEDULING) self.assertEqual(2, self.stats.calculate_workload()) def test_update_stats_for_instance_no_change(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.stats.update_stats_for_instance(instance) # no change self.assertEqual(1, self.stats.num_instances) self.assertEqual(1, self.stats.num_instances_for_project("1234")) self.assertEqual(1, self.stats["num_os_type_Linux"]) self.assertEqual(1, self.stats["num_task_None"]) self.assertEqual(1, self.stats["num_vm_" + vm_states.BUILDING]) 
def test_update_stats_for_instance_vm_change(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) instance["vm_state"] = vm_states.PAUSED self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances) self.assertEqual(1, self.stats.num_instances_for_project(1234)) self.assertEqual(1, self.stats["num_os_type_Linux"]) self.assertEqual(0, self.stats["num_vm_%s" % vm_states.BUILDING]) self.assertEqual(1, self.stats["num_vm_%s" % vm_states.PAUSED]) def test_update_stats_for_instance_task_change(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) instance["task_state"] = task_states.REBUILDING self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats.num_instances) self.assertEqual(1, self.stats.num_instances_for_project("1234")) self.assertEqual(1, self.stats["num_os_type_Linux"]) self.assertEqual(0, self.stats["num_task_None"]) self.assertEqual(1, self.stats["num_task_%s" % task_states.REBUILDING]) def test_update_stats_for_instance_deleted(self): instance = self._create_instance() self.stats.update_stats_for_instance(instance) self.assertEqual(1, self.stats["num_proj_1234"]) instance["vm_state"] = vm_states.DELETED self.stats.update_stats_for_instance(instance) self.assertEqual(0, self.stats.num_instances) self.assertEqual(0, self.stats.num_instances_for_project("1234")) self.assertEqual(0, self.stats.num_os_type("Linux")) self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING]) self.assertEqual(0, self.stats.num_vcpus_used) def test_io_workload(self): vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED] tasks = [task_states.RESIZE_MIGRATING, task_states.REBUILDING, task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT, task_states.IMAGE_BACKUP, task_states.RESCUING] for state in vms: self.stats._increment("num_vm_" + state) for state in tasks: self.stats._increment("num_task_" + state) self.assertEqual(6, self.stats.io_workload) 
    def test_io_workload_saved_to_stats(self):
        # A single instance in an IO-heavy task state is reflected in the
        # persisted "io_workload" stat.  Expected value of 2 presumably comes
        # from both the task state and a default set by _create_instance —
        # TODO(review): confirm against Stats.update_stats_for_instance.
        values = {'task_state': task_states.RESIZE_MIGRATING}
        instance = self._create_instance(values)
        self.stats.update_stats_for_instance(instance)
        self.assertEqual(2, self.stats["io_workload"])

    def test_clear(self):
        # clear() must empty both the counter dict and the per-instance
        # state-tracking map.
        instance = self._create_instance()
        self.stats.update_stats_for_instance(instance)

        self.assertNotEqual(0, len(self.stats))
        self.assertEqual(1, len(self.stats.states))
        self.stats.clear()

        self.assertEqual(0, len(self.stats))
        self.assertEqual(0, len(self.stats.states))
apache-2.0
ytaben/cyphesis
rulesets/deeds/world/tasks/Drying.py
3
2009
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2011 Jekin Trivedi <jekintrivedi@gmail.com> (See the file COPYING for details).

# NOTE: sys is needed for the error path in cut_operation below; it was
# previously used without being imported (NameError on that path).
import sys

from atlas import *
from physics import *
from physics import Quaternion
from physics import Vector3D

import server


class Drying(server.Task):
    """ A task for Drying grass to fibre."""

    def cut_operation(self, op):
        """ Op handler for cut op which activates this task.

        op -- the cut operation; its first argument must carry the id of
              the entity to dry.
        """
        if len(op) < 1:
            sys.stderr.write("Drying task has no target in cut op")
            # Bail out: without a target argument, op[0] below would raise
            # IndexError (previously this fell through).
            return
        # FIXME Use weak references, once we have them
        self.target = server.world.get_object_ref(op[0].id)
        self.tool = op.to

    def tick_operation(self, op):
        """ Op handler for regular tick op.

        Advances drying progress each tick; when progress reaches 1 a
        "fibre" entity is created at the target's location and the target
        is destroyed (status set to -1).
        """
        if self.target() is None:
            # Target entity has been deleted; this task is obsolete.
            self.irrelevant()
            return

        self.rate = 0.2 / 0.75
        self.progress += 0.5

        # Character must be within the target's bounding radius to work.
        if square_distance(self.character.location, self.target().location) > self.target().location.bbox.square_bounding_radius():
            self.rate = 0
            return self.next_tick(0.75)

        res = Oplist()

        if self.progress < 1:
            # Not done yet; keep ticking.
            return self.next_tick(0.75)

        self.progress = 0

        # Spawn the fibre exactly where the target entity is.
        chunk_loc = self.target().location.copy()
        chunk_loc.coordinates = self.target().location.coordinates
        chunk_loc.orientation = self.target().location.orientation

        create = Operation("create",
                           Entity(name="fibre",
                                  type="fibre",
                                  location=chunk_loc),
                           to=self.target())
        res.append(create)
        # Renamed from `set` to avoid shadowing the builtin.
        set_op = Operation("set",
                           Entity(self.target().id, status=-1),
                           to=self.target())
        res.append(set_op)
        res.append(self.next_tick(0.75))
        return res
gpl-2.0
Gateworks/platform-external-chromium_org
tools/find_depot_tools.py
74
1374
# Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Small utility function to find depot_tools and add it to the python path. Will throw an ImportError exception if depot_tools can't be found since it imports breakpad. """ import os import sys def add_depot_tools_to_path(): """Search for depot_tools and add it to sys.path.""" # First look if depot_tools is already in PYTHONPATH. for i in sys.path: if i.rstrip(os.sep).endswith('depot_tools'): return i # Then look if depot_tools is in PATH, common case. for i in os.environ['PATH'].split(os.pathsep): if i.rstrip(os.sep).endswith('depot_tools'): sys.path.append(i.rstrip(os.sep)) return i # Rare case, it's not even in PATH, look upward up to root. root_dir = os.path.dirname(os.path.abspath(__file__)) previous_dir = os.path.abspath(__file__) while root_dir and root_dir != previous_dir: if os.path.isfile(os.path.join(root_dir, 'depot_tools', 'breakpad.py')): i = os.path.join(root_dir, 'depot_tools') sys.path.append(i) return i previous_dir = root_dir root_dir = os.path.dirname(root_dir) print >> sys.stderr, 'Failed to find depot_tools' return None add_depot_tools_to_path() # pylint: disable=W0611 import breakpad
bsd-3-clause
vmanoria/bluemix-hue-filebrowser
hue-3.8.1-bluemix/desktop/core/ext-py/Django-1.6.10/django/contrib/gis/gdal/error.py
466
1517
""" This module houses the OGR & SRS Exception objects, and the check_err() routine which checks the status code returned by OGR methods. """ #### OGR & SRS Exceptions #### class GDALException(Exception): pass class OGRException(Exception): pass class SRSException(Exception): pass class OGRIndexError(OGRException, KeyError): """ This exception is raised when an invalid index is encountered, and has the 'silent_variable_feature' attribute set to true. This ensures that django's templates proceed to use the next lookup type gracefully when an Exception is raised. Fixes ticket #4740. """ silent_variable_failure = True #### OGR error checking codes and routine #### # OGR Error Codes OGRERR_DICT = { 1 : (OGRException, 'Not enough data.'), 2 : (OGRException, 'Not enough memory.'), 3 : (OGRException, 'Unsupported geometry type.'), 4 : (OGRException, 'Unsupported operation.'), 5 : (OGRException, 'Corrupt data.'), 6 : (OGRException, 'OGR failure.'), 7 : (SRSException, 'Unsupported SRS.'), 8 : (OGRException, 'Invalid handle.'), } OGRERR_NONE = 0 def check_err(code): "Checks the given OGRERR, and raises an exception where appropriate." if code == OGRERR_NONE: return elif code in OGRERR_DICT: e, msg = OGRERR_DICT[code] raise e(msg) else: raise OGRException('Unknown error code: "%s"' % code)
gpl-2.0
roadmapper/ansible
test/support/windows-integration/plugins/become/runas.py
75
2457
# -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ become: runas short_description: Run As user description: - This become plugins allows your remote/login user to execute commands as another user via the windows runas facility. author: ansible (@core) version_added: "2.8" options: become_user: description: User you 'become' to execute the task ini: - section: privilege_escalation key: become_user - section: runas_become_plugin key: user vars: - name: ansible_become_user - name: ansible_runas_user env: - name: ANSIBLE_BECOME_USER - name: ANSIBLE_RUNAS_USER required: True become_flags: description: Options to pass to runas, a space delimited list of k=v pairs default: '' ini: - section: privilege_escalation key: become_flags - section: runas_become_plugin key: flags vars: - name: ansible_become_flags - name: ansible_runas_flags env: - name: ANSIBLE_BECOME_FLAGS - name: ANSIBLE_RUNAS_FLAGS become_pass: description: password ini: - section: runas_become_plugin key: password vars: - name: ansible_become_password - name: ansible_become_pass - name: ansible_runas_pass env: - name: ANSIBLE_BECOME_PASS - name: ANSIBLE_RUNAS_PASS notes: - runas is really implemented in the powershell module handler and as such can only be used with winrm connections. - This plugin ignores the 'become_exe' setting as it uses an API and not an executable. - The Secondary Logon service (seclogon) must be running to use runas """ from ansible.plugins.become import BecomeBase class BecomeModule(BecomeBase): name = 'runas' def build_become_command(self, cmd, shell): # runas is implemented inside the winrm connection plugin return cmd
gpl-3.0
lmprice/ansible
contrib/inventory/digital_ocean.py
23
21119
#!/usr/bin/env python """ DigitalOcean external inventory script ====================================== Generates Ansible inventory of DigitalOcean Droplets. In addition to the --list and --host options used by Ansible, there are options for generating JSON of other DigitalOcean data. This is useful when creating droplets. For example, --regions will return all the DigitalOcean Regions. This information can also be easily found in the cache file, whose default location is /tmp/ansible-digital_ocean.cache). The --pretty (-p) option pretty-prints the output for better human readability. ---- Although the cache stores all the information received from DigitalOcean, the cache is not used for current droplet information (in --list, --host, --all, and --droplets). This is so that accurate droplet information is always found. You can force this script to use the cache with --force-cache. ---- Configuration is read from `digital_ocean.ini`, then from environment variables, and then from command-line arguments. Most notably, the DigitalOcean API Token must be specified. It can be specified in the INI file or with the following environment variables: export DO_API_TOKEN='abc123' or export DO_API_KEY='abc123' Alternatively, it can be passed on the command-line with --api-token. 
If you specify DigitalOcean credentials in the INI file, a handy way to get them into your environment (e.g., to use the digital_ocean module) is to use the output of the --env option with export: export $(digital_ocean.py --env) ---- The following groups are generated from --list: - ID (droplet ID) - NAME (droplet NAME) - digital_ocean - image_ID - image_NAME - distro_NAME (distribution NAME from image) - region_NAME - size_NAME - status_STATUS For each host, the following variables are registered: - do_backup_ids - do_created_at - do_disk - do_features - list - do_id - do_image - object - do_ip_address - do_private_ip_address - do_kernel - object - do_locked - do_memory - do_name - do_networks - object - do_next_backup_window - do_region - object - do_size - object - do_size_slug - do_snapshot_ids - list - do_status - do_tags - do_vcpus - do_volume_ids ----- ``` usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets] [--regions] [--images] [--sizes] [--ssh-keys] [--domains] [--tags] [--pretty] [--cache-path CACHE_PATH] [--cache-max_age CACHE_MAX_AGE] [--force-cache] [--refresh-cache] [--env] [--api-token API_TOKEN] Produce an Ansible Inventory file based on DigitalOcean credentials optional arguments: -h, --help show this help message and exit --list List all active Droplets as Ansible inventory (default: True) --host HOST Get all Ansible inventory variables about a specific Droplet --all List all DigitalOcean information as JSON --droplets, -d List Droplets as JSON --regions List Regions as JSON --images List Images as JSON --sizes List Sizes as JSON --ssh-keys List SSH keys as JSON --domains List Domains as JSON --tags List Tags as JSON --pretty, -p Pretty-print results --cache-path CACHE_PATH Path to the cache files (default: .) 
--cache-max_age CACHE_MAX_AGE Maximum age of the cached items (default: 0) --force-cache Only use data from the cache --refresh-cache, -r Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files) --env, -e Display DO_API_TOKEN --api-token API_TOKEN, -a API_TOKEN DigitalOcean API Token ``` """ # (c) 2013, Evan Wies <evan@neomantra.net> # (c) 2017, Ansible Project # (c) 2017, Abhijeet Kasurde <akasurde@redhat.com> # # Inspired by the EC2 inventory plugin: # https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
###################################################################### import argparse import ast import os import re import requests import sys from time import time try: import ConfigParser except ImportError: import configparser as ConfigParser try: import json except ImportError: import simplejson as json class DoManager: def __init__(self, api_token): self.api_token = api_token self.api_endpoint = 'https://api.digitalocean.com/v2' self.headers = {'Authorization': 'Bearer {0}'.format(self.api_token), 'Content-type': 'application/json'} self.timeout = 60 def _url_builder(self, path): if path[0] == '/': path = path[1:] return '%s/%s' % (self.api_endpoint, path) def send(self, url, method='GET', data=None): url = self._url_builder(url) data = json.dumps(data) try: if method == 'GET': resp_data = {} incomplete = True while incomplete: resp = requests.get(url, data=data, headers=self.headers, timeout=self.timeout) json_resp = resp.json() for key, value in json_resp.items(): if isinstance(value, list) and key in resp_data: resp_data[key] += value else: resp_data[key] = value try: url = json_resp['links']['pages']['next'] except KeyError: incomplete = False except ValueError as e: sys.exit("Unable to parse result from %s: %s" % (url, e)) return resp_data def all_active_droplets(self): resp = self.send('droplets/') return resp['droplets'] def all_regions(self): resp = self.send('regions/') return resp['regions'] def all_images(self, filter_name='global'): params = {'filter': filter_name} resp = self.send('images/', data=params) return resp['images'] def sizes(self): resp = self.send('sizes/') return resp['sizes'] def all_ssh_keys(self): resp = self.send('account/keys') return resp['ssh_keys'] def all_domains(self): resp = self.send('domains/') return resp['domains'] def show_droplet(self, droplet_id): resp = self.send('droplets/%s' % droplet_id) return resp['droplet'] def all_tags(self): resp = self.send('tags/') return resp['tags'] class DigitalOceanInventory(object): 
    ###########################################################################
    # Main execution path
    ###########################################################################

    def __init__(self):
        """Main execution path """

        # DigitalOceanInventory data
        self.data = {}  # All DigitalOcean data
        self.inventory = {}  # Ansible Inventory

        # Define defaults
        self.cache_path = '.'
        self.cache_max_age = 0
        self.use_private_network = False
        self.group_variables = {}

        # Read settings, environment variables, and CLI arguments
        # (later sources override earlier ones: ini < env < CLI).
        self.read_settings()
        self.read_environment()
        self.read_cli_args()

        # Verify credentials were set
        if not hasattr(self, 'api_token'):
            msg = 'Could not find values for DigitalOcean api_token. They must be specified via either ini file, ' \
                  'command line argument (--api-token), or environment variables (DO_API_TOKEN)\n'
            sys.stderr.write(msg)
            sys.exit(-1)

        # env command, show DigitalOcean credentials
        if self.args.env:
            print("DO_API_TOKEN=%s" % self.api_token)
            sys.exit(0)

        # Manage cache
        self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache"
        self.cache_refreshed = False

        if self.is_cache_valid():
            self.load_from_cache()
            if len(self.data) == 0:
                if self.args.force_cache:
                    sys.stderr.write('Cache is empty and --force-cache was specified\n')
                    sys.exit(-1)

        self.manager = DoManager(self.api_token)

        # Pick the json_data to print based on the CLI command
        if self.args.droplets:
            self.load_from_digital_ocean('droplets')
            json_data = {'droplets': self.data['droplets']}
        elif self.args.regions:
            self.load_from_digital_ocean('regions')
            json_data = {'regions': self.data['regions']}
        elif self.args.images:
            self.load_from_digital_ocean('images')
            json_data = {'images': self.data['images']}
        elif self.args.sizes:
            self.load_from_digital_ocean('sizes')
            json_data = {'sizes': self.data['sizes']}
        elif self.args.ssh_keys:
            self.load_from_digital_ocean('ssh_keys')
            json_data = {'ssh_keys': self.data['ssh_keys']}
        elif self.args.domains:
            self.load_from_digital_ocean('domains')
            json_data = {'domains': self.data['domains']}
        elif self.args.tags:
            self.load_from_digital_ocean('tags')
            json_data = {'tags': self.data['tags']}
        elif self.args.all:
            self.load_from_digital_ocean()
            json_data = self.data
        elif self.args.host:
            json_data = self.load_droplet_variables_for_host()
        else:    # '--list' this is last to make it default
            self.load_from_digital_ocean('droplets')
            self.build_inventory()
            json_data = self.inventory

        if self.cache_refreshed:
            self.write_to_cache()

        if self.args.pretty:
            print(json.dumps(json_data, indent=2))
        else:
            print(json.dumps(json_data))

    ###########################################################################
    # Script configuration
    ###########################################################################

    def read_settings(self):
        """ Reads the settings from the digital_ocean.ini file """
        config = ConfigParser.SafeConfigParser()
        config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'digital_ocean.ini')
        config.read(config_path)

        # Credentials
        if config.has_option('digital_ocean', 'api_token'):
            self.api_token = config.get('digital_ocean', 'api_token')

        # Cache related
        if config.has_option('digital_ocean', 'cache_path'):
            self.cache_path = config.get('digital_ocean', 'cache_path')
        if config.has_option('digital_ocean', 'cache_max_age'):
            self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')

        # Private IP Address
        if config.has_option('digital_ocean', 'use_private_network'):
            self.use_private_network = config.getboolean('digital_ocean', 'use_private_network')

        # Group variables
        if config.has_option('digital_ocean', 'group_variables'):
            # literal_eval parses the ini value as a Python dict literal.
            self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables'))

    def read_environment(self):
        """ Reads the settings from environment variables """
        # Setup credentials
        if os.getenv("DO_API_TOKEN"):
            self.api_token = os.getenv("DO_API_TOKEN")
        if os.getenv("DO_API_KEY"):
            self.api_token = os.getenv("DO_API_KEY")

    def read_cli_args(self):
        """ Command line argument processing """
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')

        parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')
        parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')

        parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
        parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON')
        parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
        parser.add_argument('--images', action='store_true', help='List Images as JSON')
        parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
        parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
        parser.add_argument('--domains', action='store_true', help='List Domains as JSON')
        parser.add_argument('--tags', action='store_true', help='List Tags as JSON')
        parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results')

        parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
        parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
        parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
        parser.add_argument('--refresh-cache', '-r', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')

        parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN')
        parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token')

        self.args = parser.parse_args()

        if self.args.api_token:
            self.api_token = self.args.api_token

        # Make --list default if none of the other commands are specified
        if (not self.args.droplets and not self.args.regions and
                not self.args.images and not self.args.sizes and
                not self.args.ssh_keys and not self.args.domains and
                not self.args.tags and
                not self.args.all and not self.args.host):
            self.args.list = True

    ###########################################################################
    # Data Management
    ###########################################################################

    def load_from_digital_ocean(self, resource=None):
        """Get JSON from DigitalOcean API """
        # resource=None means "fetch everything".
        if self.args.force_cache and os.path.isfile(self.cache_filename):
            return
        # We always get fresh droplets
        if self.is_cache_valid() and not (resource == 'droplets' or resource is None):
            return
        if self.args.refresh_cache:
            resource = None

        if resource == 'droplets' or resource is None:
            self.data['droplets'] = self.manager.all_active_droplets()
            self.cache_refreshed = True
        if resource == 'regions' or resource is None:
            self.data['regions'] = self.manager.all_regions()
            self.cache_refreshed = True
        if resource == 'images' or resource is None:
            self.data['images'] = self.manager.all_images()
            self.cache_refreshed = True
        if resource == 'sizes' or resource is None:
            self.data['sizes'] = self.manager.sizes()
            self.cache_refreshed = True
        if resource == 'ssh_keys' or resource is None:
            self.data['ssh_keys'] = self.manager.all_ssh_keys()
            self.cache_refreshed = True
        if resource == 'domains' or resource is None:
            self.data['domains'] = self.manager.all_domains()
            self.cache_refreshed = True
        if resource == 'tags' or resource is None:
            self.data['tags'] = self.manager.all_tags()
            self.cache_refreshed = True

    def build_inventory(self):
        """ Build Ansible inventory of droplets """
        self.inventory = {
            'all': {
                'hosts': [],
                'vars': self.group_variables
            },
            '_meta': {'hostvars': {}}
        }

        # add all droplets by id and name
        for droplet in self.data['droplets']:
            # NOTE(review): if a droplet has no public v4 network, `dest`
            # keeps the value from the previous droplet (or is unbound for
            # the first one) — presumably every droplet has a public
            # address; verify.
            for net in droplet['networks']['v4']:
                if net['type'] == 'public':
                    dest = net['ip_address']
                else:
                    continue

            self.inventory['all']['hosts'].append(dest)

            self.inventory[droplet['id']] = [dest]
            self.inventory[droplet['name']] = [dest]

            # groups that are always present
            for group in ('digital_ocean',
                          'region_' + droplet['region']['slug'],
                          'image_' + str(droplet['image']['id']),
                          'size_' + droplet['size']['slug'],
                          'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),
                          'status_' + droplet['status']):
                if group not in self.inventory:
                    self.inventory[group] = {'hosts': [], 'vars': {}}
                self.inventory[group]['hosts'].append(dest)

            # groups that are not always present
            for group in (droplet['image']['slug'],
                          droplet['image']['name']):
                if group:
                    image = 'image_' + DigitalOceanInventory.to_safe(group)
                    if image not in self.inventory:
                        self.inventory[image] = {'hosts': [], 'vars': {}}
                    self.inventory[image]['hosts'].append(dest)

            if droplet['tags']:
                for tag in droplet['tags']:
                    if tag not in self.inventory:
                        self.inventory[tag] = {'hosts': [], 'vars': {}}
                    self.inventory[tag]['hosts'].append(dest)

            # hostvars
            info = self.do_namespace(droplet)
            self.inventory['_meta']['hostvars'][dest] = info

    def load_droplet_variables_for_host(self):
        """ Generate a JSON response to a --host call """
        host = int(self.args.host)
        droplet = self.manager.show_droplet(host)
        info = self.do_namespace(droplet)
        return {'droplet': info}

    ###########################################################################
    # Cache Management
    ###########################################################################

    def is_cache_valid(self):
        """ Determines if the cache files have expired, or if it is still valid """
        if os.path.isfile(self.cache_filename):
            mod_time = os.path.getmtime(self.cache_filename)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                return True
        return False

    def load_from_cache(self):
        """ Reads the data from the cache file and assigns it to member variables as Python Objects """
        try:
            with open(self.cache_filename, 'r') as cache:
                json_data = cache.read()
            data = json.loads(json_data)
        except IOError:
            # Missing/unreadable cache is treated as empty, not fatal.
            data = {'data': {}, 'inventory': {}}

        self.data = data['data']
        self.inventory = data['inventory']

    def write_to_cache(self):
        """ Writes data in JSON format to a file """
        data = {'data': self.data, 'inventory': self.inventory}
        json_data = json.dumps(data, indent=2)

        with open(self.cache_filename, 'w') as cache:
            cache.write(json_data)

    ###########################################################################
    # Utilities
    ###########################################################################

    @staticmethod
    def to_safe(word):
        """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
        return re.sub(r"[^A-Za-z0-9\-.]", "_", word)

    @staticmethod
    def do_namespace(data):
        """ Returns a copy of the dictionary with all the keys put in a 'do_' namespace """
        info = {}
        for k, v in data.items():
            info['do_' + k] = v
        return info


###########################################################################
# Run the script
DigitalOceanInventory()
gpl-3.0
coderbone/SickRage
lib/hachoir_parser/misc/ole2.py
74
14203
""" Microsoft Office documents parser. OLE2 files are also used by many other programs to store data. Informations: * wordole.c of AntiWord program (v0.35) Copyright (C) 1998-2003 A.J. van Os Released under GNU GPL http://www.winfield.demon.nl/ * File gsf-infile-msole.c of libgsf library (v1.14.0) Copyright (C) 2002-2004 Jody Goldberg (jody@gnome.org) Released under GNU LGPL 2.1 http://freshmeat.net/projects/libgsf/ * PDF from AAF Association Copyright (C) 2004 AAF Association Copyright (C) 1991-2003 Microsoft Corporation http://www.aafassociation.org/html/specs/aafcontainerspec-v1.0.1.pdf Author: Victor Stinner Creation: 2006-04-23 """ from hachoir_parser import HachoirParser from hachoir_core.field import ( FieldSet, ParserError, SeekableFieldSet, RootSeekableFieldSet, UInt8, UInt16, UInt32, UInt64, TimestampWin64, Enum, Bytes, NullBytes, String) from hachoir_core.text_handler import filesizeHandler from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN from hachoir_parser.common.win32 import GUID from hachoir_parser.misc.msoffice import PROPERTY_NAME, RootEntry, RawParser, CustomFragment MIN_BIG_BLOCK_LOG2 = 6 # 512 bytes MAX_BIG_BLOCK_LOG2 = 14 # 64 kB # Number of items in DIFAT NB_DIFAT = 109 class SECT(UInt32): UNUSED = 0xFFFFFFFF # -1 END_OF_CHAIN = 0xFFFFFFFE # -2 BFAT_SECTOR = 0xFFFFFFFD # -3 DIFAT_SECTOR = 0xFFFFFFFC # -4 SPECIALS = set((END_OF_CHAIN, UNUSED, BFAT_SECTOR, DIFAT_SECTOR)) special_value_name = { UNUSED: "unused", END_OF_CHAIN: "end of a chain", BFAT_SECTOR: "BFAT sector (in a FAT)", DIFAT_SECTOR: "DIFAT sector (in a FAT)", } def __init__(self, parent, name, description=None): UInt32.__init__(self, parent, name, description) def createDisplay(self): val = self.value return SECT.special_value_name.get(val, str(val)) class Property(FieldSet): TYPE_ROOT = 5 TYPE_NAME = { 1: "storage", 2: "stream", 3: "ILockBytes", 4: "IPropertyStorage", 5: "root" } DECORATOR_NAME = { 0: "red", 1: "black", } static_size = 128 * 8 def createFields(self): bytes 
= self.stream.readBytes(self.absolute_address, 4) if bytes == "\0R\0\0": charset = "UTF-16-BE" else: charset = "UTF-16-LE" yield String(self, "name", 64, charset=charset, truncate="\0") yield UInt16(self, "namelen", "Length of the name") yield Enum(UInt8(self, "type", "Property type"), self.TYPE_NAME) yield Enum(UInt8(self, "decorator", "Decorator"), self.DECORATOR_NAME) yield SECT(self, "left") yield SECT(self, "right") yield SECT(self, "child", "Child node (valid for storage and root types)") yield GUID(self, "clsid", "CLSID of this storage (valid for storage and root types)") yield NullBytes(self, "flags", 4, "User flags") yield TimestampWin64(self, "creation", "Creation timestamp(valid for storage and root types)") yield TimestampWin64(self, "lastmod", "Modify timestamp (valid for storage and root types)") yield SECT(self, "start", "Starting SECT of the stream (valid for stream and root types)") if self["/header/bb_shift"].value == 9: yield filesizeHandler(UInt32(self, "size", "Size in bytes (valid for stream and root types)")) yield NullBytes(self, "padding", 4) else: yield filesizeHandler(UInt64(self, "size", "Size in bytes (valid for stream and root types)")) def createDescription(self): name = self["name"].display size = self["size"].display return "Property: %s (%s)" % (name, size) class DIFat(SeekableFieldSet): def __init__(self, parent, name, db_start, db_count, description=None): SeekableFieldSet.__init__(self, parent, name, description) self.start=db_start self.count=db_count def createFields(self): for index in xrange(NB_DIFAT): yield SECT(self, "index[%u]" % index) difat_sect = self.start index = NB_DIFAT entries_per_sect = self.parent.sector_size / 32 - 1 for ctr in xrange(self.count): # this is relative to real DIFAT start self.seekBit(NB_DIFAT*SECT.static_size + self.parent.sector_size*difat_sect) for sect_index in xrange(entries_per_sect): yield SECT(self, "index[%u]" % (index+sect_index)) index += entries_per_sect next = SECT(self, "difat[%u]" % 
ctr) yield next difat_sect = next.value class Header(FieldSet): static_size = 68 * 8 def createFields(self): yield GUID(self, "clsid", "16 bytes GUID used by some apps") yield UInt16(self, "ver_min", "Minor version") yield UInt16(self, "ver_maj", "Major version") yield Bytes(self, "endian", 2, "Endian (\\xfe\\xff for little endian)") yield UInt16(self, "bb_shift", "Log, base 2, of the big block size") yield UInt16(self, "sb_shift", "Log, base 2, of the small block size") yield NullBytes(self, "reserved[]", 6, "(reserved)") yield UInt32(self, "csectdir", "Number of SECTs in directory chain for 4 KB sectors (version 4)") yield UInt32(self, "bb_count", "Number of Big Block Depot blocks") yield SECT(self, "bb_start", "Root start block") yield NullBytes(self, "transaction", 4, "Signature used for transactions (must be zero)") yield UInt32(self, "threshold", "Maximum size for a mini stream (typically 4096 bytes)") yield SECT(self, "sb_start", "Small Block Depot start block") yield UInt32(self, "sb_count") yield SECT(self, "db_start", "First block of DIFAT") yield UInt32(self, "db_count", "Number of SECTs in DIFAT") # Header (ole_id, header, difat) size in bytes HEADER_SIZE = 64 + Header.static_size + NB_DIFAT * SECT.static_size class SectFat(FieldSet): def __init__(self, parent, name, start, count, description=None): FieldSet.__init__(self, parent, name, description, size=count*32) self.count = count self.start = start def createFields(self): for i in xrange(self.start, self.start + self.count): yield SECT(self, "index[%u]" % i) class OLE2_File(HachoirParser, RootSeekableFieldSet): PARSER_TAGS = { "id": "ole2", "category": "misc", "file_ext": ( "db", # Thumbs.db "doc", "dot", # Microsoft Word "ppt", "ppz", "pps", "pot", # Microsoft Powerpoint "xls", "xla", # Microsoft Excel "msi", # Windows installer ), "mime": ( u"application/msword", u"application/msexcel", u"application/mspowerpoint", ), "min_size": 512*8, "description": "Microsoft Office document", "magic": 
(("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", 0),), } endian = LITTLE_ENDIAN def __init__(self, stream, **args): RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self)) HachoirParser.__init__(self, stream, **args) def validate(self): if self["ole_id"].value != "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1": return "Invalid magic" if self["header/ver_maj"].value not in (3, 4): return "Unknown major version (%s)" % self["header/ver_maj"].value if self["header/endian"].value not in ("\xFF\xFE", "\xFE\xFF"): return "Unknown endian (%s)" % self["header/endian"].raw_display if not(MIN_BIG_BLOCK_LOG2 <= self["header/bb_shift"].value <= MAX_BIG_BLOCK_LOG2): return "Invalid (log 2 of) big block size (%s)" % self["header/bb_shift"].value if self["header/bb_shift"].value < self["header/sb_shift"].value: return "Small block size (log2=%s) is bigger than big block size (log2=%s)!" \ % (self["header/sb_shift"].value, self["header/bb_shift"].value) return True def createFields(self): # Signature yield Bytes(self, "ole_id", 8, "OLE object signature") header = Header(self, "header") yield header # Configure values self.sector_size = (8 << header["bb_shift"].value) self.fat_count = header["bb_count"].value self.items_per_bbfat = self.sector_size / SECT.static_size self.ss_size = (8 << header["sb_shift"].value) self.items_per_ssfat = self.items_per_bbfat # Read DIFAT (one level of indirection) yield DIFat(self, "difat", header["db_start"].value, header["db_count"].value, "Double Indirection FAT") # Read FAT (one level of indirection) for field in self.readBFAT(): yield field # Read SFAT for field in self.readSFAT(): yield field # Read properties chain = self.getChain(self["header/bb_start"].value) prop_per_sector = self.sector_size // Property.static_size self.properties = [] for block in chain: self.seekBlock(block) for index in xrange(prop_per_sector): property = Property(self, "property[]") yield property self.properties.append(property) # Parse first property for index, 
property in enumerate(self.properties): if index == 0: name, parser = 'root', RootEntry else: try: name, parser = PROPERTY_NAME[property["name"].value] except LookupError: name = property.name+"content" parser = RawParser for field in self.parseProperty(property, name, parser): yield field def parseProperty(self, property, name_prefix, parser=RawParser): if not property["size"].value: return if property["size"].value < self["header/threshold"].value and name_prefix!='root': return name = "%s[]" % name_prefix first = None previous = None size = 0 fragment_group = None chain = self.getChain(property["start"].value) while True: try: block = chain.next() contiguous = False if first is None: first = block contiguous = True if previous is not None and block == (previous+1): contiguous = True if contiguous: previous = block size += self.sector_size continue except StopIteration: block = None if first is None: break self.seekBlock(first) desc = "Big blocks %s..%s (%s)" % (first, previous, previous-first+1) desc += " of %s bytes" % (self.sector_size // 8) field = CustomFragment(self, name, size, parser, desc, fragment_group) if not fragment_group: fragment_group = field.group fragment_group.args["datasize"] = property["size"].value fragment_group.args["ole2name"] = property["name"].value yield field if block is None: break first = block previous = block size = self.sector_size def getChain(self, start, use_sfat=False): if use_sfat: fat = self.ss_fat items_per_fat = self.items_per_ssfat err_prefix = "SFAT chain" else: fat = self.bb_fat items_per_fat = self.items_per_bbfat err_prefix = "BFAT chain" block = start block_set = set() previous = block while block != SECT.END_OF_CHAIN: if block in SECT.SPECIALS: raise ParserError("%s: Invalid block index (0x%08x), previous=%s" % (err_prefix, block, previous)) if block in block_set: raise ParserError("%s: Found a loop (%s=>%s)" % (err_prefix, previous, block)) block_set.add(block) yield block previous = block index = block // 
items_per_fat try: block = fat[index]["index[%u]" % block].value except LookupError, err: break def readBFAT(self): self.bb_fat = [] start = 0 count = self.items_per_bbfat for index, block in enumerate(self.array("difat/index")): block = block.value if block == SECT.UNUSED: break desc = "FAT %u/%u at block %u" % \ (1+index, self["header/bb_count"].value, block) self.seekBlock(block) field = SectFat(self, "bbfat[]", start, count, desc) yield field self.bb_fat.append(field) start += count def readSFAT(self): chain = self.getChain(self["header/sb_start"].value) start = 0 self.ss_fat = [] count = self.items_per_ssfat for index, block in enumerate(chain): self.seekBlock(block) field = SectFat(self, "sfat[]", \ start, count, \ "SFAT %u/%u at block %u" % \ (1+index, self["header/sb_count"].value, block)) yield field self.ss_fat.append(field) start += count def createContentSize(self): max_block = 0 for fat in self.array("bbfat"): for entry in fat: block = entry.value if block not in SECT.SPECIALS: max_block = max(block, max_block) if max_block in SECT.SPECIALS: return None else: return HEADER_SIZE + (max_block+1) * self.sector_size def seekBlock(self, block): self.seekBit(HEADER_SIZE + block * self.sector_size)
gpl-3.0
torchingloom/edx-platform
common/djangoapps/external_auth/tests/test_openid_provider.py
46
16144
#-*- encoding=utf-8 -*- ''' Created on Jan 18, 2013 @author: brian ''' import openid import json from openid.fetchers import HTTPFetcher, HTTPResponse from urlparse import parse_qs, urlparse from django.conf import settings from django.test import TestCase, LiveServerTestCase from django.core.cache import cache from django.test.utils import override_settings from django.core.urlresolvers import reverse from django.test.client import RequestFactory from unittest import skipUnless from student.tests.factories import UserFactory from external_auth.views import provider_login class MyFetcher(HTTPFetcher): """A fetcher that uses server-internal calls for performing HTTP requests. """ def __init__(self, client): """@param client: A test client object""" super(MyFetcher, self).__init__() self.client = client def fetch(self, url, body=None, headers=None): """Perform an HTTP request @raises Exception: Any exception that can be raised by Django @see: C{L{HTTPFetcher.fetch}} """ if body: # method = 'POST' # undo the URL encoding of the POST arguments data = parse_qs(body) response = self.client.post(url, data) else: # method = 'GET' data = {} if headers and 'Accept' in headers: data['CONTENT_TYPE'] = headers['Accept'] response = self.client.get(url, data) # Translate the test client response to the fetcher's HTTP response abstraction content = response.content final_url = url response_headers = {} if 'Content-Type' in response: response_headers['content-type'] = response['Content-Type'] if 'X-XRDS-Location' in response: response_headers['x-xrds-location'] = response['X-XRDS-Location'] status = response.status_code return HTTPResponse( body=content, final_url=final_url, headers=response_headers, status=status, ) class OpenIdProviderTest(TestCase): """ Tests of the OpenId login """ @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled') def test_begin_login_with_xrds_url(self): # the provider URL must be 
converted to an absolute URL in order to be # used as an openid provider. provider_url = reverse('openid-provider-xrds') factory = RequestFactory() request = factory.request() abs_provider_url = request.build_absolute_uri(location=provider_url) # In order for this absolute URL to work (i.e. to get xrds, then authentication) # in the test environment, we either need a live server that works with the default # fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher. # Here we do the latter: fetcher = MyFetcher(self.client) openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False) # now we can begin the login process by invoking a local openid client, # with a pointer to the (also-local) openid provider: with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url): url = reverse('openid-login') resp = self.client.post(url) code = 200 self.assertEqual(resp.status_code, code, "got code {0} for url '{1}'. Expected code {2}" .format(resp.status_code, url, code)) @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled') def test_begin_login_with_login_url(self): # the provider URL must be converted to an absolute URL in order to be # used as an openid provider. provider_url = reverse('openid-provider-login') factory = RequestFactory() request = factory.request() abs_provider_url = request.build_absolute_uri(location=provider_url) # In order for this absolute URL to work (i.e. to get xrds, then authentication) # in the test environment, we either need a live server that works with the default # fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher. 
# Here we do the latter: fetcher = MyFetcher(self.client) openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False) # now we can begin the login process by invoking a local openid client, # with a pointer to the (also-local) openid provider: with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url): url = reverse('openid-login') resp = self.client.post(url) code = 200 self.assertEqual(resp.status_code, code, "got code {0} for url '{1}'. Expected code {2}" .format(resp.status_code, url, code)) self.assertContains(resp, '<input name="openid.mode" type="hidden" value="checkid_setup" />', html=True) self.assertContains(resp, '<input name="openid.ns" type="hidden" value="http://specs.openid.net/auth/2.0" />', html=True) self.assertContains(resp, '<input name="openid.identity" type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />', html=True) self.assertContains(resp, '<input name="openid.claimed_id" type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />', html=True) self.assertContains(resp, '<input name="openid.ns.ax" type="hidden" value="http://openid.net/srv/ax/1.0" />', html=True) self.assertContains(resp, '<input name="openid.ax.mode" type="hidden" value="fetch_request" />', html=True) self.assertContains(resp, '<input name="openid.ax.required" type="hidden" value="email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.fullname" type="hidden" value="http://axschema.org/namePerson" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.lastname" type="hidden" value="http://axschema.org/namePerson/last" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.firstname" type="hidden" value="http://axschema.org/namePerson/first" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.nickname" type="hidden" value="http://axschema.org/namePerson/friendly" />', html=True) 
self.assertContains(resp, '<input name="openid.ax.type.email" type="hidden" value="http://axschema.org/contact/email" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.old_email" type="hidden" value="http://schema.openid.net/contact/email" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.old_nickname" type="hidden" value="http://schema.openid.net/namePerson/friendly" />', html=True) self.assertContains(resp, '<input name="openid.ax.type.old_fullname" type="hidden" value="http://schema.openid.net/namePerson" />', html=True) self.assertContains(resp, '<input type="submit" value="Continue" />', html=True) # this should work on the server: self.assertContains(resp, '<input name="openid.realm" type="hidden" value="http://testserver/" />', html=True) # not included here are elements that will vary from run to run: # <input name="openid.return_to" type="hidden" value="http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H" /> # <input name="openid.assoc_handle" type="hidden" value="{HMAC-SHA1}{50ff8120}{rh87+Q==}" /> def attempt_login(self, expected_code, **kwargs): """ Attempt to log in through the open id provider login """ url = reverse('openid-provider-login') post_args = { "openid.mode": "checkid_setup", "openid.return_to": "http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H", "openid.assoc_handle": "{HMAC-SHA1}{50ff8120}{rh87+Q==}", "openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select", "openid.ns": "http://specs.openid.net/auth/2.0", "openid.realm": "http://testserver/", "openid.identity": "http://specs.openid.net/auth/2.0/identifier_select", "openid.ns.ax": "http://openid.net/srv/ax/1.0", "openid.ax.mode": "fetch_request", "openid.ax.required": "email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname", "openid.ax.type.fullname": "http://axschema.org/namePerson", "openid.ax.type.lastname": "http://axschema.org/namePerson/last", 
"openid.ax.type.firstname": "http://axschema.org/namePerson/first", "openid.ax.type.nickname": "http://axschema.org/namePerson/friendly", "openid.ax.type.email": "http://axschema.org/contact/email", "openid.ax.type.old_email": "http://schema.openid.net/contact/email", "openid.ax.type.old_nickname": "http://schema.openid.net/namePerson/friendly", "openid.ax.type.old_fullname": "http://schema.openid.net/namePerson", } # override the default args with any given arguments for key in kwargs: post_args["openid." + key] = kwargs[key] resp = self.client.post(url, post_args) code = expected_code self.assertEqual(resp.status_code, code, "got code {0} for url '{1}'. Expected code {2}" .format(resp.status_code, url, code)) @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled') def test_open_id_setup(self): """ Attempt a standard successful login """ self.attempt_login(200) @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled') def test_invalid_namespace(self): """ Test for 403 error code when the namespace of the request is invalid""" self.attempt_login(403, ns="http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0") @override_settings(OPENID_PROVIDER_TRUSTED_ROOTS=['http://apps.cs50.edx.org']) @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled') def test_invalid_return_url(self): """ Test for 403 error code when the url""" self.attempt_login(403, return_to="http://apps.cs50.edx.or") def _send_bad_redirection_login(self): """ Attempt to log in to the provider with setup parameters Intentionally fail the login to force a redirect """ user = UserFactory() factory = RequestFactory() post_params = {'email': user.email, 'password': 'password'} fake_url = 'fake url' request = factory.post(reverse('openid-provider-login'), post_params) openid_setup = { 'request': 
factory.request(), 'url': fake_url } request.session = { 'openid_setup': openid_setup } response = provider_login(request) return response @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled') def test_login_openid_handle_redirection(self): """ Test to see that we can handle login redirection properly""" response = self._send_bad_redirection_login() self.assertEquals(response.status_code, 302) @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled') def test_login_openid_handle_redirection_ratelimited(self): # try logging in 30 times, the default limit in the number of failed # log in attempts before the rate gets limited for _ in xrange(30): self._send_bad_redirection_login() response = self._send_bad_redirection_login() # verify that we are not returning the default 403 self.assertEquals(response.status_code, 302) # clear the ratelimit cache so that we don't fail other logins cache.clear() @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled') def test_openid_final_response(self): url = reverse('openid-provider-login') user = UserFactory() # login to the client so that we can persist session information for name in ['Robot 33', '☃']: user.profile.name = name user.profile.save() self.client.login(username=user.username, password='test') # login once to get the right session information self.attempt_login(200) post_args = { 'email': user.email, 'password': 'test', } # call url again, this time with username and password resp = self.client.post(url, post_args) # all information is embedded in the redirect url location = resp['Location'] # parse the url parsed_url = urlparse(location) parsed_qs = parse_qs(parsed_url.query) self.assertEquals(parsed_qs['openid.ax.type.ext1'][0], 'http://axschema.org/contact/email') self.assertEquals(parsed_qs['openid.ax.type.ext0'][0], 
'http://axschema.org/namePerson') self.assertEquals(parsed_qs['openid.ax.value.ext1.1'][0], user.email) self.assertEquals(parsed_qs['openid.ax.value.ext0.1'][0], user.profile.name) class OpenIdProviderLiveServerTest(LiveServerTestCase): """ In order for this absolute URL to work (i.e. to get xrds, then authentication) in the test environment, we either need a live server that works with the default fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher. Here we do the former. """ @skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled') def test_begin_login(self): # the provider URL must be converted to an absolute URL in order to be # used as an openid provider. provider_url = reverse('openid-provider-xrds') factory = RequestFactory() request = factory.request() abs_provider_url = request.build_absolute_uri(location=provider_url) # now we can begin the login process by invoking a local openid client, # with a pointer to the (also-local) openid provider: with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url): url = reverse('openid-login') resp = self.client.post(url) code = 200 self.assertEqual(resp.status_code, code, "got code {0} for url '{1}'. Expected code {2}" .format(resp.status_code, url, code)) @classmethod def tearDownClass(cls): """ Workaround for a runtime error that occurs intermittently when the server thread doesn't shut down within 2 seconds. Since the server is running in a Django thread and will be terminated when the test suite terminates, this shouldn't cause a resource allocation issue. """ try: super(OpenIdProviderLiveServerTest, cls).tearDownClass() except RuntimeError: print "Warning: Could not shut down test server."
agpl-3.0
helenst/django
django/template/loaders/app_directories.py
95
1560
"""
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""

import io

from django.core.exceptions import SuspiciousFileOperation
from django.template.base import TemplateDoesNotExist
from django.template.utils import get_app_template_dirs
from django.utils._os import safe_join

from .base import Loader as BaseLoader


class Loader(BaseLoader):
    is_usable = True

    def get_template_sources(self, template_name, template_dirs=None):
        """
        Yield the absolute path of ``template_name`` appended to each
        directory in ``template_dirs`` (defaulting to the app template
        directories). Any join that escapes its template directory is
        skipped, for security reasons.
        """
        dirs = template_dirs or get_app_template_dirs('templates')
        for directory in dirs:
            try:
                yield safe_join(directory, template_name)
            except SuspiciousFileOperation:
                # The joined path fell outside this directory; it may still
                # lie inside another one, so this isn't fatal — move on.
                continue

    def load_template_source(self, template_name, template_dirs=None):
        """
        Return ``(contents, filepath)`` for the first candidate path that
        can be opened; raise TemplateDoesNotExist when none can.
        """
        for candidate in self.get_template_sources(template_name, template_dirs):
            try:
                with io.open(candidate, encoding=self.engine.file_charset) as handle:
                    return handle.read(), candidate
            except IOError:
                continue
        raise TemplateDoesNotExist(template_name)
bsd-3-clause
adlai/p2pool
wstools/XMLname.py
291
2479
"""Translate strings to and from SOAP 1.2 XML name encoding

Implements rules for mapping application defined name to XML names
specified by the w3 SOAP working group for SOAP version 1.2 in
Appendix A of "SOAP Version 1.2 Part 2: Adjuncts", W3C Working Draft 17,
December 2001, <http://www.w3.org/TR/soap12-part2/#namemap>

Also see <http://www.w3.org/2000/xp/Group/xmlp-issues>.

Author: Gregory R. Warnes <Gregory.R.Warnes@Pfizer.com>
Date::  2002-04-25
Version 0.9.0
"""

ident = "$Id$"

from re import *

# Python 2/3 compatibility: text type and code-point-to-character builder.
try:
    _unicode = unicode
    _unichr = unichr
except NameError:       # Python 3
    _unicode = str
    _unichr = chr


def _NCNameChar(x):
    # True if x may appear (anywhere but first) in an XML NCName.
    return x.isalpha() or x.isdigit() or x == "." or x == '-' or x == "_"


def _NCNameStartChar(x):
    # True if x may start an XML NCName.
    return x.isalpha() or x == "_"


def _toUnicodeHex(x):
    """Return the ``_xHHHH_`` escape for the single character *x*.

    The code point is zero-padded to 4 hex digits, or to 8 digits for
    characters beyond 0xFFFF, per the SOAP 1.2 name-mapping rules.
    """
    hexval = "%x" % ord(x[0])
    if len(hexval) > 8:
        # Impossible for valid Unicode code points; kept as a sanity check
        # (ValueError is an Exception subclass, so existing handlers still work).
        raise ValueError("Illegal value returned from hex(ord(x))")
    hexval = hexval.rjust(8 if len(hexval) > 4 else 4, "0")
    return "_x" + hexval + "_"


def _fromUnicodeHex(x):
    """Decode one ``_xHHHH_`` escape back to its character.

    Parses the hex digits directly instead of eval()-ing a constructed
    ``u"\\uXXXX"`` literal: safer, and correct for escapes longer than
    4 digits (``\\u`` consumes exactly four, so the old eval mis-decoded
    supplementary-plane escapes).
    """
    return _unichr(int(x[2:-1], 16))


def toXMLname(string):
    """Convert string to a XML name."""
    if string.find(':') != -1:
        (prefix, localname) = string.split(':', 1)
    else:
        prefix = None
        localname = string

    T = _unicode(localname)
    N = len(localname)
    X = []
    for i in range(N):
        if i < N - 1 and T[i] == u'_' and T[i + 1] == u'x':
            # A literal "_x" must itself be escaped so decoding is unambiguous.
            X.append(u'_x005F_')
        elif i == 0 and N >= 3 and \
                (T[0] == u'x' or T[0] == u'X') and \
                (T[1] == u'm' or T[1] == u'M') and \
                (T[2] == u'l' or T[2] == u'L'):
            # XML names may not begin with "xml" in any case combination.
            X.append(u'_xFFFF_' + T[0])
        elif (not _NCNameChar(T[i])) or (i == 0 and not _NCNameStartChar(T[i])):
            X.append(_toUnicodeHex(T[i]))
        else:
            X.append(T[i])

    if prefix:
        return "%s:%s" % (prefix, u''.join(X))
    return u''.join(X)


def fromXMLname(string):
    """Convert XML name to unicode string."""
    # Strip the reserved-prefix marker, then decode every _xHH..._ escape.
    retval = sub(r'_xFFFF_', '', string)

    def fun(matchobj):
        return _fromUnicodeHex(matchobj.group(0))

    retval = sub(r'_x[0-9A-Za-z]+_', fun, retval)

    return retval
gpl-3.0
jjdmol/LOFAR
CEP/Pipeline/recipes/sip/nodes/setupparmdb.py
2
1516
# LOFAR IMAGING PIPELINE
#
# setupparmdb nodes recipe
# Marcel Loose, 2012
# loose@astron.nl
# ------------------------------------------------------------------------------

from lofarpipe.support.lofarnode import LOFARnodeTCP
from lofarpipe.support.utilities import log_time

import shutil
import sys


class setupparmdb(LOFARnodeTCP):
    """
    Install the provided template parmdb at the target location:

    1. Remove a possible old parmdb at the target location.
    2. Copy the template to the target location.
    """
    def run(self, pdb_in, pdb_out):
        with log_time(self.logger):
            self.logger.debug("Copying parmdb: %s --> %s" % (pdb_in, pdb_out))
            # Drop any stale parmdb that may already sit at the target ...
            shutil.rmtree(pdb_out, ignore_errors=True)
            # ... and put a fresh copy of the template in its place.
            shutil.copytree(pdb_in, pdb_out)
        return 0


if __name__ == "__main__":
    # Direct invocation: the first three command-line arguments carry the
    # logger information; the remainder is passed to run() defined above.
    jobid, jobhost, jobport = sys.argv[1:4]
    sys.exit(setupparmdb(jobid, jobhost, jobport).run_with_stored_arguments())
gpl-3.0
mixja/eap-sim-lab
lib/pyscard-1.6.16/build/lib.macosx-10.10-x86_64-2.7/smartcard/pcsc/PCSCContext.py
3
1857
"""PCSC context singleton.

__author__ = "http://www.gemalto.com"

Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com

This file is part of pyscard.

pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""

from threading import RLock

from smartcard.scard import *
from smartcard.pcsc.PCSCExceptions import EstablishContextException


class PCSCContext(object):
    """Manage a singleton pcsc context handle."""

    class __PCSCContextSingleton:
        """The actual pcsc context class as a singleton."""

        def __init__(self):
            # Establish the PCSC context once; a non-zero result is an error.
            hresult, self.hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
            if hresult != 0:
                raise EstablishContextException(hresult)

        def getContext(self):
            return self.hcontext

    # Class-level state shared by every PCSCContext wrapper instance.
    mutex = RLock()
    instance = None

    def __init__(self):
        # Lazily create the singleton under the class lock; the RLock is
        # used as a context manager rather than explicit acquire/release.
        with PCSCContext.mutex:
            if PCSCContext.instance is None:
                PCSCContext.instance = PCSCContext.__PCSCContextSingleton()

    def __getattr__(self, name):
        # Delegate unknown attribute lookups to the shared singleton.
        if self.instance:
            return getattr(self.instance, name)
mit
GGoussar/scikit-image
doc/examples/segmentation/plot_marked_watershed.py
9
1988
"""
===============================
Markers for watershed transform
===============================

The watershed is a classical algorithm used for **segmentation**, that is, for
separating different objects in an image.

Here a marker image is built from the region of low gradient inside the image.
In a gradient image, the areas of high values provide barriers that help to
segment the image.
Using markers on the lower values will ensure that the segmented objects are
found.

See Wikipedia_ for more details on the algorithm.

.. _Wikipedia: http://en.wikipedia.org/wiki/Watershed_(image_processing)

"""
from scipy import ndimage as ndi
import matplotlib.pyplot as plt

from skimage.morphology import watershed, disk
from skimage import data
from skimage.filters import rank
from skimage.util import img_as_ubyte


img = img_as_ubyte(data.camera())

# Suppress noise with a median filter before computing any gradients.
smoothed = rank.median(img, disk(2))

# Marker image: label the connected regions whose local gradient (taken over
# the larger disk(5) neighbourhood for extra smoothing) stays below 10.
seeds = rank.gradient(smoothed, disk(5)) < 10
seeds = ndi.label(seeds)[0]

# The landscape to flood: a local gradient over the small disk(2)
# neighbourhood, which keeps the edges thin.
landscape = rank.gradient(smoothed, disk(2))

# Run the watershed from the seed labels over the gradient landscape.
segmentation = watershed(landscape, seeds)

# Display the input, the landscape, the markers and the segmentation result.
figure, panel_grid = plt.subplots(nrows=2, ncols=2, figsize=(8, 8),
                                  sharex=True, sharey=True,
                                  subplot_kw={'adjustable': 'box-forced'})
panels = panel_grid.ravel()

panels[0].imshow(img, cmap=plt.cm.gray, interpolation='nearest')
panels[0].set_title("Original")

panels[1].imshow(landscape, cmap=plt.cm.spectral, interpolation='nearest')
panels[1].set_title("Local Gradient")

panels[2].imshow(seeds, cmap=plt.cm.spectral, interpolation='nearest')
panels[2].set_title("Markers")

panels[3].imshow(img, cmap=plt.cm.gray, interpolation='nearest')
panels[3].imshow(segmentation, cmap=plt.cm.spectral,
                 interpolation='nearest', alpha=.7)
panels[3].set_title("Segmented")

for panel in panels:
    panel.axis('off')

figure.tight_layout()
plt.show()
bsd-3-clause
pjdelport/pip
pip/_vendor/requests/packages/chardet/sbcharsetprober.py
2927
4793
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### import sys from . 
import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord

# Number of most-frequent character "orders" tracked by the pair model;
# the precedence matrix is SAMPLE_SIZE x SAMPLE_SIZE, flattened.
SAMPLE_SIZE = 64
# Minimum number of observed pairs before the shortcut thresholds below
# are allowed to end detection early.
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
# Orders >= this value map to symbols rather than letters.
SYMBOL_CAT_ORDER = 250
# Number of sequence-likelihood categories in the precedence matrix.
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0


class SingleByteCharSetProber(CharSetProber):
    """Detects a single-byte charset by scoring adjacent character
    pairs against a per-language model (``precedenceMatrix``).

    ``model`` is a dict providing at least 'charToOrderMap',
    'precedenceMatrix', 'charsetName', 'keepEnglishLetter' and
    'mTypicalPositiveRatio'.
    """

    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        """Reset all accumulated statistics to their initial state."""
        CharSetProber.reset(self)
        # char order of last character (255 = no previous character yet)
        self._mLastOrder = 255
        # one counter per likelihood category of observed pairs
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        # Delegate to the name prober when present (used when one model
        # serves several charsets), otherwise use the model's own name.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Accumulate pair statistics for ``aBuf``; return prober state."""
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            # Map the raw byte to its frequency rank ("order") for this
            # language; low order = frequent letter.
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Only score a pair when the previous char was also in
                # the sampled range.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:
                        # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            # With enough evidence, commit early in either direction.
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        # NOTE(review): message is missing a space between
                        # the joined literals and spells "threshhold";
                        # left untouched since it is runtime output.
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe

        return self.get_state()

    def get_confidence(self):
        """Return a confidence in [0.01, 0.99].

        Ratio of positively-categorised pairs to total pairs, normalised
        by the model's typical positive ratio, then scaled by the
        fraction of characters inside the sampled range.
        """
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            r = 0.99
        return r
mit
chugunovyar/factoryForBuild
env/lib/python2.7/site-packages/tornado/tcpclient.py
208
6802
#!/usr/bin/env python
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A non-blocking TCP connection factory.
"""
from __future__ import absolute_import, division, print_function, with_statement

import functools
import socket

from tornado.concurrent import Future
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado import gen
from tornado.netutil import Resolver

# Delay before a parallel attempt to the other address family is started
# (RFC 6555 recommends 150-250ms; this is deliberately a bit longer).
_INITIAL_CONNECT_TIMEOUT = 0.3


class _Connector(object):
    """A stateless implementation of the "Happy Eyeballs" algorithm.

    "Happy Eyeballs" is documented in RFC6555 as the recommended practice
    for when both IPv4 and IPv6 addresses are available.

    In this implementation, we partition the addresses by family, and
    make the first connection attempt to whichever address was
    returned first by ``getaddrinfo``.  If that connection fails or
    times out, we begin a connection in parallel to the first address
    of the other family.  If there are additional failures we retry
    with other addresses, keeping one connection attempt per family
    in flight at a time.

    http://tools.ietf.org/html/rfc6555
    """
    def __init__(self, addrinfo, io_loop, connect):
        # addrinfo: list of (family, address) pairs as returned by the
        #   resolver; connect: callable (af, addr) -> Future of a stream.
        self.io_loop = io_loop
        self.connect = connect

        # Resolved with (af, addr, stream) on the first successful attempt.
        self.future = Future()
        self.timeout = None
        self.last_error = None
        # Attempts not yet completed; used to know when both queues failed.
        self.remaining = len(addrinfo)
        self.primary_addrs, self.secondary_addrs = self.split(addrinfo)

    @staticmethod
    def split(addrinfo):
        """Partition the ``addrinfo`` list by address family.

        Returns two lists.  The first list contains the first entry from
        ``addrinfo`` and all others with the same family, and the
        second list contains all other addresses (normally one list will
        be AF_INET and the other AF_INET6, although non-standard resolvers
        may return additional families).
        """
        primary = []
        secondary = []
        primary_af = addrinfo[0][0]
        for af, addr in addrinfo:
            if af == primary_af:
                primary.append((af, addr))
            else:
                secondary.append((af, addr))
        return primary, secondary

    def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):
        # Begin with the primary family; arm the timer that will kick
        # off the secondary family if the primary is slow.
        self.try_connect(iter(self.primary_addrs))
        self.set_timout(timeout)
        return self.future

    def try_connect(self, addrs):
        """Start a connection attempt to the next address in ``addrs``."""
        try:
            af, addr = next(addrs)
        except StopIteration:
            # We've reached the end of our queue, but the other queue
            # might still be working. Send a final error on the future
            # only when both queues are finished.
            if self.remaining == 0 and not self.future.done():
                self.future.set_exception(self.last_error or
                                          IOError("connection failed"))
            return
        future = self.connect(af, addr)
        future.add_done_callback(functools.partial(self.on_connect_done,
                                                   addrs, af, addr))

    def on_connect_done(self, addrs, af, addr, future):
        """Callback for one finished attempt: retry, drop, or resolve."""
        self.remaining -= 1
        try:
            stream = future.result()
        except Exception as e:
            if self.future.done():
                return
            # Error: try again (but remember what happened so we have an
            # error to raise in the end)
            self.last_error = e
            self.try_connect(addrs)
            if self.timeout is not None:
                # If the first attempt failed, don't wait for the
                # timeout to try an address from the secondary queue.
                self.io_loop.remove_timeout(self.timeout)
                self.on_timeout()
            return
        self.clear_timeout()
        if self.future.done():
            # This is a late arrival; just drop it.
            stream.close()
        else:
            self.future.set_result((af, addr, stream))

    # NOTE(review): method name has a typo ("timout"); kept as-is since
    # start() and this class's callers rely on the current spelling.
    def set_timout(self, timeout):
        self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
                                                self.on_timeout)

    def on_timeout(self):
        # Primary family is taking too long: start the secondary family
        # in parallel (one attempt per family in flight at a time).
        self.timeout = None
        self.try_connect(iter(self.secondary_addrs))

    def clear_timeout(self):
        if self.timeout is not None:
            self.io_loop.remove_timeout(self.timeout)


class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            self.resolver = resolver
            # Caller owns the resolver; do not close it in close().
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        # Only release the resolver if we created it ourselves.
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            functools.partial(self._create_stream, max_buffer_size))
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                            server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr):
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        stream = IOStream(socket.socket(af),
                          io_loop=self.io_loop,
                          max_buffer_size=max_buffer_size)
        return stream.connect(addr)
gpl-3.0
karlnapf/kameleon-mcmc
kameleon_mcmc/tools/Visualise.py
1
5656
""" This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. Written (W) 2013 Heiko Strathmann Written (W) 2013 Dino Sejdinovic """ from kameleon_mcmc.distribution.Gaussian import Gaussian from matplotlib.patches import Ellipse from matplotlib.pyplot import imshow, ylim, xlim, contour, plot, hold, gca from numpy import linspace from numpy.linalg.linalg import eigh from numpy import zeros, array, exp, arctan2, sqrt import numpy class Visualise(object): def __init__(self): pass @staticmethod def get_plotting_arrays(distribution): bounds = distribution.get_plotting_bounds() assert(len(bounds) == 2) Xs = linspace(bounds[0][0], bounds[0][1]) Ys = linspace(bounds[1][0], bounds[1][1]) return Xs, Ys @staticmethod def visualise_distribution(distribution, Z=None, log_density=False, Xs=None, Ys=None): """ Plots the density of a given Distribution instance and plots some samples on top. 
""" if Xs is None or Ys is None: Xs, Ys = Visualise.get_plotting_arrays(distribution) Visualise.plot_density(distribution, Xs, Ys) if Z is not None: hold(True) Visualise.plot_data(Z) hold(False) @staticmethod def plot_density(distribution, Xs, Ys, log_domain=False): """ Plots a 2D density density - density - distribution instance to plot Xs - x values the density is evaluated at Ys - y values the density is evaluated at log_domain - if False, density will be put into exponential function """ assert(distribution.dimension == 2) D = zeros((len(Xs), len(Ys))) # compute log-density for i in range(len(Xs)): for j in range(len(Ys)): x = array([[Xs[i], Ys[j]]]) D[j, i] = distribution.log_pdf(x) if log_domain == False: D = exp(D) im = imshow(D, origin='lower') im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()]) im.set_interpolation('nearest') im.set_cmap('gray') ylim([Ys.min(), Ys.max()]) xlim([Xs.min(), Xs.max()]) @staticmethod def contour_plot_density(distribution, Xs=None, Ys=None, log_domain=False): """ Contour-plots a 2D density. 
If Gaussian, plots 1.96 interval contour only density - distribution instance to plot Xs - x values the density is evaluated at Ys - y values the density is evaluated at log_domain - if False, density will be put into exponential function """ if isinstance(distribution, Gaussian) and log_domain == False: gca().add_artist(Visualise.get_gaussian_ellipse_artist(distribution)) gca().plot(distribution.mu[0], distribution.mu[1], 'r*', \ markersize=3.0, markeredgewidth=.1) return assert(distribution.dimension == 2) if Xs is None: (xmin, xmax), _ = distribution.get_plotting_bounds() Xs = linspace(xmin, xmax) if Ys is None: _, (ymin, ymax) = distribution.get_plotting_bounds() Ys = linspace(ymin, ymax) D = zeros((len(Ys), len(Xs))) # compute log-density for i in range(len(Xs)): for j in range(len(Ys)): x = array([[Xs[i], Ys[j]]]) D[j, i] = distribution.log_pdf(x) if log_domain == False: D = exp(D) contour(Xs, Ys, D, origin='lower') @staticmethod def plot_array(Xs, Ys, D): """ Plots a 2D array Xs - x values the density is evaluated at Ys - y values the density is evaluated at D - array to plot """ im = imshow(D, origin='lower') im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()]) im.set_interpolation('nearest') im.set_cmap('gray') ylim([Ys.min(), Ys.max()]) xlim([Xs.min(), Xs.max()]) @staticmethod def plot_data(Z, y=None): """ Plots collection of 2D points and optionally adds a marker to one of them Z - set of row-vectors points to plot y - one point that is marked in red, might be None """ plot(Z[:, 0], Z[:, 1], '*', markersize=3.0, markeredgewidth=.1) if y is not None: plot(y[0, 0], y[0, 1], 'r*', markersize=10.0, markeredgewidth=.1) @staticmethod def get_gaussian_ellipse_artist(gaussian, nstd=1.96, linewidth=1): """ Returns an allipse artist for nstd times the standard deviation of this Gaussian """ assert(isinstance(gaussian, Gaussian)) assert(gaussian.dimension == 2) # compute eigenvalues (ordered) vals, vecs = eigh(gaussian.L.dot(gaussian.L.T)) order = 
vals.argsort()[::-1] vals, vecs = vals[order], vecs[:, order] theta = numpy.degrees(arctan2(*vecs[:, 0][::-1])) # width and height are "full" widths, not radius width, height = 2 * nstd * sqrt(vals) e = Ellipse(xy=gaussian.mu, width=width, height=height, angle=theta, \ edgecolor="red", fill=False, linewidth=linewidth) return e
bsd-2-clause
cyberphox/MissionPlanner
Lib/site-packages/numpy/distutils/command/build.py
99
1255
import os
import sys
from distutils.command.build import build as old_build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers


class build(old_build):
    """distutils ``build`` command extended for numpy.distutils.

    Prepends the Fortran/C compiler configuration steps and the
    ``build_src`` source-generation step to the standard sub-commands,
    and adds a ``--fcompiler`` option.
    """

    # Configuration and source-generation must run before the standard
    # distutils build steps.
    sub_commands = [
        ('config_cc', lambda *args: True),
        ('config_fc', lambda *args: True),
        ('build_src', old_build.has_ext_modules),
    ] + old_build.sub_commands

    user_options = old_build.user_options + [
        ('fcompiler=', None, "specify the Fortran compiler type"),
    ]

    help_options = old_build.help_options + [
        ('help-fcompiler', None, "list available Fortran compilers",
         show_fortran_compilers),
    ]

    def initialize_options(self):
        old_build.initialize_options(self)
        # Fortran compiler type; None means auto-detect.
        self.fcompiler = None

    def finalize_options(self):
        # Remember whether --build-scripts was given explicitly, since
        # the base class fills in a default we want to override with a
        # platform-specific directory.
        explicit_scripts = self.build_scripts
        old_build.finalize_options(self)
        if explicit_scripts is None:
            suffix = ".%s-%s" % (get_platform(), sys.version[0:3])
            self.build_scripts = os.path.join(self.build_base,
                                              'scripts' + suffix)

    def run(self):
        # No extra behavior beyond the standard build sequence.
        old_build.run(self)
gpl-3.0
santisiri/popego
envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/add_newdocs.py
1
44163
from lib import add_newdoc add_newdoc('numpy.core','dtype', [('fields', "Fields of the data-type or None if no fields"), ('names', "Names of fields or None if no fields"), ('alignment', "Needed alignment for this data-type"), ('byteorder', "Little-endian (<), big-endian (>), native (=), or "\ "not-applicable (|)"), ('char', "Letter typecode for this data-type"), ('type', "Type object associated with this data-type"), ('kind', "Character giving type-family of this data-type"), ('itemsize', "Size of each item"), ('hasobject', "Non-zero if Python objects are in "\ "this data-type"), ('num', "Internally-used number for builtin base"), ('newbyteorder', """self.newbyteorder(<endian>) returns a copy of the dtype object with altered byteorders. If <endian> is not given all byteorders are swapped. Otherwise endian can be '>', '<', or '=' to force a particular byteorder. Data-types in all fields are also updated in the new dtype object. """), ("__reduce__", "self.__reduce__() for pickling"), ("__setstate__", "self.__setstate__() for pickling"), ("subdtype", "A tuple of (descr, shape) or None"), ("descr", "The array_interface data-type descriptor."), ("str", "The array interface typestring."), ("name", "The name of the true data-type"), ("base", "The base data-type or self if no subdtype"), ("shape", "The shape of the subdtype or (1,)"), ("isbuiltin", "Is this a built-in data-type?"), ("isnative", "Is the byte-order of this data-type native?") ] ) ############################################################################### # # flatiter # # flatiter needs a toplevel description # ############################################################################### # attributes add_newdoc('numpy.core', 'flatiter', ('base', """documentation needed """)) add_newdoc('numpy.core', 'flatiter', ('coords', """An N-d tuple of current coordinates. 
""")) add_newdoc('numpy.core', 'flatiter', ('index', """documentation needed """)) # functions add_newdoc('numpy.core', 'flatiter', ('__array__', """__array__(type=None) Get array from iterator """)) add_newdoc('numpy.core', 'flatiter', ('copy', """copy() Get a copy of the iterator as a 1-d array """)) ############################################################################### # # broadcast # ############################################################################### # attributes add_newdoc('numpy.core', 'broadcast', ('index', """current index in broadcasted result """)) add_newdoc('numpy.core', 'broadcast', ('iters', """tuple of individual iterators """)) add_newdoc('numpy.core', 'broadcast', ('nd', """number of dimensions of broadcasted result """)) add_newdoc('numpy.core', 'broadcast', ('numiter', """number of iterators """)) add_newdoc('numpy.core', 'broadcast', ('shape', """shape of broadcasted result """)) add_newdoc('numpy.core', 'broadcast', ('size', """total size of broadcasted result """)) ############################################################################### # # numpy functions # ############################################################################### add_newdoc('numpy.core.multiarray','array', """array(object, dtype=None, copy=1,order=None, subok=0,ndmin=0) Return an array from object with the specified date-type. Inputs: object - an array, any object exposing the array interface, any object whose __array__ method returns an array, or any (nested) sequence. dtype - The desired data-type for the array. If not given, then the type will be determined as the minimum type required to hold the objects in the sequence. This argument can only be used to 'upcast' the array. For downcasting, use the .astype(t) method. copy - If true, then force a copy. Otherwise a copy will only occur if __array__ returns a copy, obj is a nested sequence, or a copy is needed to satisfy any of the other requirements order - Specify the order of the array. 
If order is 'C', then the array will be in C-contiguous order (last-index varies the fastest). If order is 'FORTRAN', then the returned array will be in Fortran-contiguous order (first-index varies the fastest). If order is None, then the returned array may be in either C-, or Fortran-contiguous order or even discontiguous. subok - If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array ndmin - Specifies the minimum number of dimensions that the resulting array should have. 1's will be pre-pended to the shape as needed to meet this requirement. """) add_newdoc('numpy.core.multiarray','empty', """empty((d1,...,dn),dtype=float,order='C') Return a new array of shape (d1,...,dn) and given type with all its entries uninitialized. This can be faster than zeros. """) add_newdoc('numpy.core.multiarray','scalar', """scalar(dtype,obj) Return a new scalar array of the given type initialized with obj. Mainly for pickle support. The dtype must be a valid data-type descriptor. If dtype corresponds to an OBJECT descriptor, then obj can be any object, otherwise obj must be a string. If obj is not given it will be interpreted as None for object type and zeros for all other types. """) add_newdoc('numpy.core.multiarray','zeros', """zeros((d1,...,dn),dtype=float,order='C') Return a new array of shape (d1,...,dn) and type typecode with all it's entries initialized to zero. """) add_newdoc('numpy.core.multiarray','set_typeDict', """set_typeDict(dict) Set the internal dictionary that can look up an array type using a registered code. """) add_newdoc('numpy.core.multiarray','fromstring', """fromstring(string, dtype=float, count=-1, sep='') Return a new 1d array initialized from the raw binary data in string. If count is positive, the new array will have count elements, otherwise its size is determined by the size of string. 
If sep is not empty then the string is interpreted in ASCII mode and converted to the desired number type using sep as the separator between elements (extra whitespace is ignored). """) add_newdoc('numpy.core.multiarray','fromiter', """fromiter(iterable, dtype, count=-1) Return a new 1d array initialized from iterable. If count is nonegative, the new array will have count elements, otherwise it's size is determined by the generator. """) add_newdoc('numpy.core.multiarray','fromfile', """fromfile(file=, dtype=float, count=-1, sep='') -> array. Required arguments: file -- open file object or string containing file name. Keyword arguments: dtype -- type and order of the returned array (default float) count -- number of items to input (default all) sep -- separater between items if file is a text file (default "") Return an array of the given data type from a text or binary file. The 'file' argument can be an open file or a string with the name of a file to read from. If 'count' == -1 the entire file is read, otherwise count is the number of items of the given type to read in. If 'sep' is "" it means to read binary data from the file using the specified dtype, otherwise it gives the separator between elements in a text file. The 'dtype' value is also used to determine the size and order of the items in binary files. Data written using the tofile() method can be conveniently recovered using this function. WARNING: This function should be used sparingly as the binary files are not platform independent. In particular, they contain no endianess or datatype information. Nevertheless it can be useful for reading in simply formatted or binary data quickly. """) add_newdoc('numpy.core.multiarray','frombuffer', """frombuffer(buffer=, dtype=float, count=-1, offset=0) Returns a 1-d array of data type dtype from buffer. The buffer argument must be an object that exposes the buffer interface. If count is -1 then the entire buffer is used, otherwise, count is the size of the output. 
If offset is given then jump that far into the buffer. If the buffer has data that is out not in machine byte-order, than use a propert data type descriptor. The data will not be byteswapped, but the array will manage it in future operations. """) add_newdoc('numpy.core.multiarray','concatenate', """concatenate((a1, a2, ...), axis=0) Join arrays together. The tuple of sequences (a1, a2, ...) are joined along the given axis (default is the first one) into a single numpy array. Example: >>> concatenate( ([0,1,2], [5,6,7]) ) array([0, 1, 2, 5, 6, 7]) """) add_newdoc('numpy.core.multiarray','inner', """inner(a,b) Returns the dot product of two arrays, which has shape a.shape[:-1] + b.shape[:-1] with elements computed by the product of the elements from the last dimensions of a and b. """) add_newdoc('numpy.core','fastCopyAndTranspose', """_fastCopyAndTranspose(a)""") add_newdoc('numpy.core.multiarray','correlate', """cross_correlate(a,v, mode=0)""") add_newdoc('numpy.core.multiarray','arange', """arange([start,] stop[, step,], dtype=None) For integer arguments, just like range() except it returns an array whose type can be specified by the keyword argument dtype. If dtype is not specified, the type of the result is deduced from the type of the arguments. For floating point arguments, the length of the result is ceil((stop - start)/step). This rule may result in the last element of the result being greater than stop. """) add_newdoc('numpy.core.multiarray','_get_ndarray_c_version', """_get_ndarray_c_version() Return the compile time NDARRAY_VERSION number. """) add_newdoc('numpy.core.multiarray','_reconstruct', """_reconstruct(subtype, shape, dtype) Construct an empty array. Used by Pickles. """) add_newdoc('numpy.core.multiarray','set_string_function', """set_string_function(f, repr=1) Set the python function f to be the function used to obtain a pretty printable string version of an array whenever an array is printed. 
f(M) should expect an array argument M, and should return a string consisting of the desired representation of M for printing. """) add_newdoc('numpy.core.multiarray','set_numeric_ops', """set_numeric_ops(op=func, ...) Set some or all of the number methods for all array objects. Do not forget **dict can be used as the argument list. Return the functions that were replaced, which can be stored and set later. """) add_newdoc('numpy.core.multiarray','where', """where(condition, x, y) or where(condition) Return elements from `x` or `y`, depending on `condition`. *Parameters*: condition : array of bool When True, yield x, otherwise yield y. x,y : 1-dimensional arrays Values from which to choose. *Notes* This is equivalent to [xv if c else yv for (c,xv,yv) in zip(condition,x,y)] The result is shaped like `condition` and has elements of `x` or `y` where `condition` is respectively True or False. In the special case, where only `condition` is given, the tuple condition.nonzero() is returned, instead. *Examples* >>> where([True,False,True],[1,2,3],[4,5,6]) array([1, 5, 3]) """) add_newdoc('numpy.core.multiarray','lexsort', """lexsort(keys=, axis=-1) -> array of indices. Argsort with list of keys. Perform an indirect sort using a list of keys. The first key is sorted, then the second, and so on through the list of keys. At each step the previous order is preserved when equal keys are encountered. The result is a sort on multiple keys. If the keys represented columns of a spreadsheet, for example, this would sort using multiple columns (the last key being used for the primary sort order, the second-to-last key for the secondary sort order, and so on). The keys argument must be a sequence of things that can be converted to arrays of the same shape. Parameters: a : array type Array containing values that the returned indices should sort. axis : integer Axis to be indirectly sorted. None indicates that the flattened array should be used. Default is -1. 
Returns: indices : integer array Array of indices that sort the keys along the specified axis. The array has the same shape as the keys. SeeAlso: argsort : indirect sort sort : inplace sort """) add_newdoc('numpy.core.multiarray','can_cast', """can_cast(from=d1, to=d2) Returns True if data type d1 can be cast to data type d2 without losing precision. """) add_newdoc('numpy.core.multiarray','newbuffer', """newbuffer(size) Return a new uninitialized buffer object of size bytes """) add_newdoc('numpy.core.multiarray','getbuffer', """getbuffer(obj [,offset[, size]]) Create a buffer object from the given object referencing a slice of length size starting at offset. Default is the entire buffer. A read-write buffer is attempted followed by a read-only buffer. """) ############################################################################## # # Documentation for ndarray attributes and methods # ############################################################################## ############################################################################## # # ndarray object # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', """An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type-descriptor object details the data-type in an array (including byteorder and any fields). An array can be constructed using the numpy.array command. Arrays are sequence, mapping and numeric objects. More information is available in the numpy module and by looking at the methods and attributes of an array. ndarray.__new__(subtype, shape=, dtype=float, buffer=None, offset=0, strides=None, order=None) There are two modes of creating an array using __new__: 1) If buffer is None, then only shape, dtype, and order are used 2) If buffer is an object exporting the buffer interface, then all keywords are interpreted. 
The dtype parameter can be any object that can be interpreted as a numpy.dtype object. No __init__ method is needed because the array is fully initialized after the __new__ method. """) ############################################################################## # # ndarray attributes # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', """Array protocol: Python side.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', """None.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', """Array priority.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', """Array protocol: C-struct side.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_', """Allow the array to be interpreted as a ctypes object by returning the data-memory location as an integer """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('base', """Base object if memory is from some other object. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', """A ctypes interface object. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('data', """Buffer object pointing to the start of the data. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', """Data-type for the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', """Imaginary part of the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', """Length of one element in bytes. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', """Special object providing array flags. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', """A 1-d flat iterator. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', """Number of bytes in the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', """Number of array dimensions. 
""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('real', """Real part of the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', """Tuple of array dimensions. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('size', """Number of elements in the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', """Tuple of bytes to step in each dimension. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('T', """Same as self.transpose() except self is returned for self.ndim < 2. """)) ############################################################################## # # ndarray methods # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. Returns either a new reference to self if dtype is not given or a new array of provided data type if dtype is different from the current dtype of the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', """a.__array_wrap__(obj) -> Object of same type as a from ndarray obj. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', """a.__copy__(|order) -> copy, possibly with different order. Return a copy of the array. Argument: order -- Order of returned copy (default 'C') If order is 'C' (False) then the result is contiguous (default). If order is 'Fortran' (True) then the result has fortran order. If order is 'Any' (None) then the result has fortran order only if m is already in fortran order.; """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', """a.__deepcopy__() -> Deep copy of array. Used if copy.deepcopy is called on an array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', """a.__reduce__() For pickling. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', """a.__setstate__(version, shape, typecode, isfortran, rawdata) For unpickling. 
Arguments: version -- optional pickle version. If omitted defaults to 0. shape -- a tuple giving the shape typecode -- a typecode isFortran -- a bool stating if Fortran or no rawdata -- a binary string with the data (or a list if Object array) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('all', """ a.all(axis=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('any', """ a.any(axis=None, out=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', """ a.argmax(axis=None, out=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', """ a.argmin(axis=None, out=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', """a.argsort(axis=-1, kind='quicksort', order=None) -> indices Perform an indirect sort along the given axis using the algorithm specified by the kind keyword. It returns an array of indices of the same shape as 'a' that index data along the given axis in sorted order. :Parameters: axis : integer Axis to be indirectly sorted. None indicates that the flattened array should be used. Default is -1. kind : string Sorting algorithm to use. Possible values are 'quicksort', 'mergesort', or 'heapsort'. Default is 'quicksort'. order : list type or None When a is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. :Returns: indices : integer array Array of indices that sort 'a' along the specified axis. :SeeAlso: - lexsort : indirect stable sort with multiple keys - sort : inplace sort :Notes: ------ The various sorts are characterized by average speed, worst case performance, need for work space, and whether they are stable. A stable sort keeps items with the same key in the same relative order. 
The three available algorithms have the following properties: |------------------------------------------------------| | kind | speed | worst case | work space | stable| |------------------------------------------------------| |'quicksort'| 1 | O(n^2) | 0 | no | |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | |'heapsort' | 3 | O(n*log(n)) | 0 | no | |------------------------------------------------------| All the sort algorithms make temporary copies of the data when the sort is not along the last axis. Consequently, sorts along the last axis are faster and use less space than sorts along other axis. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', """a.astype(t) -> Copy of array cast to type t. Cast array m to type t. t can be either a string representing a typecode, or a python type object of type int, float, or complex. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', """a.byteswap(False) -> View or copy. Swap the bytes in the array. Swap the bytes in the array. Return the byteswapped array. If the first argument is True, byteswap in-place and return a reference to self. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', """ a.choose(b0, b1, ..., bn, out=None, mode='raise') Return an array that merges the b_i arrays together using 'a' as the index The b_i arrays and 'a' must all be broadcastable to the same shape. The output at a particular position is the input array b_i at that position depending on the value of 'a' at that position. 
Therefore, 'a' must be an integer array with entries from 0 to n+1.; """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', """a.clip(min=, max=, out=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', """a.compress(condition=, axis=None, out=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', """a.conj() """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', """a.conjugate() """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', """a.copy(|order) -> copy, possibly with different order. Return a copy of the array. Argument: order -- Order of returned copy (default 'C') If order is 'C' (False) then the result is contiguous (default). If order is 'Fortran' (True) then the result has fortran order. If order is 'Any' (None) then the result has fortran order only if m is already in fortran order.; """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', """a.cumprod(axis=None, dtype=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', """a.cumsum(axis=None, dtype=None, out=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', """a.diagonal(offset=0, axis1=0, axis2=1) -> diagonals If a is 2-d, return the diagonal of self with the given offset, i.e., the collection of elements of the form a[i,i+offset]. If a is n-d with n > 2, then the axes specified by axis1 and axis2 are used to determine the 2-d subarray whose diagonal is returned. The shape of the resulting array can be determined by removing axis1 and axis2 and appending an index to the right equal to the size of the resulting diagonals. :Parameters: offset : integer Offset of the diagonal from the main diagonal. Can be both positive and negative. Defaults to main diagonal. axis1 : integer Axis to be used as the first axis of the 2-d subarrays from which the diagonals should be taken. Defaults to first index. 
axis2 : integer Axis to be used as the second axis of the 2-d subarrays from which the diagonals should be taken. Defaults to second index. :Returns: array_of_diagonals : same type as original array If a is 2-d, then a 1-d array containing the diagonal is returned. If a is n-d, n > 2, then an array of diagonals is returned. :SeeAlso: - diag : matlab workalike for 1-d and 2-d arrays. - diagflat : creates diagonal arrays - trace : sum along diagonals Examples -------- >>> a = arange(4).reshape(2,2) >>> a array([[0, 1], [2, 3]]) >>> a.diagonal() array([0, 3]) >>> a.diagonal(1) array([1]) >>> a = arange(8).reshape(2,2,2) >>> a array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> a.diagonal(0,-2,-1) array([[0, 3], [4, 7]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', """a.dump(file) Dump a pickle of the array to the specified file. The array can be read back with pickle.load or numpy.load Arguments: file -- string naming the dump file. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', """a.dumps() returns the pickle of the array as a string. pickle.loads or numpy.loads will convert the string back to an array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', """a.fill(value) -> None. Fill the array with the scalar value. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', """a.flatten([fortran]) return a 1-d array (always copy) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', """a.getfield(dtype, offset) -> field of array as given type. Returns a field of the given array as a certain type. A field is a view of the array data with each itemsize determined by the given type and the offset into the current array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('item', """a.item() -> copy of first array item as Python scalar. Copy the first element of array to a standard Python scalar and return it. The array must be of size one. 
""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('max', """a.max(axis=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', """a.mean(axis=None, dtype=None, out=None) -> mean Returns the average of the array elements. The average is taken over the flattened array by default, otherwise over the specified axis. :Parameters: axis : integer Axis along which the means are computed. The default is to compute the standard deviation of the flattened array. dtype : type Type to use in computing the means. For arrays of integer type the default is float32, for arrays of float types it is the same as the array type. out : ndarray Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. :Returns: mean : The return type varies, see above. A new array holding the result is returned unless out is specified, in which case a reference to out is returned. :SeeAlso: - var : variance - std : standard deviation Notes ----- The mean is the sum of the elements along the axis divided by the number of elements. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('min', """a.min(axis=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', """a.newbyteorder(<byteorder>) is equivalent to a.view(a.dtype.newbytorder(<byteorder>)) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', """a.nonzero() returns a tuple of arrays Returns a tuple of arrays, one for each dimension of a, containing the indices of the non-zero elements in that dimension. The corresponding non-zero values can be obtained with a[a.nonzero()]. To group the indices by element, rather than dimension, use transpose(a.nonzero()) instead. 
The result of this is always a 2d array, with a row for each non-zero element.; """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', """a.prod(axis=None, dtype=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', """a.ptp(axis=None) a.max(axis)-a.min(axis) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('put', """a.put(indices, values, mode) sets a.flat[n] = values[n] for each n in indices. If values is shorter than indices then it will repeat. """)) add_newdoc('numpy.core.multiarray', 'putmask', """putmask(a, mask, values) sets a.flat[n] = values[n] for each n where mask.flat[n] is true. If values is not the same size of a and mask then it will repeat. This gives different behavior than a[mask] = values. """) add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', """a.ravel([fortran]) return a 1-d array (copy only if needed) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', """a.repeat(repeats=, axis=none) copy elements of a, repeats times. the repeats argument must be a sequence of length a.shape[axis] or a scalar. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', """a.reshape(d1, d2, ..., dn, order='c') Return a new array from this one. The new array must have the same number of elements as self. Also always returns a view or raises a ValueError if that is impossible. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', """a.resize(new_shape, refcheck=True, order=False) -> None. Change array shape. Change size and shape of self inplace. Array must own its own memory and not be referenced by other arrays. Returns None. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('round', """a.round(decimals=0, out=None) -> out (a). Rounds to 'decimals' places. Keyword arguments: decimals -- number of decimals to round to (default 0). May be negative. out -- existing array to use for output (default a). Return: Reference to out, where None specifies the original array a. 
Round to the specified number of decimals. When 'decimals' is negative it specifies the number of positions to the left of the decimal point. The real and imaginary parts of complex numbers are rounded separately. Nothing is done if the array is not of float type and 'decimals' is >= 0. The keyword 'out' may be used to specify a different array to hold the result rather than the default 'a'. If the type of the array specified by 'out' differs from that of 'a', the result is cast to the new type, otherwise the original type is kept. Floats round to floats by default. Numpy rounds to even. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due to the inexact representation of decimal fractions in IEEE floating point and the errors introduced in scaling the numbers when 'decimals' is something other than 0. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', """a.searchsorted(v, side='left') -> index array. Find the indices into a sorted array such that if the corresponding keys in v were inserted before the indices the order of a would be preserved. If side='left', then the first such index is returned. If side='right', then the last such index is returned. If there is no such index because the key is out of bounds, then the length of a is returned, i.e., the key would need to be appended. The returned index array has the same shape as v. :Parameters: v : array or list type Array of keys to be searched for in a. side : string Possible values are : 'left', 'right'. Default is 'left'. Return the first or last index where the key could be inserted. :Returns: indices : integer array The returned array has the same shape as v. :SeeAlso: - sort - histogram :Notes: ------- The array a must be 1-d and is assumed to be sorted in ascending order. Searchsorted uses binary search to find the required insertion points. 
""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', """m.setfield(value, dtype, offset) -> None. places val into field of the given array defined by the data type and offset. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', """a.setflags(write=None, align=None, uic=None) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', """a.sort(axis=-1, kind='quicksort', order=None) -> None. Perform an inplace sort along the given axis using the algorithm specified by the kind keyword. :Parameters: axis : integer Axis to be sorted along. None indicates that the flattened array should be used. Default is -1. kind : string Sorting algorithm to use. Possible values are 'quicksort', 'mergesort', or 'heapsort'. Default is 'quicksort'. order : list type or None When a is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. :Returns: None :SeeAlso: - argsort : indirect sort - lexsort : indirect stable sort on multiple keys - searchsorted : find keys in sorted array :Notes: ------ The various sorts are characterized by average speed, worst case performance, need for work space, and whether they are stable. A stable sort keeps items with the same key in the same relative order. The three available algorithms have the following properties: |------------------------------------------------------| | kind | speed | worst case | work space | stable| |------------------------------------------------------| |'quicksort'| 1 | O(n^2) | 0 | no | |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | |'heapsort' | 3 | O(n*log(n)) | 0 | no | |------------------------------------------------------| All the sort algorithms make temporary copies of the data when the sort is not along the last axis. Consequently, sorts along the last axis are faster and use less space than sorts along other axis. 
""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze', """m.squeeze() eliminate all length-1 dimensions """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('std', """a.std(axis=None, dtype=None, out=None) -> standard deviation. Returns the standard deviation of the array elements, a measure of the spread of a distribution. The standard deviation is computed for the flattened array by default, otherwise over the specified axis. :Parameters: axis : integer Axis along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array. dtype : type Type to use in computing the standard deviation. For arrays of integer type the default is float32, for arrays of float types it is the same as the array type. out : ndarray Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. :Returns: standard deviation : The return type varies, see above. A new array holding the result is returned unless out is specified, in which case a reference to out is returned. :SeeAlso: - var : variance - mean : average Notes ----- The standard deviation is the square root of the average of the squared deviations from the mean, i.e. var = sqrt(mean((x - x.mean())**2)). The computed standard deviation is biased, i.e., the mean is computed by dividing by the number of elements, N, rather than by N-1. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('sum', """a.sum(axis=None, dtype=None) -> Sum of array over given axis. Sum the array over the given axis. If the axis is None, sum over all dimensions of the array. The optional dtype argument is the data type for the returned value and intermediate calculations. The default is to upcast (promote) smaller integer types to the platform-dependent int. 
For example, on 32-bit platforms: a.dtype default sum dtype --------------------------------------------------- bool, int8, int16, int32 int32 Warning: The arithmetic is modular and no error is raised on overflow. Examples: >>> array([0.5, 1.5]).sum() 2.0 >>> array([0.5, 1.5]).sum(dtype=int32) 1 >>> array([[0, 1], [0, 5]]).sum(axis=0) array([0, 6]) >>> array([[0, 1], [0, 5]]).sum(axis=1) array([1, 5]) >>> ones(128, dtype=int8).sum(dtype=int8) # overflow! -128 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes', """a.swapaxes(axis1, axis2) -> new view with axes swapped. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('take', """a.take(indices, axis=None, out=None, mode='raise') -> new array. The new array is formed from the elements of a indexed by indices along the given axis. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile', """a.tofile(fid, sep="", format="%s") -> None. Write the data to a file. Required arguments: file -- an open file object or a string containing a filename Keyword arguments: sep -- separator for text output. Write binary if empty (default "") format -- format string for text file output (default "%s") A convenience function for quick storage of array data. Information on endianess and precision is lost, so this method is not a good choice for files intended to archive data or transport data between machines with different endianess. Some of these problems can be overcome by outputting the data as text files at the expense of speed and file size. If 'sep' is empty this method is equivalent to file.write(a.tostring()). If 'sep' is not empty each data item is converted to the nearest Python type and formatted using "format"%item. The resulting strings are written to the file separated by the contents of 'sep'. The data is always written in "C" (row major) order independent of the order of 'a'. The data produced by this method can be recovered by using the function fromfile(). 
""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', """a.tolist() -> Array as hierarchical list. Copy the data portion of the array to a hierarchical python list and return that list. Data items are converted to the nearest compatible Python type. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', """a.tostring(order='C') -> raw copy of array data as a Python string. Keyword arguments: order -- order of the data item in the copy {"C","F","A"} (default "C") Construct a Python string containing the raw bytes in the array. The order of the data in arrays with ndim > 1 is specified by the 'order' keyword and this keyword overrides the order of the array. The choices are: "C" -- C order (row major) "Fortran" -- Fortran order (column major) "Any" -- Current order of array. None -- Same as "Any" """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', """a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) return the sum along the offset diagonal of the array's indicated axis1 and axis2. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', """a.transpose(*axes) Returns a view of 'a' with axes transposed. If no axes are given, or None is passed, switches the order of the axes. For a 2-d array, this is the usual matrix transpose. If axes are given, they describe how the axes are permuted. Example: >>> a = array([[1,2],[3,4]]) >>> a array([[1, 2], [3, 4]]) >>> a.transpose() array([[1, 3], [2, 4]]) >>> a.transpose((1,0)) array([[1, 3], [2, 4]]) >>> a.transpose(1,0) array([[1, 3], [2, 4]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('var', """a.var(axis=None, dtype=None, out=None) -> variance Returns the variance of the array elements, a measure of the spread of a distribution. The variance is computed for the flattened array by default, otherwise over the specified axis. :Parameters: axis : integer Axis along which the variance is computed. The default is to compute the variance of the flattened array. 
dtype : type Type to use in computing the variance. For arrays of integer type the default is float32, for arrays of float types it is the same as the array type. out : ndarray Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. :Returns: variance : The return type varies, see above. A new array holding the result is returned unless out is specified, in which case a reference to out is returned. :SeeAlso: - std : standard deviation - mean: average Notes ----- The variance is the average of the squared deviations from the mean, i.e. var = mean((x - x.mean())**2). The computed variance is biased, i.e., the mean is computed by dividing by the number of elements, N, rather than by N-1. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('view', """a.view(<type>) -> new view of array with same data. Type can be either a new sub-type object or a data-descriptor object """))
bsd-3-clause
jmesteve/openerpseda
openerp/addons/process/process.py
50
16137
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import pooler
from openerp import tools
from openerp.osv import fields, osv


class Env(dict):
    """Evaluation context for node/condition state expressions.

    Behaves like a read-only mapping exposing:
      - 'object': the record the process is being evaluated against,
      - 'user':   the current res.users record,
      - any other key: delegated to the wrapped record (field access).
    """

    def __init__(self, obj, user):
        self.__obj = obj
        # BUGFIX: was ``self.__usr = user``.  Private-name mangling made that
        # attribute ``_Env__usr`` while __getitem__ reads ``self.__user``
        # (``_Env__user``), so env['user'] always raised AttributeError and the
        # bare except around eval() in graph_get() silently swallowed it.
        self.__user = user

    def __getitem__(self, name):
        # NOTE(review): these literal keys are never set as dict items, so this
        # branch raises KeyError if ever hit; kept for backward compatibility.
        if name in ('__obj', '__user'):
            return super(Env, self).__getitem__(name)
        if name == 'user':
            return self.__user
        if name == 'object':
            return self.__obj
        # Any other name is resolved as a field of the wrapped record.
        return self.__obj[name]


class process_process(osv.osv):
    _name = "process.process"
    _description = "Process"
    _columns = {
        'name': fields.char('Name', size=30, required=True, translate=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the process without removing it."),
        'model_id': fields.many2one('ir.model', 'Object', ondelete='set null'),
        'note': fields.text('Notes', translate=True),
        'node_ids': fields.one2many('process.node', 'process_id', 'Nodes')
    }
    _defaults = {
        'active': lambda *a: True,
    }

    def search_by_model(self, cr, uid, res_model, context=None):
        """Return [(id, name)] of processes related to ``res_model``.

        First matches processes whose model_id is the given model; if none,
        falls back to processes owning a node attached to that model.
        """
        pool = pooler.get_pool(cr.dbname)
        model_ids = (res_model or None) and pool.get('ir.model').search(cr, uid, [('model', '=', res_model)])
        domain = (model_ids or []) and [('model_id', 'in', model_ids)]
        result = []

        # search all processes directly bound to the model
        res = pool.get('process.process').search(cr, uid, domain)
        if res:
            res = pool.get('process.process').browse(cr, uid, res, context=context)
            for process in res:
                result.append((process.id, process.name))
            return result

        # else search process nodes bound to the model
        res = pool.get('process.node').search(cr, uid, domain)
        if res:
            res = pool.get('process.node').browse(cr, uid, res, context=context)
            for node in res:
                if (node.process_id.id, node.process_id.name) not in result:
                    result.append((node.process_id.id, node.process_id.name))
        return result

    def graph_get(self, cr, uid, id, res_model, res_id, scale, context=None):
        """Build the renderable graph (nodes + transitions) of one process.

        :param id: id of the process to render
        :param res_model: model name of the record being inspected (or False)
        :param res_id: id of that record (or False)
        :param scale: positional args forwarded to tools.graph.scale()
        :return: dict with name, resource, state, perm, notes, nodes,
                 transitions (node/transition ids stringified for JSON use)
        """
        pool = pooler.get_pool(cr.dbname)
        process = pool.get('process.process').browse(cr, uid, id, context=context)
        name = process.name
        resource = False
        state = 'N/A'
        expr_context = {}
        states = {}
        perm = False

        if res_model:
            states = dict(pool.get(res_model).fields_get(cr, uid, context=context).get('state', {}).get('selection', {}))

        if res_id:
            current_object = pool.get(res_model).browse(cr, uid, res_id, context=context)
            current_user = pool.get('res.users').browse(cr, uid, uid, context=context)
            expr_context = Env(current_object, current_user)
            resource = current_object.name
            if 'state' in current_object:
                state = states.get(current_object.state, 'N/A')
            perm = pool.get(res_model).perm_read(cr, uid, [res_id], context=context)[0]

        notes = process.note or "N/A"
        nodes = {}
        start = []
        transitions = {}

        for node in process.node_ids:
            data = {}
            data['name'] = node.name
            data['model'] = (node.model_id or None) and node.model_id.model
            data['kind'] = node.kind
            data['subflow'] = (node.subflow_id or False) and [node.subflow_id.id, node.subflow_id.name]
            data['notes'] = node.note
            data['active'] = False
            data['gray'] = False
            data['url'] = node.help_url
            data['model_states'] = node.model_states

            # get associated workflow, if any
            if data['model']:
                wkf_ids = self.pool.get('workflow').search(cr, uid, [('osv', '=', data['model'])])
                data['workflow'] = (wkf_ids or False) and wkf_ids[0]

            if 'directory_id' in node and node.directory_id:
                data['directory_id'] = node.directory_id.id
                data['directory'] = self.pool.get('document.directory').get_resource_path(cr, uid, data['directory_id'], data['model'], False)

            if node.menu_id:
                data['menu'] = {'name': node.menu_id.complete_name, 'id': node.menu_id.id}

            # SECURITY NOTE: model_states expressions are evaluated with
            # eval(); they are admin-configured data, not end-user input.
            # Best effort: any evaluation failure just leaves the node gray.
            try:
                gray = True
                for cond in node.condition_ids:
                    if cond.model_id and cond.model_id.model == res_model:
                        gray = gray and eval(cond.model_states, expr_context)
                data['gray'] = not gray
            except Exception:
                pass

            if not data['gray']:
                if node.model_id and node.model_id.model == res_model:
                    try:
                        data['active'] = eval(node.model_states, expr_context)
                    except Exception:
                        pass

            nodes[node.id] = data
            if node.flow_start:
                start.append(node.id)

            for tr in node.transition_out:
                data = {}
                data['name'] = tr.name
                data['source'] = tr.source_node_id.id
                data['target'] = tr.target_node_id.id
                data['notes'] = tr.note
                data['buttons'] = buttons = []
                for b in tr.action_ids:
                    button = {}
                    button['name'] = b.name
                    button['state'] = b.state
                    button['action'] = b.action
                    buttons.append(button)
                data['groups'] = groups = []
                for r in tr.transition_ids:
                    if r.group_id:
                        groups.append({'name': r.group_id.name})
                for r in tr.group_ids:
                    groups.append({'name': r.name})
                transitions[tr.id] = data

        # now populate resource information, walking related nodes recursively
        def update_relatives(nid, ref_id, ref_model):
            # Nodes directly connected to nid via any transition.
            relatives = []
            for dummy, tr in transitions.items():
                if tr['source'] == nid:
                    relatives.append(tr['target'])
                if tr['target'] == nid:
                    relatives.append(tr['source'])

            if not ref_id:
                nodes[nid]['res'] = False
                return

            nodes[nid]['res'] = resource = {'id': ref_id, 'model': ref_model}
            refobj = pool.get(ref_model).browse(cr, uid, ref_id, context=context)
            fields = pool.get(ref_model).fields_get(cr, uid, context=context)

            # check for directory_id inherited from the document module
            if nodes[nid].get('directory_id', False):
                resource['directory'] = self.pool.get('document.directory').get_resource_path(cr, uid, nodes[nid]['directory_id'], ref_model, ref_id)

            resource['name'] = pool.get(ref_model).name_get(cr, uid, [ref_id], context=context)[0][1]
            resource['perm'] = pool.get(ref_model).perm_read(cr, uid, [ref_id], context=context)[0]

            # current_user is bound because update_relatives is only invoked
            # under the ``if res_id:`` branch below.
            ref_expr_context = Env(refobj, current_user)
            try:
                if not nodes[nid]['gray']:
                    nodes[nid]['active'] = eval(nodes[nid]['model_states'], ref_expr_context)
            except Exception:
                pass

            for r in relatives:
                node = nodes[r]
                if 'res' not in node:
                    for n, f in fields.items():
                        if node['model'] == ref_model:
                            update_relatives(r, ref_id, ref_model)
                        elif f.get('relation') == node['model']:
                            rel = refobj[n]
                            if rel and isinstance(rel, list):
                                rel = rel[0]
                            try:
                                # XXX: rel has been reported as string (check it)
                                _id = (rel or False) and rel.id
                                _model = node['model']
                                update_relatives(r, _id, _model)
                            except Exception:
                                pass

        if res_id:
            for nid, node in nodes.items():
                if not node['gray'] and (node['active'] or node['model'] == res_model):
                    update_relatives(nid, res_id, res_model)
                    break

        # calculate graph layout
        g = tools.graph(nodes.keys(), map(lambda x: (x['source'], x['target']), transitions.values()))
        g.process(start)
        g.scale(*scale)
        #g.scale(100, 100, 180, 120)
        graph = g.result_get()

        # fix the height problem: shift all nodes so the topmost sits near y=10
        miny = -1
        for k, v in nodes.items():
            x = graph[k]['x']
            y = graph[k]['y']
            if miny == -1:
                miny = y
            miny = min(y, miny)
            v['x'] = x
            v['y'] = y

        for k, v in nodes.items():
            y = v['y']
            v['y'] = min(y - miny + 10, y)

        # stringify keys so the result is safe for JSON/XML-RPC transport
        nodes = dict([str(n_key), n_val] for n_key, n_val in nodes.iteritems())
        transitions = dict([str(t_key), t_val] for t_key, t_val in transitions.iteritems())
        return dict(name=name, resource=resource, state=state, perm=perm, notes=notes, nodes=nodes, transitions=transitions)

    def copy(self, cr, uid, id, default=None, context=None):
        """ Deep copy the entire process, including nodes and transitions.
        """
        if not default:
            default = {}

        pool = pooler.get_pool(cr.dbname)
        process = pool.get('process.process').browse(cr, uid, id, context=context)

        nodes = {}
        transitions = {}

        # first copy all nodes and map the new nodes to the originals,
        # for later use when re-wiring transitions
        for node in process.node_ids:
            for t in node.transition_in:
                tr = transitions.setdefault(t.id, {})
                tr['target'] = node.id
            for t in node.transition_out:
                tr = transitions.setdefault(t.id, {})
                tr['source'] = node.id
            nodes[node.id] = pool.get('process.node').copy(cr, uid, node.id, context=context)

        # then copy transitions, pointing them at the new nodes
        for tid, tr in transitions.items():
            vals = {
                'source_node_id': nodes[tr['source']],
                'target_node_id': nodes[tr['target']]
            }
            tr = pool.get('process.transition').copy(cr, uid, tid, default=vals, context=context)

        # and finally copy the process itself with the new nodes
        default.update({
            'active': True,
            'node_ids': [(6, 0, nodes.values())]
        })
        return super(process_process, self).copy(cr, uid, id, default, context)

process_process()


class process_node(osv.osv):
    _name = 'process.node'
    _description = 'Process Node'
    _columns = {
        'name': fields.char('Name', size=30, required=True, translate=True),
        'process_id': fields.many2one('process.process', 'Process', required=True, ondelete='cascade'),
        'kind': fields.selection([('state', 'Status'), ('subflow', 'Subflow')], 'Kind of Node', required=True),
        'menu_id': fields.many2one('ir.ui.menu', 'Related Menu'),
        'note': fields.text('Notes', translate=True),
        'model_id': fields.many2one('ir.model', 'Object', ondelete='set null'),
        'model_states': fields.char('States Expression', size=128),
        'subflow_id': fields.many2one('process.process', 'Subflow', ondelete='set null'),
        'flow_start': fields.boolean('Starting Flow'),
        'transition_in': fields.one2many('process.transition', 'target_node_id', 'Starting Transitions'),
        'transition_out': fields.one2many('process.transition', 'source_node_id', 'Ending Transitions'),
        'condition_ids': fields.one2many('process.condition', 'node_id', 'Conditions'),
        'help_url': fields.char('Help URL', size=255)
    }
    _defaults = {
        'kind': lambda *args: 'state',
        'model_states': lambda *args: False,
        'flow_start': lambda *args: False,
    }

    def copy_data(self, cr, uid, id, default=None, context=None):
        """Copy a node without its transitions; they are re-created by
        process_process.copy() so they point at the duplicated nodes."""
        if not default:
            default = {}
        default.update({
            'transition_in': [],
            'transition_out': []
        })
        return super(process_node, self).copy_data(cr, uid, id, default, context=context)

process_node()


class process_node_condition(osv.osv):
    _name = 'process.condition'
    _description = 'Condition'
    _columns = {
        'name': fields.char('Name', size=30, required=True),
        'node_id': fields.many2one('process.node', 'Node', required=True, ondelete='cascade'),
        'model_id': fields.many2one('ir.model', 'Object', ondelete='set null'),
        'model_states': fields.char('Expression', required=True, size=128)
    }

process_node_condition()


class process_transition(osv.osv):
    _name = 'process.transition'
    _description = 'Process Transition'
    _columns = {
        'name': fields.char('Name', size=32, required=True, translate=True),
        'source_node_id': fields.many2one('process.node', 'Source Node', required=True, ondelete='cascade'),
        'target_node_id': fields.many2one('process.node', 'Target Node', required=True, ondelete='cascade'),
        'action_ids': fields.one2many('process.transition.action', 'transition_id', 'Buttons'),
        'transition_ids': fields.many2many('workflow.transition', 'process_transition_ids', 'ptr_id', 'wtr_id', 'Workflow Transitions'),
        'group_ids': fields.many2many('res.groups', 'process_transition_group_rel', 'tid', 'rid', string='Required Groups'),
        'note': fields.text('Description', translate=True),
    }

process_transition()


class process_transition_action(osv.osv):
    _name = 'process.transition.action'
    _description = 'Process Transitions Actions'
    _columns = {
        'name': fields.char('Name', size=32, required=True, translate=True),
        'state': fields.selection([('dummy', 'Dummy'), ('object', 'Object Method'), ('workflow', 'Workflow Trigger'), ('action', 'Action')], 'Type', required=True),
        'action': fields.char('Action ID', size=64, states={
            'dummy': [('readonly', 1)],
            'object': [('required', 1)],
            'workflow': [('required', 1)],
            'action': [('required', 1)],
        },),
        'transition_id': fields.many2one('process.transition', 'Transition', required=True, ondelete='cascade')
    }
    _defaults = {
        'state': lambda *args: 'dummy',
    }

    def copy_data(self, cr, uid, id, default=None, context=None):
        """Preserve the original action type when duplicating."""
        if not default:
            default = {}
        state = self.pool.get('process.transition.action').browse(cr, uid, id, context=context).state
        if state:
            default['state'] = state
        return super(process_transition_action, self).copy_data(cr, uid, id, default, context)

process_transition_action()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
gunan/tensorflow
tensorflow/python/ops/resources.py
36
4444
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for using generic resources.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import tf_should_use _Resource = collections.namedtuple("_Resource", ["handle", "create", "is_initialized"]) def register_resource(handle, create_op, is_initialized_op, is_shared=True): """Registers a resource into the appropriate collections. This makes the resource findable in either the shared or local resources collection. Args: handle: op which returns a handle for the resource. create_op: op which initializes the resource. is_initialized_op: op which returns a scalar boolean tensor of whether the resource has been initialized. is_shared: if True, the resource gets added to the shared resource collection; otherwise it gets added to the local resource collection. 
""" resource = _Resource(handle, create_op, is_initialized_op) if is_shared: ops.add_to_collection(ops.GraphKeys.RESOURCES, resource) else: ops.add_to_collection(ops.GraphKeys.LOCAL_RESOURCES, resource) def shared_resources(): """Returns resources visible to all tasks in the cluster.""" return ops.get_collection(ops.GraphKeys.RESOURCES) def local_resources(): """Returns resources intended to be local to this session.""" return ops.get_collection(ops.GraphKeys.LOCAL_RESOURCES) def report_uninitialized_resources(resource_list=None, name="report_uninitialized_resources"): """Returns the names of all uninitialized resources in resource_list. If the returned tensor is empty then all resources have been initialized. Args: resource_list: resources to check. If None, will use shared_resources() + local_resources(). name: name for the resource-checking op. Returns: Tensor containing names of the handles of all resources which have not yet been initialized. """ if resource_list is None: resource_list = shared_resources() + local_resources() with ops.name_scope(name): # Run all operations on CPU local_device = os.environ.get( "TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING", "/cpu:0") with ops.device(local_device): if not resource_list: # Return an empty tensor so we only need to check for returned tensor # size being 0 as an indication of model ready. return array_ops.constant([], dtype=dtypes.string) # Get a 1-D boolean tensor listing whether each resource is initialized. variables_mask = math_ops.logical_not( array_ops.stack([r.is_initialized for r in resource_list])) # Get a 1-D string tensor containing all the resource names. variable_names_tensor = array_ops.constant( [s.handle.name for s in resource_list]) # Return a 1-D tensor containing all the names of uninitialized resources. 
return array_ops.boolean_mask(variable_names_tensor, variables_mask) @tf_should_use.should_use_result def initialize_resources(resource_list, name="init"): """Initializes the resources in the given list. Args: resource_list: list of resources to initialize. name: name of the initialization op. Returns: op responsible for initializing all resources. """ if resource_list: return control_flow_ops.group(*[r.create for r in resource_list], name=name) return control_flow_ops.no_op(name=name)
apache-2.0
hyperized/ansible
lib/ansible/modules/network/fortios/fortios_system_password_policy.py
13
12578
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_system_password_policy short_description: Configure password policy for locally defined administrator passwords and IPsec VPN pre-shared keys in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify system feature and password_policy category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.9" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. 
A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true system_password_policy: description: - Configure password policy for locally defined administrator passwords and IPsec VPN pre-shared keys. default: null type: dict suboptions: apply_to: description: - Apply password policy to administrator passwords or IPsec pre-shared keys or both. Separate entries with a space. type: str choices: - admin-password - ipsec-preshared-key change_4_characters: description: - Enable/disable changing at least 4 characters for a new password (This attribute overrides reuse-password if both are enabled). type: str choices: - enable - disable expire_day: description: - Number of days after which passwords expire (1 - 999 days). type: int expire_status: description: - Enable/disable password expiration. type: str choices: - enable - disable min_lower_case_letter: description: - Minimum number of lowercase characters in password (0 - 128). type: int min_non_alphanumeric: description: - Minimum number of non-alphanumeric characters in password (0 - 128). type: int min_number: description: - Minimum number of numeric characters in password (0 - 128). type: int min_upper_case_letter: description: - Minimum number of uppercase characters in password (0 - 128). type: int minimum_length: description: - Minimum password length (8 - 128). type: int reuse_password: description: - Enable/disable reusing of password (if both reuse-password and change-4-characters are enabled, change-4-characters overrides). type: str choices: - enable - disable status: description: - Enable/disable setting a password policy for locally defined administrator passwords and IPsec VPN pre-shared keys. 
type: str choices: - enable - disable ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure password policy for locally defined administrator passwords and IPsec VPN pre-shared keys. fortios_system_password_policy: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" system_password_policy: apply_to: "admin-password" change_4_characters: "enable" expire_day: "5" expire_status: "enable" min_lower_case_letter: "7" min_non_alphanumeric: "8" min_number: "9" min_upper_case_letter: "10" minimum_length: "11" reuse_password: "enable" status: "enable" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from 
ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_system_password_policy_data(json): option_list = ['apply_to', 'change_4_characters', 'expire_day', 'expire_status', 'min_lower_case_letter', 'min_non_alphanumeric', 'min_number', 'min_upper_case_letter', 'minimum_length', 'reuse_password', 'status'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def system_password_policy(data, fos): vdom = data['vdom'] system_password_policy_data = data['system_password_policy'] filtered_data = underscore_to_hyphen(filter_system_password_policy_data(system_password_policy_data)) return fos.set('system', 'password-policy', data=filtered_data, vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_system(data, fos): if data['system_password_policy']: resp = system_password_policy(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": 
{"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "system_password_policy": { "required": False, "type": "dict", "default": None, "options": { "apply_to": {"required": False, "type": "str", "choices": ["admin-password", "ipsec-preshared-key"]}, "change_4_characters": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "expire_day": {"required": False, "type": "int"}, "expire_status": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "min_lower_case_letter": {"required": False, "type": "int"}, "min_non_alphanumeric": {"required": False, "type": "int"}, "min_number": {"required": False, "type": "int"}, "min_upper_case_letter": {"required": False, "type": "int"}, "minimum_length": {"required": False, "type": "int"}, "reuse_password": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "status": {"required": False, "type": "str", "choices": ["enable", "disable"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_system(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_system(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
gpl-3.0
supertuxkart/stk-stats
userreport/models.py
2
9803
# This file is mainly for detecting the graphics used and storing these
# information in string that are later used for the report
import json
import re
import logging

from django.db import models

LOG = logging.getLogger(__name__)


class UserReport(models.Model):
    """A single uploaded report (opaque JSON payload plus metadata)."""

    # IP address the report was uploaded from
    uploader = models.GenericIPAddressField(editable=False)

    # Hex SHA-1 digest of user's reported ID
    # (The hashing means that publishing the database won't let people upload
    # faked reports under someone else's user ID, and also ensures a simple
    # consistent structure)
    user_id_hash = models.CharField(max_length=40, db_index=True, editable=False)

    # When the server received the upload
    upload_date = models.DateTimeField(auto_now_add=True, db_index=True, editable=False)

    # When the user claims to have generated the report
    generation_date = models.DateTimeField(editable=False)

    data_type = models.CharField(max_length=16, db_index=True, editable=False)
    data_version = models.IntegerField(editable=False)
    data = models.TextField(editable=False)

    def get_data_json(self, cache=True):
        """
        Get the json data
        :param cache flag that indicates to cache the json
        :return json
        """
        def get_json(data):
            try:
                return json.loads(data)
            except ValueError:
                LOG.warning("The data_json is invalid for id = %d", self.id)
                return {}

        # Cache the json.
        # BUG FIX: the previous condition ("cache and not hasattr(...)") only
        # returned the memoised value on the very first call; every later
        # call fell through and re-parsed the payload. Now the cached value
        # is returned whenever caching is requested.
        if cache:
            if not hasattr(self, 'cached_json'):
                self.cached_json = get_json(self.data)
            return self.cached_json
        return get_json(self.data)

    def has_data(self):
        """True when the stored payload parses to a non-empty JSON value."""
        return bool(self.get_data_json())

    def clear_cache(self):
        """Drop the memoised JSON (safe no-op when nothing is cached).

        BUG FIX: delattr used to raise AttributeError when called before any
        get_data_json(cache=True).
        """
        if hasattr(self, 'cached_json'):
            delattr(self, 'cached_json')

    def downcast(self):
        """Return the most specific proxy instance for this report type."""
        if self.data_type == 'hwdetect':
            return UserReport_hwdetect.objects.get(id=self.id)
        return self


class UserReport_hwdetect(UserReport):
    """Proxy over UserReport adding parsing of 'hwdetect' payloads."""

    # Strips vendor prefixes and boring hardware/driver suffixes from a
    # GL_RENDERER string, capturing the interesting device name.
    pattern_device_identifier = re.compile(
        r'^(?:AMD |ATI |NVIDIA |Mesa DRI )?(.*?)\s*(?:GEM 20100328 2010Q1|GEM 20100330 DEVELOPMENT|GEM 20091221 2009Q4|20090101|Series)?\s*(?:x86|/AGP|/PCI|/MMX|/MMX\+|/SSE|/SSE2|/3DNOW!|/3DNow!|/3DNow!\+)*(?: TCL| NO-TCL)?(?: DRI2)?(?: \(Microsoft Corporation - WDDM\))?(?: OpenGL Engine)?\s*$')
    # Leading "major.minor" of a GL_VERSION string.
    pattern_gl_version = re.compile(r'^(\d+\.\d+).*')
    # Known driver-version string layouts, tried in order by gl_driver().
    pattern_gl_driver_mesa_git = re.compile(r'^OpenGL \d+\.\d+(?:\.\d+)? (Mesa \d+\.\d+)-devel \(git-([a-f0-9]+)')
    pattern_gl_driver_mesa_normal = re.compile(r'^OpenGL \d+\.\d+(?:\.\d+)? (Mesa .*)$')
    pattern_gl_driver_nvidia = re.compile(r'^OpenGL \d+\.\d+(?:\.\d+)? NVIDIA (.*)$')
    pattern_gl_driver_amd_direct = re.compile(r'^OpenGL (\d+\.\d+\.\d+) Compatibility Profile Context(?: FireGL)?$')
    pattern_gl_driver_amd_indirect = re.compile(
        r'^OpenGL 1\.4 \((\d+\.\d+\.\d+) Compatibility Profile Context(?: FireGL)?\)$')

    class Meta:
        proxy = True

    def get_os(self):
        """:return the operating system"""
        data_json = self.get_data_json()
        if data_json:
            if data_json.get('os_win'):
                return 'Windows'
            elif data_json.get('os_linux'):
                return 'Linux'
            elif data_json.get('os_macosx'):
                return 'OS X'
            elif data_json.get('os_unix'):
                return 'Other Unix'
        return 'Unknown'

    def gl_renderer(self):
        """Return the (re-decoded, stripped) GL_RENDERER string, or ''."""
        data_json = self.get_data_json()
        if 'GL_RENDERER' not in data_json:
            return ""
        # The renderer string should typically be interpreted as UTF-8
        try:
            return data_json['GL_RENDERER'].encode('iso-8859-1').decode('utf-8').strip()
        except UnicodeError:
            return data_json['GL_RENDERER'].strip()

    def gl_extensions(self):
        """Return the set of reported GL extensions, or None if missing."""
        data_json = self.get_data_json()
        if 'GL_EXTENSIONS' not in data_json:
            LOG.warning("The GL_EXTENSIONS does not exist for id = %d", self.id)
            return None
        values = re.split(r'\s+', data_json['GL_EXTENSIONS'])
        # skip empty strings (e.g. no extensions at all, or leading/trailing space)
        return frozenset(v for v in values if v)

    def gl_limits(self):
        """Collect the GL_* limit values, normalising/hiding noisy entries."""
        data_json = self.get_data_json()
        limits = {}
        for (k, v) in data_json.items():
            if not k.startswith('GL_'):
                continue

            if k == 'GL_VERSION':
                m = re.match(self.pattern_gl_version, v)
                if m:
                    limits[k] = '%s [...]' % m.group(1)
                    limits['GL_VERSION' + '_COMPLETE'] = v  # non standard
                    continue

            if k in ('GL_RENDERER', 'GL_EXTENSIONS'):
                continue

            # Hide some values that got deleted from the report in r8953, for consistency
            if k in ('GL_MAX_COLOR_MATRIX_STACK_DEPTH',
                     'GL_FRAGMENT_PROGRAM_ARB.GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB',
                     'GL_FRAGMENT_PROGRAM_ARB.GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB'):
                continue

            # Hide some pixel depth values that are not really correlated with device
            if k in ('GL_RED_BITS', 'GL_GREEN_BITS', 'GL_BLUE_BITS', 'GL_ALPHA_BITS',
                     'GL_INDEX_BITS', 'GL_DEPTH_BITS', 'GL_STENCIL_BITS',
                     'GL_ACCUM_RED_BITS', 'GL_ACCUM_GREEN_BITS', 'GL_ACCUM_BLUE_BITS',
                     'GL_ACCUM_ALPHA_BITS'):
                continue

            limits[k] = v
        return limits

    def gl_device_identifier(self):
        """
        Construct a nice-looking concise graphics device identifier
        (skipping boring hardware/driver details)
        """
        renderer = self.gl_renderer()
        m = re.match(self.pattern_device_identifier, renderer)
        if m:
            renderer = m.group(1)
        return renderer.strip()

    def gl_vendor(self):
        """Return the stripped GL_VENDOR string, or ''."""
        return self.get_data_json().get('GL_VENDOR', '').strip()

    def gl_driver(self):
        """
        Construct a nice string identifying the driver
        It tries all the known possibilities for drivers to find the used one
        """
        data_json = self.get_data_json()
        if 'gfx_drv_ver' not in data_json or 'GL_VENDOR' not in data_json:
            return ''
        gfx_drv_ver = data_json['gfx_drv_ver']

        # Try the Mesa git style first
        m = re.match(self.pattern_gl_driver_mesa_git, gfx_drv_ver)
        if m:
            return '%s-git-%s' % (m.group(1), m.group(2))

        # Try the normal Mesa style
        m = re.match(self.pattern_gl_driver_mesa_normal, gfx_drv_ver)
        if m:
            return m.group(1)

        # Try the NVIDIA Linux style
        m = re.match(self.pattern_gl_driver_nvidia, gfx_drv_ver)
        if m:
            return m.group(1)

        # Try the ATI Catalyst Linux style
        m = re.match(self.pattern_gl_driver_amd_direct, gfx_drv_ver)
        if m:
            return m.group(1)

        # Try the non-direct-rendering ATI Catalyst Linux style
        m = re.match(self.pattern_gl_driver_amd_indirect, gfx_drv_ver)
        if m:
            return '%s (indirect)' % m.group(1)

        possibilities = []

        # Otherwise, try to guess the relevant Windows driver from the known
        # ICD DLL names for each vendor; fall back to the raw string.
        # (These are the ones listed in lib/sysdep/os/win/wgfx.cpp in the 0 AD code)
        if data_json['GL_VENDOR'] == 'NVIDIA Corporation':
            possibilities = [
                # Assume 64-bit takes precedence
                r'nvoglv64.dll \((.*?)\)',
                r'nvoglv32.dll \((.*?)\)',
                r'nvoglnt.dll \((.*?)\)'
            ]
        if data_json['GL_VENDOR'] in ('ATI Technologies Inc.', 'Advanced Micro Devices, Inc.'):
            possibilities = [
                r'atioglxx.dll \((.*?)\)',
                r'atioglx2.dll \((.*?)\)',
                r'atioglaa.dll \((.*?)\)'
            ]
        if data_json['GL_VENDOR'] == 'Intel':
            possibilities = [
                # Assume 64-bit takes precedence
                r'ig4icd64.dll \((.*?)\)',
                r'ig4icd32.dll \((.*?)\)',
                # Legacy 32-bit
                r'iglicd32.dll \((.*?)\)',
                r'ialmgicd32.dll \((.*?)\)',
                r'ialmgicd.dll \((.*?)\)'
            ]
        for i in possibilities:
            m = re.search(i, gfx_drv_ver)
            if m:
                return m.group(1)

        return gfx_drv_ver


class GraphicsDevice(models.Model):
    """Aggregated per-device statistics derived from hwdetect reports."""
    device_name = models.CharField(max_length=128, db_index=True)
    vendor = models.CharField(max_length=64)
    renderer = models.CharField(max_length=128)
    os = models.CharField(max_length=16)
    driver = models.CharField(max_length=128)
    usercount = models.IntegerField()

    def __str__(self):
        return 'GraphicsDevice<name = "{0}", vendor = "{1}", renderer = "{2}", OS = "{3}", driver = "{4}", ' \
               'usercount = {5}>'.format(self.device_name, self.vendor, self.renderer, self.os, self.driver,
                                         self.usercount)


class GraphicsExtension(models.Model):
    """A GL extension reported for a GraphicsDevice."""
    device = models.ForeignKey(GraphicsDevice)
    name = models.CharField(max_length=128, db_index=True)

    def __str__(self):
        return 'GraphicsExtension<device_id = "{0}", name = "{1}">'.format(self.device_id, self.name)


class GraphicsLimit(models.Model):
    """A GL limit (name/value pair) reported for a GraphicsDevice."""
    device = models.ForeignKey(GraphicsDevice)
    name = models.CharField(max_length=128, db_index=True)
    value = models.CharField(max_length=64)

    def __str__(self):
        return 'GraphicsLimit<device_id = "{0}", name = "{1}", value = "{2}">'.format(self.device_id, self.name,
                                                                                      self.value)
mit
jamielennox/keystoneauth
keystoneauth1/session.py
3
30502
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import functools import hashlib import json import logging import socket import time import uuid import requests import six from six.moves import urllib from keystoneauth1 import _utils as utils from keystoneauth1 import exceptions try: import netaddr except ImportError: netaddr = None try: import osprofiler.web as osprofiler_web except ImportError: osprofiler_web = None USER_AGENT = 'keystoneauth1' _logger = utils.get_logger(__name__) class _JSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, datetime.datetime): return o.isoformat() if isinstance(o, uuid.UUID): return six.text_type(o) if netaddr and isinstance(o, netaddr.IPAddress): return six.text_type(o) return super(_JSONEncoder, self).default(o) class Session(object): """Maintains client communication state and common functionality. As much as possible the parameters to this class reflect and are passed directly to the requests library. :param auth: An authentication plugin to authenticate the session with. (optional, defaults to None) :type auth: :py:class:`keystonauth.auth.base.BaseAuthPlugin` :param requests.Session session: A requests session object that can be used for issuing requests. (optional) :param string original_ip: The original IP of the requesting user which will be sent to identity service in a 'Forwarded' header. (optional) :param verify: The verification arguments to pass to requests. 
These are of the same form as requests expects, so True or False to verify (or not) against system certificates or a path to a bundle or CA certs to check against or None for requests to attempt to locate and use certificates. (optional, defaults to True) :param cert: A client certificate to pass to requests. These are of the same form as requests expects. Either a single filename containing both the certificate and key or a tuple containing the path to the certificate then a path to the key. (optional) :param float timeout: A timeout to pass to requests. This should be a numerical value indicating some amount (or fraction) of seconds or 0 for no timeout. (optional, defaults to 0) :param string user_agent: A User-Agent header string to use for the request. If not provided a default is used. (optional, defaults to 'keystoneauth1') :param int/bool redirect: Controls the maximum number of redirections that can be followed by a request. Either an integer for a specific count or True/False for forever/never. 
(optional, default to 30) """ user_agent = None _REDIRECT_STATUSES = (301, 302, 303, 305, 307) _DEFAULT_REDIRECT_LIMIT = 30 @utils.positional(2) def __init__(self, auth=None, session=None, original_ip=None, verify=True, cert=None, timeout=None, user_agent=None, redirect=_DEFAULT_REDIRECT_LIMIT): if not session: session = requests.Session() # Use TCPKeepAliveAdapter to fix bug 1323862 for scheme in session.adapters.keys(): session.mount(scheme, TCPKeepAliveAdapter()) self.auth = auth self.session = session self.original_ip = original_ip self.verify = verify self.cert = cert self.timeout = None self.redirect = redirect if timeout is not None: self.timeout = float(timeout) # don't override the class variable if none provided if user_agent is not None: self.user_agent = user_agent self._json = _JSONEncoder() def _remove_service_catalog(self, body): try: data = json.loads(body) # V3 token if 'token' in data and 'catalog' in data['token']: data['token']['catalog'] = '<removed>' return self._json.encode(data) # V2 token if 'serviceCatalog' in data['access']: data['access']['serviceCatalog'] = '<removed>' return self._json.encode(data) except Exception: # Don't fail trying to clean up the request body. pass return body @staticmethod def _process_header(header): """Redacts the secure headers to be logged.""" secure_headers = ('authorization', 'x-auth-token', 'x-subject-token',) if header[0].lower() in secure_headers: token_hasher = hashlib.sha1() token_hasher.update(header[1].encode('utf-8')) token_hash = token_hasher.hexdigest() return (header[0], '{SHA1}%s' % token_hash) return header @utils.positional() def _http_log_request(self, url, method=None, data=None, json=None, headers=None, logger=_logger): if not logger.isEnabledFor(logging.DEBUG): # NOTE(morganfainberg): This whole debug section is expensive, # there is no need to do the work if we're not going to emit a # debug log. 
return string_parts = ['REQ: curl -g -i'] # NOTE(jamielennox): None means let requests do its default validation # so we need to actually check that this is False. if self.verify is False: string_parts.append('--insecure') elif isinstance(self.verify, six.string_types): string_parts.append('--cacert "%s"' % self.verify) if method: string_parts.extend(['-X', method]) string_parts.append(url) if headers: for header in six.iteritems(headers): string_parts.append('-H "%s: %s"' % self._process_header(header)) if json: data = self._json.encode(json) if data: if isinstance(data, six.binary_type): try: data = data.decode("ascii") except UnicodeDecodeError: data = "<binary_data>" string_parts.append("-d '%s'" % data) logger.debug(' '.join(string_parts)) @utils.positional() def _http_log_response(self, response=None, json=None, status_code=None, headers=None, text=None, logger=_logger): if not logger.isEnabledFor(logging.DEBUG): return if response is not None: if not status_code: status_code = response.status_code if not headers: headers = response.headers if not text: text = self._remove_service_catalog(response.text) if json: text = self._json.encode(json) string_parts = ['RESP:'] if status_code: string_parts.append('[%s]' % status_code) if headers: for header in six.iteritems(headers): string_parts.append('%s: %s' % self._process_header(header)) if text: string_parts.append('\nRESP BODY: %s\n' % text) logger.debug(' '.join(string_parts)) @utils.positional() def request(self, url, method, json=None, original_ip=None, user_agent=None, redirect=None, authenticated=None, endpoint_filter=None, auth=None, requests_auth=None, raise_exc=True, allow_reauth=True, log=True, endpoint_override=None, connect_retries=0, logger=_logger, **kwargs): """Send an HTTP request with the specified characteristics. Wrapper around `requests.Session.request` to handle tasks such as setting headers, JSON encoding/decoding, and error handling. 
Arguments that are not handled are passed through to the requests library. :param string url: Path or fully qualified URL of HTTP request. If only a path is provided then endpoint_filter must also be provided such that the base URL can be determined. If a fully qualified URL is provided then endpoint_filter will be ignored. :param string method: The http method to use. (e.g. 'GET', 'POST') :param string original_ip: Mark this request as forwarded for this ip. (optional) :param dict headers: Headers to be included in the request. (optional) :param json: Some data to be represented as JSON. (optional) :param string user_agent: A user_agent to use for the request. If present will override one present in headers. (optional) :param int/bool redirect: the maximum number of redirections that can be followed by a request. Either an integer for a specific count or True/False for forever/never. (optional) :param int connect_retries: the maximum number of retries that should be attempted for connection errors. (optional, defaults to 0 - never retry). :param bool authenticated: True if a token should be attached to this request, False if not or None for attach if an auth_plugin is available. (optional, defaults to None) :param dict endpoint_filter: Data to be provided to an auth plugin with which it should be able to determine an endpoint to use for this request. If not provided then URL is expected to be a fully qualified URL. (optional) :param str endpoint_override: The URL to use instead of looking up the endpoint in the auth plugin. This will be ignored if a fully qualified URL is provided but take priority over an endpoint_filter. (optional) :param auth: The auth plugin to use when authenticating this request. This will override the plugin that is attached to the session (if any). 
(optional) :type auth: :py:class:`keystonauth.auth.base.BaseAuthPlugin` :param requests_auth: A requests library auth plugin that cannot be passed via kwarg because the `auth` kwarg collides with our own auth plugins. (optional) :type requests_auth: :py:class:`requests.auth.AuthBase` :param bool raise_exc: If True then raise an appropriate exception for failed HTTP requests. If False then return the request object. (optional, default True) :param bool allow_reauth: Allow fetching a new token and retrying the request on receiving a 401 Unauthorized response. (optional, default True) :param bool log: If True then log the request and response data to the debug log. (optional, default True) :param logger: The logger object to use to log request and responses. If not provided the keystonauth.session default logger will be used. :type logger: logging.Logger :param kwargs: any other parameter that can be passed to requests.Session.request (such as `headers`). Except: 'data' will be overwritten by the data in 'json' param. 'allow_redirects' is ignored as redirects are handled by the session. :raises keystonauth.exceptions.ClientException: For connection failure, or to indicate an error response code. :returns: The response to the request. """ headers = kwargs.setdefault('headers', dict()) if authenticated is None: authenticated = bool(auth or self.auth) if authenticated: auth_headers = self.get_auth_headers(auth) if auth_headers is None: msg = 'No valid authentication is available' raise exceptions.AuthorizationFailure(msg) headers.update(auth_headers) if osprofiler_web: headers.update(osprofiler_web.get_trace_id_headers()) # if we are passed a fully qualified URL and an endpoint_filter we # should ignore the filter. This will make it easier for clients who # want to overrule the default endpoint_filter data added to all client # requests. We check fully qualified here by the presence of a host. 
if not urllib.parse.urlparse(url).netloc: base_url = None if endpoint_override: base_url = endpoint_override elif endpoint_filter: base_url = self.get_endpoint(auth, **endpoint_filter) if not base_url: raise exceptions.EndpointNotFound() url = '%s/%s' % (base_url.rstrip('/'), url.lstrip('/')) if self.cert: kwargs.setdefault('cert', self.cert) if self.timeout is not None: kwargs.setdefault('timeout', self.timeout) if user_agent: headers['User-Agent'] = user_agent elif self.user_agent: user_agent = headers.setdefault('User-Agent', self.user_agent) else: user_agent = headers.setdefault('User-Agent', USER_AGENT) if self.original_ip: headers.setdefault('Forwarded', 'for=%s;by=%s' % (self.original_ip, user_agent)) if json is not None: headers['Content-Type'] = 'application/json' kwargs['data'] = self._json.encode(json) kwargs.setdefault('verify', self.verify) if requests_auth: kwargs['auth'] = requests_auth if log: self._http_log_request(url, method=method, data=kwargs.get('data'), headers=headers, logger=logger) # Force disable requests redirect handling. We will manage this below. kwargs['allow_redirects'] = False if redirect is None: redirect = self.redirect send = functools.partial(self._send_request, url, method, redirect, log, logger, connect_retries) try: connection_params = self.get_auth_connection_params(auth=auth) except exceptions.MissingAuthPlugin: # NOTE(jamielennox): If we've gotten this far without an auth # plugin then we should be happy with allowing no additional # connection params. This will be the typical case for plugins # anyway. pass else: if connection_params: kwargs.update(connection_params) resp = send(**kwargs) # handle getting a 401 Unauthorized response by invalidating the plugin # and then retrying the request. This is only tried once. 
        # One-shot reauthentication: a 401 means the token we sent was
        # rejected. Invalidate the plugin's cached credentials, fetch fresh
        # auth headers and retry exactly once (no loop, so a persistently
        # bad credential cannot retry forever).
        if resp.status_code == 401 and authenticated and allow_reauth:
            if self.invalidate(auth):
                auth_headers = self.get_auth_headers(auth)

                if auth_headers is not None:
                    headers.update(auth_headers)
                    resp = send(**kwargs)

        if raise_exc and resp.status_code >= 400:
            logger.debug('Request returned failure status: %s',
                         resp.status_code)
            raise exceptions.from_response(resp, method, url)

        return resp

    def _send_request(self, url, method, redirect, log, logger,
                      connect_retries, connect_retry_delay=0.5,
                      **kwargs):
        """Perform one HTTP request, handling retries and redirects itself.

        :param str url: Fully qualified URL to request.
        :param str method: HTTP method to use (e.g. 'GET', 'POST').
        :param redirect: Remaining redirects allowed; either an int
            countdown or a bool for always/never follow.
        :param bool log: If True, log the response via
            ``_http_log_response``.
        :param logger: Logger used for retry/redirect messages.
        :param int connect_retries: Remaining retries for retriable
            connection failures.
        :param float connect_retry_delay: Seconds to sleep before the next
            retry attempt; doubled on each recursion (exponential backoff).
        :returns: The final response, with any redirect hops prepended to
            its ``history`` attribute.
        """
        # NOTE(jamielennox): We handle redirection manually because the
        # requests lib follows some browser patterns where it will redirect
        # POSTs as GETs for certain statuses, which is not what we want for
        # an API. See: https://en.wikipedia.org/wiki/Post/Redirect/Get

        # NOTE(jamielennox): The interaction between retries and redirects
        # is handled naively. We will attempt only a maximum number of
        # retries and redirects rather than per request limits. Otherwise
        # the extreme case could be redirects * retries requests. This will
        # be sufficient in most cases and can be fixed properly if there's
        # ever a need.

        try:
            try:
                # Translate the various requests-library failures into this
                # package's exception hierarchy so callers catch one family.
                resp = self.session.request(method, url, **kwargs)
            except requests.exceptions.SSLError as e:
                msg = 'SSL exception connecting to %(url)s: %(error)s' % {
                    'url': url, 'error': e}
                raise exceptions.SSLError(msg)
            except requests.exceptions.Timeout:
                msg = 'Request to %s timed out' % url
                raise exceptions.ConnectTimeout(msg)
            except requests.exceptions.ConnectionError:
                msg = 'Unable to establish connection to %s' % url
                raise exceptions.ConnectFailure(msg)
            except requests.exceptions.RequestException as e:
                msg = 'Unexpected exception for %(url)s: %(error)s' % {
                    'url': url, 'error': e}
                raise exceptions.UnknownConnectionError(msg, e)
        except exceptions.RetriableConnectionFailure as e:
            if connect_retries <= 0:
                raise

            logger.info('Failure: %(e)s. Retrying in %(delay).1fs.',
                        {'e': e, 'delay': connect_retry_delay})
            time.sleep(connect_retry_delay)

            # Recurse with one fewer retry and a doubled backoff delay.
            return self._send_request(
                url, method, redirect, log, logger,
                connect_retries=connect_retries - 1,
                connect_retry_delay=connect_retry_delay * 2,
                **kwargs)

        if log:
            self._http_log_response(response=resp, logger=logger)

        if resp.status_code in self._REDIRECT_STATUSES:
            # be careful here in python True == 1 and False == 0
            if isinstance(redirect, bool):
                redirect_allowed = redirect
            else:
                # Integer countdown: each redirect hop consumes one unit.
                redirect -= 1
                redirect_allowed = redirect >= 0

            if not redirect_allowed:
                return resp

            try:
                location = resp.headers['location']
            except KeyError:
                logger.warning("Failed to redirect request to %s as new "
                               "location was not provided.", resp.url)
            else:
                # NOTE(jamielennox): We don't pass through
                # connect_retry_delay. This request actually worked so we
                # can reset the delay count.
                new_resp = self._send_request(
                    location, method, redirect, log, logger,
                    connect_retries=connect_retries,
                    **kwargs)

                if not isinstance(new_resp.history, list):
                    new_resp.history = list(new_resp.history)
                # Preserve the full redirect chain on the final response.
                new_resp.history.insert(0, resp)

                resp = new_resp

        return resp

    def head(self, url, **kwargs):
        """Perform a HEAD request.

        This calls :py:meth:`.request()` with ``method`` set to ``HEAD``.
        """
        return self.request(url, 'HEAD', **kwargs)

    def get(self, url, **kwargs):
        """Perform a GET request.

        This calls :py:meth:`.request()` with ``method`` set to ``GET``.
        """
        return self.request(url, 'GET', **kwargs)

    def post(self, url, **kwargs):
        """Perform a POST request.

        This calls :py:meth:`.request()` with ``method`` set to ``POST``.
        """
        return self.request(url, 'POST', **kwargs)

    def put(self, url, **kwargs):
        """Perform a PUT request.

        This calls :py:meth:`.request()` with ``method`` set to ``PUT``.
        """
        return self.request(url, 'PUT', **kwargs)

    def delete(self, url, **kwargs):
        """Perform a DELETE request.

        This calls :py:meth:`.request()` with ``method`` set to ``DELETE``.
        """
        return self.request(url, 'DELETE', **kwargs)

    def patch(self, url, **kwargs):
        """Perform a PATCH request.

        This calls :py:meth:`.request()` with ``method`` set to ``PATCH``.
        """
        return self.request(url, 'PATCH', **kwargs)

    def _auth_required(self, auth, msg):
        """Return a usable auth plugin, or raise if none is available.

        Falls back to the plugin attached to the session when ``auth`` is
        not given.

        :param auth: An explicit plugin to prefer over the session's, or
            a falsy value to use the session's plugin.
        :param str msg: Verb phrase describing the attempted action; it is
            interpolated into the MissingAuthPlugin error message.
        :raises keystonauth.exceptions.MissingAuthPlugin: if neither an
            explicit nor a session plugin is available.
        :returns: The auth plugin to use.
        """
        if not auth:
            auth = self.auth

        if not auth:
            msg_fmt = 'An auth plugin is required to %s'
            raise exceptions.MissingAuthPlugin(msg_fmt % msg)

        return auth

    def get_auth_headers(self, auth=None, **kwargs):
        """Return auth headers as provided by the auth plugin.

        :param auth: The auth plugin to use for token. Overrides the plugin
                     on the session. (optional)
        :type auth: :py:class:`keystonauth.auth.base.BaseAuthPlugin`

        :raises keystonauth.exceptions.AuthorizationFailure: if a new token
                                                             fetch fails.
        :raises keystonauth.exceptions.MissingAuthPlugin: if a plugin is not
                                                          available.

        :returns: Authentication headers or None for failure.
        :rtype: dict
        """
        auth = self._auth_required(auth, 'fetch a token')
        return auth.get_headers(self, **kwargs)

    def get_token(self, auth=None):
        """Return a token as provided by the auth plugin.

        :param auth: The auth plugin to use for token. Overrides the plugin
                     on the session. (optional)
        :type auth: :py:class:`keystonauth.auth.base.BaseAuthPlugin`

        :raises keystonauth.exceptions.AuthorizationFailure: if a new token
                                                             fetch fails.
        :raises keystonauth.exceptions.MissingAuthPlugin: if a plugin is not
                                                          available.

        *DEPRECATED*: This assumes that the only header that is used to
        authenticate a message is 'X-Auth-Token'. This may not be correct.
        Use get_auth_headers instead.

        :returns: A valid token.
        :rtype: string
        """
        # ``get_auth_headers`` may return None; treat that as "no token".
        return (self.get_auth_headers(auth) or {}).get('X-Auth-Token')

    def get_endpoint(self, auth=None, **kwargs):
        """Get an endpoint as provided by the auth plugin.

        :param auth: The auth plugin to use for token. Overrides the plugin
                     on the session. (optional)
        :type auth: :py:class:`keystonauth.auth.base.BaseAuthPlugin`

        :raises keystonauth.exceptions.MissingAuthPlugin: if a plugin is not
                                                          available.

        :returns: An endpoint if available or None.
        :rtype: string
        """
        auth = self._auth_required(auth, 'determine endpoint URL')
        return auth.get_endpoint(self, **kwargs)

    def get_auth_connection_params(self, auth=None, **kwargs):
        """Return auth connection params as provided by the auth plugin.

        An auth plugin may specify connection parameters to the request like
        providing a client certificate for communication.

        We restrict the values that may be returned from this function to
        prevent an auth plugin overriding values unrelated to connection
        parameters. The values that are currently accepted are:

        - `cert`: a path to a client certificate, or tuple of client
          certificate and key pair that are used with this request.
        - `verify`: a boolean value to indicate verifying SSL certificates
          against the system CAs or a path to a CA file to verify with.

        These values are passed to the requests library and further
        information on accepted values may be found there.

        :param auth: The auth plugin to use for tokens. Overrides the plugin
                     on the session. (optional)
        :type auth: keystoneclient.auth.base.BaseAuthPlugin

        :raises keystoneclient.exceptions.AuthorizationFailure: if a new
            token fetch fails.
        :raises keystoneclient.exceptions.MissingAuthPlugin: if a plugin is
            not available.
        :raises keystoneclient.exceptions.UnsupportedParameters: if the
            plugin returns a parameter that is not supported by this
            session.

        :returns: Authentication headers or None for failure.
        :rtype: dict
        """
        msg = 'An auth plugin is required to fetch connection params'
        auth = self._auth_required(auth, msg)
        params = auth.get_connection_params(self, **kwargs)

        # NOTE(jamielennox): There needs to be some consensus on what
        # parameters are allowed to be modified by the auth plugin here.
        # Ideally I think it would be only the send() parts of the request
        # flow. For now lets just allow certain elements.
        params_copy = params.copy()

        # Move the whitelisted keys into kwargs; anything left over is a
        # parameter this session does not support.
        for arg in ('cert', 'verify'):
            try:
                kwargs[arg] = params_copy.pop(arg)
            except KeyError:
                pass

        if params_copy:
            raise exceptions.UnsupportedParameters(list(params_copy.keys()))

        return params

    def invalidate(self, auth=None):
        """Invalidate an authentication plugin.

        :param auth: The auth plugin to invalidate. Overrides the plugin on
                     the session. (optional)
        :type auth: :py:class:`keystonauth.auth.base.BaseAuthPlugin`
        """
        # NOTE(review): the action string is 'validate', so the error
        # message reads "required to validate"; 'invalidate' was probably
        # intended, but changing it would alter a runtime string.
        auth = self._auth_required(auth, 'validate')
        return auth.invalidate()

    def get_user_id(self, auth=None):
        """Return the authenticated user_id as provided by the auth plugin.

        :param auth: The auth plugin to use for token. Overrides the plugin
                     on the session. (optional)
        :type auth: keystonauth.auth.base.BaseAuthPlugin

        :raises keystonauth.exceptions.AuthorizationFailure: if a new token
            fetch fails.
        :raises keystonauth.exceptions.MissingAuthPlugin: if a plugin is not
            available.

        :returns string: Current user_id or None if not supported by plugin.
        """
        auth = self._auth_required(auth, 'get user_id')
        return auth.get_user_id(self)

    def get_project_id(self, auth=None):
        """Return the authenticated project_id as provided by the auth plugin.

        :param auth: The auth plugin to use for token. Overrides the plugin
                     on the session. (optional)
        :type auth: keystonauth.auth.base.BaseAuthPlugin

        :raises keystonauth.exceptions.AuthorizationFailure: if a new token
            fetch fails.
        :raises keystonauth.exceptions.MissingAuthPlugin: if a plugin is not
            available.

        :returns string: Current project_id or None if not supported by
            plugin.
        """
        auth = self._auth_required(auth, 'get project_id')
        return auth.get_project_id(self)


class TCPKeepAliveAdapter(requests.adapters.HTTPAdapter):
    """The custom adapter used to set TCP Keep-Alive on all connections.

    This Adapter also preserves the default behaviour of Requests which
    disables Nagle's Algorithm.

    See also:
    http://blogs.msdn.com/b/windowsazurestorage/archive/2010/06/25/nagle-s-algorithm-is-not-friendly-towards-small-requests.aspx
    """

    def init_poolmanager(self, *args, **kwargs):
        # socket_options support landed in requests 2.4.1, so only inject
        # them on newer versions.
        # NOTE(review): this is a lexicographic *string* comparison, so a
        # version such as '2.10.0' compares less than '2.4.1' and keepalive
        # options would silently be skipped; a parsed-version comparison
        # would be correct. Left unchanged here (doc-only pass).
        if requests.__version__ >= '2.4.1':
            socket_options = [
                # Keep Nagle's algorithm off
                (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
                # Turn on TCP Keep-Alive
                (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
                # Set the maximum number of keep-alive probes
                (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 4),
                # Send keep-alive probes every 15 seconds
                (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 15),
            ]

            # Some operating systems (e.g., OSX) do not support setting
            # keepidle
            if hasattr(socket, 'TCP_KEEPIDLE'):
                socket_options += [
                    # Wait 60 seconds before sending keep-alive probes
                    (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
                ]

            # After waiting 60 seconds, and then sending a probe once every
            # 15 seconds 4 times, these options should ensure that a
            # connection hangs for no longer than 2 minutes before a
            # ConnectionError is raised.
            kwargs.setdefault('socket_options', socket_options)
        super(TCPKeepAliveAdapter, self).init_poolmanager(*args, **kwargs)
# License: Apache-2.0