Dataset schema: repo_name (string, 6-100 chars), path (string, 4-294 chars), copies (string, 1-5 chars), size (string, 4-6 chars), content (string, 606-896k chars), license (string, one of 15 classes).
mmcbride1/python-coretemp
coretemp/sensor_reading.py
1
4790
import re
import os
import sys
import subprocess

import sensors as r
import coretemp_log as log
import coretemp_config as conf
from collections import OrderedDict

''' Get sensor constants '''
from coretemp_constants import SUB_MAX_TYPE, SUB_CRT_TYPE, CHIP, NORM, HIGH, CRTC


class SensorReading:

    ''' Store sensor threshold '''
    crit = []
    high = []

    ''' Store log message '''
    MSG = ""

    ''' Store sensor reading '''
    read = OrderedDict()

    ''' Configuration '''
    CONF = conf.Config("threshold").get_config()
    ERRO = log.ExceptionLog()

    def __init__(self):
        """
        Constructor: Set chip reading and log message
        """
        try:
            self.__set_chip_read()
            self.__set_message()
        except Exception as ex:
            self.ERRO.update_errlog(ex)

    def get_reading(self):
        """
        Get sensor reading
        :return: sensor reading
        """
        return self.read

    def get_message(self):
        """
        Get log message
        :return: log message string
        """
        return self.MSG

    def get_failed(self):
        """
        Get readings only deemed as high or critical from the primary reading
        :return: max/crt message string
        """
        return re.sub(r".*NORMAL.*\n?", "", self.MSG)

    def __collect_recommended(self, sub):
        """
        Gets the recommended threshold values as determined by the sensor
        sub-feature set
        :param str sub: the given sub-feature
        """
        self.sub = sub
        num = sub.get_value()
        if sub.type == SUB_MAX_TYPE:
            self.high.append(num)
        if sub.type == SUB_CRT_TYPE:
            self.crit.append(num)

    def __avg(self, arr):
        """
        Obtains the mean value of the collection
        :param list arr: any given list
        :return: average value
        """
        self.arr = arr
        try:
            avg = sum(arr) / float(len(arr))
            return round(avg, 2)
        except ZeroDivisionError as z:
            self.ERRO.update_errlog(z)
            return 0

    def get_avg_read(self):
        """
        Gets the average core value of the list of chips on the read
        :return: average core value
        """
        return self.__avg(self.read.values())

    def __msg_str(self, k, v, i):
        """
        Helper function to build the log output message
        :param str k: core #
        :param str v: reading
        :param str i: indicator
        :return: formatted log message
        """
        self.k = k
        self.v = v
        self.i = i
        return "%s : %s -> %s\n" % (k, v, i)

    def __set_defaults(self, arr):
        """
        Sets default values for the thresholds in the case that none are
        provided in the config and a reading cannot be obtained from the chip
        :param list arr: generated threshold list
        :return: updated list with defaults
        """
        self.arr = arr
        for k, v in arr.items():
            if k == 'MAX' and v == 0:
                arr[k] = 86.0
            if k == 'CRT' and v == 0:
                arr[k] = 96.0
        return arr

    def get_threshold(self):
        """
        The primary threshold setting mechanism. Sets first from the config,
        then from the recommended values if no such properties exist
        :return: dict containing max/crt values
        """
        h = self.CONF['high']
        c = self.CONF['crit']
        if h == "" or float(h) <= 0:
            h = self.__avg(self.high)
        if c == "" or float(c) <= 0:
            c = self.__avg(self.crit)
        order = [float(h), float(c)]
        high = min(order)
        crit = max(order)
        return {'MAX': high, 'CRT': crit}

    def __set_chip_read(self):
        """
        Queries the chip and applies the result to the 'read' dict.
        Then, collects the recommended threshold values
        """
        r.init()
        try:
            for x in r.iter_detected_chips(CHIP):
                for f in x:
                    if "Core" in f.label:
                        self.read[f.label] = f.get_value()
                        for sub in f:
                            self.__collect_recommended(sub)
        finally:
            r.cleanup()

    def __set_message(self):
        """
        Builds the output (log) message based on the standing of the chip
        read and whether given thresholds were reached
        """
        th = self.__set_defaults(self.get_threshold())
        for k, v in self.get_reading().items():
            if v < th['MAX']:
                self.MSG += self.__msg_str(k, v, NORM)
            elif v >= th['MAX'] and v < th['CRT']:
                self.MSG += self.__msg_str(k, v, HIGH)
            elif v >= th['CRT']:
                self.MSG += self.__msg_str(k, v, CRTC)
            else:
                self.MSG += self.__msg_str(k, v, "UNKNOWN")
mit
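A minimal, self-contained sketch of the ordering logic in SensorReading.get_threshold() above: candidate values are averaged, and the smaller of the two results always becomes MAX while the larger becomes CRT. The helper name and the sample values are invented for illustration.

def threshold_from_candidates(high_candidates, crit_candidates):
    # hypothetical standalone version of the averaging + min/max ordering
    def avg(arr):
        return round(sum(arr) / float(len(arr)), 2) if arr else 0
    order = [avg(high_candidates), avg(crit_candidates)]
    return {'MAX': min(order), 'CRT': max(order)}

print(threshold_from_candidates([84.0, 88.0], [94.0, 98.0]))
# {'MAX': 86.0, 'CRT': 96.0}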
Inferno42/Lerum-Engine
Platformer/freetype-2.4.0/src/tools/chktrcmp.py
381
3826
#!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009
#
# This code is explicitly into the public domain.

import sys
import os
import re

SRC_FILE_LIST   = []
USED_COMPONENT  = {}
KNOWN_COMPONENT = {}

SRC_FILE_DIRS   = [ "src" ]
TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ]

# --------------------------------------------------------------
# Parse command line options
#

for i in range( 1, len( sys.argv ) ):
  if sys.argv[i].startswith( "--help" ):
    print "Usage: %s [option]" % sys.argv[0]
    print "Search used-but-defined and defined-but-not-used trace_XXX macros"
    print ""
    print "  --help:"
    print "      Show this help"
    print ""
    print "  --src-dirs=dir1:dir2:..."
    print "      Specify the directories of C source files to be checked"
    print "      Default is %s" % ":".join( SRC_FILE_DIRS )
    print ""
    print "  --def-files=file1:file2:..."
    print "      Specify the header files including FT_TRACE_DEF()"
    print "      Default is %s" % ":".join( TRACE_DEF_FILES )
    print ""
    exit(0)
  if sys.argv[i].startswith( "--src-dirs=" ):
    SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
  elif sys.argv[i].startswith( "--def-files=" ):
    TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )

# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#

c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
trace_use_pat  = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )

for d in SRC_FILE_DIRS:
  for ( p, dlst, flst ) in os.walk( d ):
    for f in flst:
      if c_pathname_pat.match( f ) != None:
        src_pathname = os.path.join( p, f )
        line_num = 0
        for src_line in open( src_pathname, 'r' ):
          line_num = line_num + 1
          src_line = src_line.strip()
          if trace_use_pat.match( src_line ) != None:
            component_name = trace_use_pat.sub( '', src_line )
            if component_name in USED_COMPONENT:
              USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
            else:
              USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]

# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#

trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )

for f in TRACE_DEF_FILES:
  line_num = 0
  for hdr_line in open( f, 'r' ):
    line_num = line_num + 1
    hdr_line = hdr_line.strip()
    if trace_def_pat_opn.match( hdr_line ) != None:
      component_name = trace_def_pat_opn.sub( '', hdr_line )
      component_name = trace_def_pat_cls.sub( '', component_name )
      if component_name in KNOWN_COMPONENT:
        print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
          ( component_name, KNOWN_COMPONENT[component_name], line_num )
      else:
        KNOWN_COMPONENT[component_name] = "%s:%d" % \
          ( os.path.basename( f ), line_num )

# --------------------------------------------------------------
# Compare the used and defined trace macros.
#

print "# Trace component used in the implementations but not defined in fttrace.h."
cmpnt = USED_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
  if c not in KNOWN_COMPONENT:
    print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )

print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
  if c not in USED_COMPONENT:
    if c != "any":
      print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
gpl-2.0
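A quick way to see what the script's regexes extract; the sample lines are invented, and the snippet runs under Python 2 like the script itself.

import re

trace_use_pat     = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )

# a use site, as it would appear in a C source file
print trace_use_pat.sub( '', '#define FT_COMPONENT  trace_smooth' )   # -> smooth

# a definition site, as it would appear in fttrace.h
name = trace_def_pat_opn.sub( '', 'FT_TRACE_DEF( smooth )' )
print trace_def_pat_cls.sub( '', name )                               # -> smooth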
stevelle/glance
glance/i18n.py
16
1109
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_i18n import *  # noqa

_translators = TranslatorFactory(domain='glance')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
apache-2.0
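A sketch of how these translator names are typically used by calling code; the logger setup and message strings are illustrative, not taken from Glance.

import logging

from glance.i18n import _, _LI, _LW

LOG = logging.getLogger(__name__)

LOG.info(_LI('Loaded image cache driver: %s'), 'sqlite')   # info-level, translated
LOG.warning(_LW('Image cache driver failed to load'))      # warning-level, translated
message = _('Invalid image identifier')                    # user-facing, primary domain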
cluck/dnspython
dns/rdtypes/ANY/SSHFP.py
8
2829
# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import struct
import binascii

import dns.exception
import dns.rdata
import dns.rdatatype


class SSHFP(dns.rdata.Rdata):

    """SSHFP record

    @ivar algorithm: the algorithm
    @type algorithm: int
    @ivar fp_type: the digest type
    @type fp_type: int
    @ivar fingerprint: the fingerprint
    @type fingerprint: string
    @see: draft-ietf-secsh-dns-05.txt"""

    __slots__ = ['algorithm', 'fp_type', 'fingerprint']

    def __init__(self, rdclass, rdtype, algorithm, fp_type, fingerprint):
        super(SSHFP, self).__init__(rdclass, rdtype)
        self.algorithm = algorithm
        self.fp_type = fp_type
        self.fingerprint = fingerprint

    def to_text(self, origin=None, relativize=True, **kw):
        return '%d %d %s' % (self.algorithm,
                             self.fp_type,
                             dns.rdata._hexify(self.fingerprint,
                                               chunksize=128))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        algorithm = tok.get_uint8()
        fp_type = tok.get_uint8()
        chunks = []
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            chunks.append(t.value.encode())
        fingerprint = b''.join(chunks)
        fingerprint = binascii.unhexlify(fingerprint)
        return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)

    def to_wire(self, file, compress=None, origin=None):
        header = struct.pack("!BB", self.algorithm, self.fp_type)
        file.write(header)
        file.write(self.fingerprint)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        header = struct.unpack("!BB", wire[current: current + 2])
        current += 2
        rdlen -= 2
        fingerprint = wire[current: current + rdlen].unwrap()
        return cls(rdclass, rdtype, header[0], header[1], fingerprint)
isc
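The RDATA this class handles is just two one-byte fields followed by the raw digest (RFC 4255: algorithm 1 = RSA, 2 = DSA; fp_type 1 = SHA-1). A self-contained sketch of the same packing that to_wire() performs, over a placeholder key blob:

import binascii
import hashlib
import struct

pubkey_blob = b'...raw SSH public key bytes...'    # placeholder, not a real key
algorithm, fp_type = 2, 1                          # DSA, SHA-1
fingerprint = hashlib.sha1(pubkey_blob).digest()

rdata = struct.pack('!BB', algorithm, fp_type) + fingerprint
# presentation form, roughly what to_text() renders:
print(algorithm, fp_type, binascii.hexlify(fingerprint).decode())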
orymeyer/Flask-Python-GAE-Login-Registration
lib/werkzeug/contrib/securecookie.py
294
12204
# -*- coding: utf-8 -*-
r"""
    werkzeug.contrib.securecookie
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    This module implements a cookie that is not alterable from the client
    because it adds a checksum the server checks for.  You can use it as
    session replacement if all you have is a user id or something to mark
    a logged in user.

    Keep in mind that the data is still readable from the client as a
    normal cookie is.  However you don't have to store and flush the
    sessions you have at the server.

    Example usage:

    >>> from werkzeug.contrib.securecookie import SecureCookie
    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")

    Dumping into a string so that one can store it in a cookie:

    >>> value = x.serialize()

    Loading from that string again:

    >>> x = SecureCookie.unserialize(value, "deadbeef")
    >>> x["baz"]
    (1, 2, 3)

    If someone modifies the cookie and the checksum is wrong the unserialize
    method will fail silently and return a new empty `SecureCookie` object.

    Keep in mind that the values will be visible in the cookie so do not
    store data in a cookie you don't want the user to see.

    Application Integration
    =======================

    If you are using the werkzeug request objects you could integrate the
    secure cookie into your application like this::

        from werkzeug.utils import cached_property
        from werkzeug.wrappers import BaseRequest
        from werkzeug.contrib.securecookie import SecureCookie

        # don't use this key but a different one; you could just use
        # os.urandom(20) to get something random
        SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'

        class Request(BaseRequest):

            @cached_property
            def client_session(self):
                data = self.cookies.get('session_data')
                if not data:
                    return SecureCookie(secret_key=SECRET_KEY)
                return SecureCookie.unserialize(data, SECRET_KEY)

        def application(environ, start_response):
            request = Request(environ, start_response)

            # get a response object here
            response = ...

            if request.client_session.should_save:
                session_data = request.client_session.serialize()
                response.set_cookie('session_data', session_data,
                                    httponly=True)
            return response(environ, start_response)

    A less verbose integration can be achieved by using shorthand methods::

        class Request(BaseRequest):

            @cached_property
            def client_session(self):
                return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)

        def application(environ, start_response):
            request = Request(environ, start_response)

            # get a response object here
            response = ...

            request.client_session.save_cookie(response)
            return response(environ, start_response)

    :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash

from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native


class UnquoteError(Exception):
    """Internal exception used to signal failures on quoting."""


class SecureCookie(ModificationTrackingDict):
    """Represents a secure cookie.  You can subclass this class and provide
    an alternative mac method.  The important thing is that the mac method
    is a function with a similar interface to the hashlib.  Required
    methods are update() and digest().

    Example usage:

    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
    >>> x["foo"]
    42
    >>> x["baz"]
    (1, 2, 3)
    >>> x["blafasel"] = 23
    >>> x.should_save
    True

    :param data: the initial data.  Either a dict, list of tuples or `None`.
    :param secret_key: the secret key.  If set to `None` or not specified,
                       it has to be set before :meth:`serialize` is called.
    :param new: The initial value of the `new` flag.
    """

    #: The hash method to use.  This has to be a module with a new function
    #: or a function that creates a hashlib object.  Such as `hashlib.md5`
    #: Subclasses can override this attribute.  The default hash is sha1.
    #: Make sure to wrap this in staticmethod() if you store an arbitrary
    #: function there such as hashlib.sha1 which might be implemented
    #: as a function.
    hash_method = staticmethod(_default_hash)

    #: the module used for serialization.  Unless overridden by subclasses
    #: the standard pickle module is used.
    serialization_method = pickle

    #: if the contents should be base64 quoted.  This can be disabled if the
    #: serialization process returns cookie safe strings only.
    quote_base64 = True

    def __init__(self, data=None, secret_key=None, new=True):
        ModificationTrackingDict.__init__(self, data or ())
        # explicitly convert it into a bytestring because python 2.6
        # no longer performs an implicit string conversion on hmac
        if secret_key is not None:
            secret_key = bytes(secret_key)
        self.secret_key = secret_key
        self.new = new

    def __repr__(self):
        return '<%s %s%s>' % (
            self.__class__.__name__,
            dict.__repr__(self),
            self.should_save and '*' or ''
        )

    @property
    def should_save(self):
        """True if the session should be saved.  By default this is only true
        for :attr:`modified` cookies, not :attr:`new`.
        """
        return self.modified

    @classmethod
    def quote(cls, value):
        """Quote the value for the cookie.  This can be any object supported
        by :attr:`serialization_method`.

        :param value: the value to quote.
        """
        if cls.serialization_method is not None:
            value = cls.serialization_method.dumps(value)
        if cls.quote_base64:
            value = b''.join(base64.b64encode(value).splitlines()).strip()
        return value

    @classmethod
    def unquote(cls, value):
        """Unquote the value for the cookie.  If unquoting does not work a
        :exc:`UnquoteError` is raised.

        :param value: the value to unquote.
        """
        try:
            if cls.quote_base64:
                value = base64.b64decode(value)
            if cls.serialization_method is not None:
                value = cls.serialization_method.loads(value)
            return value
        except Exception:
            # unfortunately pickle and other serialization modules can
            # cause pretty much every error here.  if we get one we catch it
            # and convert it into an UnquoteError
            raise UnquoteError()

    def serialize(self, expires=None):
        """Serialize the secure cookie into a string.

        If expires is provided, the session will be automatically invalidated
        after expiration when you unserialize it.  This provides better
        protection against session cookie theft.

        :param expires: an optional expiration date for the cookie (a
                        :class:`datetime.datetime` object)
        """
        if self.secret_key is None:
            raise RuntimeError('no secret key defined')
        if expires:
            self['_expires'] = _date_to_unix(expires)
        result = []
        mac = hmac(self.secret_key, None, self.hash_method)
        for key, value in sorted(self.items()):
            result.append(('%s=%s' % (
                url_quote_plus(key),
                self.quote(value).decode('ascii')
            )).encode('ascii'))
            mac.update(b'|' + result[-1])
        return b'?'.join([
            base64.b64encode(mac.digest()).strip(),
            b'&'.join(result)
        ])

    @classmethod
    def unserialize(cls, string, secret_key):
        """Load the secure cookie from a serialized string.

        :param string: the cookie value to unserialize.
        :param secret_key: the secret key used to serialize the cookie.
        :return: a new :class:`SecureCookie`.
        """
        if isinstance(string, text_type):
            string = string.encode('utf-8', 'replace')
        if isinstance(secret_key, text_type):
            secret_key = secret_key.encode('utf-8', 'replace')
        try:
            base64_hash, data = string.split(b'?', 1)
        except (ValueError, IndexError):
            items = ()
        else:
            items = {}
            mac = hmac(secret_key, None, cls.hash_method)
            for item in data.split(b'&'):
                mac.update(b'|' + item)
                if not b'=' in item:
                    items = None
                    break
                key, value = item.split(b'=', 1)
                # try to make the key a string
                key = url_unquote_plus(key.decode('ascii'))
                try:
                    key = to_native(key)
                except UnicodeError:
                    pass
                items[key] = value

            # no parsing error and the mac looks okay, we can now
            # securely unpickle our cookie.
            try:
                client_hash = base64.b64decode(base64_hash)
            except TypeError:
                items = client_hash = None
            if items is not None and safe_str_cmp(client_hash,
                                                  mac.digest()):
                try:
                    for key, value in iteritems(items):
                        items[key] = cls.unquote(value)
                except UnquoteError:
                    items = ()
                else:
                    if '_expires' in items:
                        if time() > items['_expires']:
                            items = ()
                        else:
                            del items['_expires']
            else:
                items = ()
        return cls(items, secret_key, False)

    @classmethod
    def load_cookie(cls, request, key='session', secret_key=None):
        """Loads a :class:`SecureCookie` from a cookie in request.  If the
        cookie is not set, a new :class:`SecureCookie` instance is
        returned.

        :param request: a request object that has a `cookies` attribute
                        which is a dict of all cookie values.
        :param key: the name of the cookie.
        :param secret_key: the secret key used to unquote the cookie.
                           Always provide the value even though it has
                           no default!
        """
        data = request.cookies.get(key)
        if not data:
            return cls(secret_key=secret_key)
        return cls.unserialize(data, secret_key)

    def save_cookie(self, response, key='session', expires=None,
                    session_expires=None, max_age=None, path='/', domain=None,
                    secure=None, httponly=False, force=False):
        """Saves the SecureCookie in a cookie on response object.  All
        parameters that are not described here are forwarded directly
        to :meth:`~BaseResponse.set_cookie`.

        :param response: a response object that has a
                         :meth:`~BaseResponse.set_cookie` method.
        :param key: the name of the cookie.
        :param session_expires: the expiration date of the secure cookie
                                stored information.  If this is not provided
                                the cookie `expires` date is used instead.
        """
        if force or self.should_save:
            data = self.serialize(session_expires or expires)
            response.set_cookie(key, data, expires=expires, max_age=max_age,
                                path=path, domain=domain, secure=secure,
                                httponly=httponly)
apache-2.0
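A stripped-down, self-contained sketch of the MAC layout that serialize() builds: sorted key=value pairs, an HMAC fed b'|' + pair for each pair, and the base64 digest joined to the payload with b'?'. It skips the pickle/quote steps, so it illustrates the signing scheme rather than replacing the class.

import base64
from hashlib import sha1
from hmac import new as hmac

def sign(data, secret_key):
    result = []
    mac = hmac(secret_key, None, sha1)
    for key, value in sorted(data.items()):
        item = ('%s=%s' % (key, value)).encode('ascii')
        result.append(item)
        mac.update(b'|' + item)
    return b'?'.join([base64.b64encode(mac.digest()).strip(), b'&'.join(result)])

print(sign({'foo': 42, 'baz': 7}, b'deadbeef'))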
minhphung171093/OpenERP_V8
openerp/addons/email_template/wizard/mail_compose_message.py
197
11797
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

from openerp import tools
from openerp.osv import osv, fields


def _reopen(self, res_id, model):
    return {'type': 'ir.actions.act_window',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': res_id,
            'res_model': self._name,
            'target': 'new',
            # save original model in context, because selecting the list of available
            # templates requires a model in context
            'context': {
                'default_model': model,
            },
            }


class mail_compose_message(osv.TransientModel):
    _inherit = 'mail.compose.message'

    def default_get(self, cr, uid, fields, context=None):
        """ Override to pre-fill the data when having a template in single-email mode
        and not going through the view: the on_change is not called in that case. """
        if context is None:
            context = {}
        res = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
        if res.get('composition_mode') != 'mass_mail' and context.get('default_template_id') and res.get('model') and res.get('res_id'):
            res.update(
                self.onchange_template_id(
                    cr, uid, [], context['default_template_id'], res.get('composition_mode'),
                    res.get('model'), res.get('res_id'), context=context
                )['value']
            )
        if fields is not None:
            [res.pop(field, None) for field in res.keys() if field not in fields]
        return res

    _columns = {
        'template_id': fields.many2one('email.template', 'Use template', select=True),
    }

    def send_mail(self, cr, uid, ids, context=None):
        """ Override of send_mail to duplicate attachments linked to the email.template.
        Indeed, basic mail.compose.message wizard duplicates attachments in mass
        mailing mode. But in 'single post' mode, attachments of an email template
        also have to be duplicated to avoid changing their ownership. """
        if context is None:
            context = {}
        wizard_context = dict(context)
        for wizard in self.browse(cr, uid, ids, context=context):
            if wizard.template_id:
                wizard_context['mail_notify_user_signature'] = False  # template user_signature is added when generating body_html
                wizard_context['mail_auto_delete'] = wizard.template_id.auto_delete  # mass mailing: use template auto_delete value -> note, for emails mass mailing only
                wizard_context['mail_server_id'] = wizard.template_id.mail_server_id.id
            if not wizard.attachment_ids or wizard.composition_mode == 'mass_mail' or not wizard.template_id:
                continue
            new_attachment_ids = []
            for attachment in wizard.attachment_ids:
                if attachment in wizard.template_id.attachment_ids:
                    new_attachment_ids.append(self.pool.get('ir.attachment').copy(cr, uid, attachment.id, {'res_model': 'mail.compose.message', 'res_id': wizard.id}, context=context))
                else:
                    new_attachment_ids.append(attachment.id)
            self.write(cr, uid, wizard.id, {'attachment_ids': [(6, 0, new_attachment_ids)]}, context=context)
        return super(mail_compose_message, self).send_mail(cr, uid, ids, context=wizard_context)

    def onchange_template_id(self, cr, uid, ids, template_id, composition_mode, model, res_id, context=None):
        """ - mass_mailing: we cannot render, so return the template values
            - normal mode: return rendered values """
        if template_id and composition_mode == 'mass_mail':
            fields = ['subject', 'body_html', 'email_from', 'reply_to', 'mail_server_id']
            template = self.pool['email.template'].browse(cr, uid, template_id, context=context)
            values = dict((field, getattr(template, field)) for field in fields if getattr(template, field))
            if template.attachment_ids:
                values['attachment_ids'] = [att.id for att in template.attachment_ids]
            if template.mail_server_id:
                values['mail_server_id'] = template.mail_server_id.id
            if template.user_signature and 'body_html' in values:
                signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
                values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
        elif template_id:
            values = self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context=context)[res_id]
            # transform attachments into attachment_ids; not attached to the document because this will
            # be done further in the posting process, allowing to clean database if email is not sent
            ir_attach_obj = self.pool.get('ir.attachment')
            for attach_fname, attach_datas in values.pop('attachments', []):
                data_attach = {
                    'name': attach_fname,
                    'datas': attach_datas,
                    'datas_fname': attach_fname,
                    'res_model': 'mail.compose.message',
                    'res_id': 0,
                    'type': 'binary',  # override default_type from context, possibly meant for another model!
                }
                values.setdefault('attachment_ids', list()).append(ir_attach_obj.create(cr, uid, data_attach, context=context))
        else:
            default_context = dict(context, default_composition_mode=composition_mode, default_model=model, default_res_id=res_id)
            default_values = self.default_get(cr, uid, ['composition_mode', 'model', 'res_id', 'parent_id', 'partner_ids', 'subject', 'body', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'], context=default_context)
            values = dict((key, default_values[key]) for key in ['subject', 'body', 'partner_ids', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'] if key in default_values)

        if values.get('body_html'):
            values['body'] = values.pop('body_html')
        return {'value': values}

    def save_as_template(self, cr, uid, ids, context=None):
        """ hit save as template button: current form value will be a new
        template attached to the current document. """
        email_template = self.pool.get('email.template')
        ir_model_pool = self.pool.get('ir.model')
        for record in self.browse(cr, uid, ids, context=context):
            model_ids = ir_model_pool.search(cr, uid, [('model', '=', record.model or 'mail.message')], context=context)
            model_id = model_ids and model_ids[0] or False
            model_name = ''
            if model_id:
                model_name = ir_model_pool.browse(cr, uid, model_id, context=context).name
            template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
            values = {
                'name': template_name,
                'subject': record.subject or False,
                'body_html': record.body or False,
                'model_id': model_id or False,
                'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])],
            }
            template_id = email_template.create(cr, uid, values, context=context)
            # generate the saved template
            template_values = record.onchange_template_id(template_id, record.composition_mode, record.model, record.res_id)['value']
            template_values['template_id'] = template_id
            record.write(template_values)
            return _reopen(self, record.id, record.model)

    #------------------------------------------------------
    # Wizard validation and send
    #------------------------------------------------------

    def generate_email_for_composer_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
        """ Call email_template.generate_email(), get fields relevant for
        mail.compose.message, transform email_cc and email_to into partner_ids """
        if context is None:
            context = {}
        if fields is None:
            fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'attachment_ids', 'mail_server_id']
        returned_fields = fields + ['partner_ids', 'attachments']
        values = dict.fromkeys(res_ids, False)

        ctx = dict(context, tpl_partners_only=True)
        template_values = self.pool.get('email.template').generate_email_batch(cr, uid, template_id, res_ids, fields=fields, context=ctx)
        for res_id in res_ids:
            res_id_values = dict((field, template_values[res_id][field]) for field in returned_fields if template_values[res_id].get(field))
            res_id_values['body'] = res_id_values.pop('body_html', '')
            values[res_id] = res_id_values
        return values

    def render_message_batch(self, cr, uid, wizard, res_ids, context=None):
        """ Override to handle templates. """
        # generate composer values
        composer_values = super(mail_compose_message, self).render_message_batch(cr, uid, wizard, res_ids, context)

        # generate template-based values
        if wizard.template_id:
            template_values = self.generate_email_for_composer_batch(
                cr, uid, wizard.template_id.id, res_ids,
                fields=['email_to', 'partner_to', 'email_cc', 'attachment_ids', 'mail_server_id'],
                context=context)
        else:
            template_values = {}

        for res_id in res_ids:
            if template_values.get(res_id):
                # recipients are managed by the template
                composer_values[res_id].pop('partner_ids')
                composer_values[res_id].pop('email_to')
                composer_values[res_id].pop('email_cc')
                # remove attachments from template values as they should not be rendered
                template_values[res_id].pop('attachment_ids', None)
            else:
                template_values[res_id] = dict()
            # update template values by composer values
            template_values[res_id].update(composer_values[res_id])
        return template_values

    def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
        return self.pool.get('email.template').render_template_batch(cr, uid, template, model, res_ids, context=context, post_process=post_process)

    # Compatibility methods
    def generate_email_for_composer(self, cr, uid, template_id, res_id, context=None):
        return self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context)[res_id]

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
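For readers outside OpenERP: the [(6, 0, new_attachment_ids)] triple passed to write() in send_mail() above is the ORM's "replace the whole set" command for many2many fields. The ids below are invented examples of the common command shapes:

vals_replace = {'attachment_ids': [(6, 0, [101, 102])]}  # replace the set with records 101 and 102
vals_link    = {'attachment_ids': [(4, 101)]}            # add a link to existing record 101
vals_unlink  = {'attachment_ids': [(3, 101)]}            # drop the link to 101, keep the record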
inexactually/irisbot
utils.py
1
2817
import aiohttp
import inspect
import io
from os.path import split as path_split

import discord
from discord.ext import commands

import settings


def setting(name, default):
    return getattr(settings, name, default)


def pretty_list(names, bold=True, conjunction='and', empty=''):
    names = list(names)
    if not names:
        return empty
    if bold:
        names = ['**{}**'.format(name) for name in names]
    sep = ' ' + conjunction if conjunction else ''
    if len(names) == 1:
        return names[0]
    elif len(names) == 2:
        return '{}{} {}'.format(names[0], sep, names[1])
    else:
        return '{},{} {}'.format(', '.join(names[:-1]), sep, names[-1])


def is_local_check_failure(error):
    """This horrible hack lets a command error handler figure out if the
    error originates from the command's own checks, rather than a global
    check or some other sort of error.
    """
    if isinstance(error, commands.CheckFailure):
        if error.args:
            return "check functions for command" in error.args[0]


# Copied from discord.ext.commands.bot.py. We need this because
# there's no way to override the formatting of the default Bot.reply.
def bot_get_variable(name):
    stack = inspect.stack()
    try:
        for frames in stack:
            try:
                frame = frames[0]
                current_locals = frame.f_locals
                if name in current_locals:
                    return current_locals[name]
            finally:
                del frame
    finally:
        del stack


class Bot(commands.Bot):
    """A subclass of `discord.ext.commands.Bot` with some improvements."""

    async def reply(self, content, *args, separator=' ', **kwargs):
        # Now with custom separator support
        author = bot_get_variable('_internal_author')
        text = '{0.mention}{1}{2}'.format(author, separator, str(content))
        return await self.say(text, *args, **kwargs)

    async def send_file(self, destination, fp, *, filename=None, content=None,
                        embed=None, tts=False):
        # Now with embed support
        channel_id, guild_id = await self._resolve_destination(destination)

        if embed is not None:
            embed = embed.to_dict()

        try:
            with open(fp, 'rb') as f:
                buffer = io.BytesIO(f.read())
                if filename is None:
                    _, filename = path_split(fp)
        except TypeError:
            buffer = fp

        content = str(content) if content is not None else None
        data = await self.http.send_file(channel_id, buffer, guild_id=guild_id,
                                         filename=filename, content=content,
                                         embed=embed, tts=tts)
        channel = self.get_channel(data.get('channel_id'))
        message = self.connection._create_message(channel=channel, **data)
        return message
mit
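pretty_list() above is self-contained, so its behaviour is easy to pin down with a few calls (assuming this module is importable as utils; expected output in comments):

from utils import pretty_list

print(pretty_list(['a']))                        # **a**
print(pretty_list(['a', 'b']))                   # **a** and **b**
print(pretty_list(['a', 'b', 'c'], bold=False))  # a, b, and c
print(pretty_list([], empty='nobody'))           # nobody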
censof/ansible-deployment
django_app_server_db_server/deployment/templates/common.py
1
3942
import os.path

# Configuration modules.
from ._installed_apps import *
from ._middleware import *
from ._context_processors import *
from ._email import *
from ._eclaim import *

_ = lambda s: s

# Debugging mode.
DEBUG = False
TEMPLATE_DEBUG = False

if DEMO_MODE:
    SEND_NOTIF_EMAILS = False
else:
    SEND_NOTIF_EMAILS = True

# Project root directory.
_path = os.path.join(os.path.dirname(__file__), os.pardir)
BASE_DIR = os.path.abspath(os.path.join(_path, os.pardir))

# SQL scripts directory.
_parpath = os.path.join(BASE_DIR, os.pardir)
SQL_SCRIPTS_DIR = os.path.abspath(os.path.join(_parpath, 'sql_scripts'))

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wk9&6^ns(71^*i#8&=v#j53-cv#85csvl53zu4dp$w0x(k%zsz'

ALLOWED_HOSTS = ['{{ ansible_eth0.ipv4.address }}']

if DEMO_MODE:
    HOST_URL = 'http://{{ ansible_eth0.ipv4.address }}:{}/'.format(DEMO_PORT)
else:
    HOST_URL = 'http://{{ ansible_eth0.ipv4.address }}/'

LOGIN_URL = '/eclaim/login/'

ROOT_URLCONF = 'eclaim.urls'

WSGI_APPLICATION = 'wsgi.application'

# Absolute path to the directory that holds static files.
STATIC_ROOT = '{{ django_app_home }}/static_files'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'

STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)

# Compress static files.
COMPRESS_ENABLED = True

# Absolute path to the directory that holds media files.
MEDIA_ROOT = '{{ django_app_home }}/media_files'

MEDIA_URL = '/media/'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(BASE_DIR, 'templates'),
)

# Django Rest Framework.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 100
}

MINI_PAGE_SIZE = 20

# Sphinx documentation.
DOCS_ROOT = os.path.join(BASE_DIR, 'docs/_build/html')
DOCS_ACCESS = 'login_required'  # public/login_required/staff/superuser

# Internationalization.
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'

LANGUAGES = (
    ('en', _('English')),
    ('ms', _('Bahasa Malaysia')),
)

LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale'),
)

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# JavaScript Internationalization (i18n)
JS_I18N_PACKAGES = (
    'eclaim.masterfiles',
    'eclaim.settings'
)

# Caching.
CACHE_TIMEOUT = 7 * 86400  # 7 days

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': os.path.join(BASE_DIR, 'cache'),
        'TIMEOUT': CACHE_TIMEOUT
    },
}

# Logging.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.abspath('eclaim.log'),
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file'],
            'propagate': True,
            'level': 'DEBUG',
        },
        'ECLAIM': {
            'handlers': ['file'],
            'level': 'DEBUG',
        },
    }
}
mit
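With the file-based cache configured above, application code reaches it through Django's standard cache API; the key and value here are illustrative.

from django.core.cache import cache

cache.set('report:42', {'total': 7})  # written under BASE_DIR/cache, expires after CACHE_TIMEOUT
print(cache.get('report:42'))         # {'total': 7}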
cdjones32/vertx-web
src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/langhebrewmodel.py
2763
11318
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
#          Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#   Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9

# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85,  # 40
 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253,  # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49,  # 60
 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253,  # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
  9,  8, 20, 16,  3,  2, 24, 14, 22,  1, 25, 15,  4, 11,  6, 23,
 12, 19, 13, 26, 18, 27, 21, 17,  7, 10,  5,251,252,128, 96,253,
)

# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2, 0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2, 0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2, 1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0, 3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2, 0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1, 0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0, 0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2, 0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2, 0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0, 3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2, 0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2, 0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2, 0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2, 0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1, 0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2, 0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0, 3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2, 0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2, 0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2, 0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0, 1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2, 0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, 3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0, 0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3, 0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0, 0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, 
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0, 0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, 0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0, 2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0, 0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2, 0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0, 0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1, 1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1, 0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1, 2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1, 1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1, 2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1, 1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1, 2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0, 0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)

Win1255HebrewModel = {
    'charToOrderMap': win1255_CharToOrderMap,
    'precedenceMatrix': HebrewLangModel,
    'mTypicalPositiveRatio': 0.984004,
    'keepEnglishLetter': False,
    'charsetName': "windows-1255"
}

# flake8: noqa
apache-2.0
M4sse/chromium.src
third_party/cython/src/Cython/Build/BuildExecutable.py
98
4286
""" Compile a Python script into an executable that embeds CPython and run it. Requires CPython to be built as a shared library ('libpythonX.Y'). Basic usage: python cythonrun somefile.py [ARGS] """ DEBUG = True import sys import os from distutils import sysconfig def get_config_var(name, default=''): return sysconfig.get_config_var(name) or default INCDIR = sysconfig.get_python_inc() LIBDIR1 = get_config_var('LIBDIR') LIBDIR2 = get_config_var('LIBPL') PYLIB = get_config_var('LIBRARY') PYLIB_DYN = get_config_var('LDLIBRARY') if PYLIB_DYN == PYLIB: # no shared library PYLIB_DYN = '' else: PYLIB_DYN = os.path.splitext(PYLIB_DYN[3:])[0] # 'lib(XYZ).so' -> XYZ CC = get_config_var('CC', os.environ.get('CC', '')) CFLAGS = get_config_var('CFLAGS') + ' ' + os.environ.get('CFLAGS', '') LINKCC = get_config_var('LINKCC', os.environ.get('LINKCC', CC)) LINKFORSHARED = get_config_var('LINKFORSHARED') LIBS = get_config_var('LIBS') SYSLIBS = get_config_var('SYSLIBS') EXE_EXT = sysconfig.get_config_var('EXE') def _debug(msg, *args): if DEBUG: if args: msg = msg % args sys.stderr.write(msg + '\n') def dump_config(): _debug('INCDIR: %s', INCDIR) _debug('LIBDIR1: %s', LIBDIR1) _debug('LIBDIR2: %s', LIBDIR2) _debug('PYLIB: %s', PYLIB) _debug('PYLIB_DYN: %s', PYLIB_DYN) _debug('CC: %s', CC) _debug('CFLAGS: %s', CFLAGS) _debug('LINKCC: %s', LINKCC) _debug('LINKFORSHARED: %s', LINKFORSHARED) _debug('LIBS: %s', LIBS) _debug('SYSLIBS: %s', SYSLIBS) _debug('EXE_EXT: %s', EXE_EXT) def runcmd(cmd, shell=True): if shell: cmd = ' '.join(cmd) _debug(cmd) else: _debug(' '.join(cmd)) try: import subprocess except ImportError: # Python 2.3 ... returncode = os.system(cmd) else: returncode = subprocess.call(cmd, shell=shell) if returncode: sys.exit(returncode) def clink(basename): runcmd([LINKCC, '-o', basename + EXE_EXT, basename+'.o', '-L'+LIBDIR1, '-L'+LIBDIR2] + [PYLIB_DYN and ('-l'+PYLIB_DYN) or os.path.join(LIBDIR1, PYLIB)] + LIBS.split() + SYSLIBS.split() + LINKFORSHARED.split()) def ccompile(basename): runcmd([CC, '-c', '-o', basename+'.o', basename+'.c', '-I' + INCDIR] + CFLAGS.split()) def cycompile(input_file, options=()): from Cython.Compiler import Version, CmdLine, Main options, sources = CmdLine.parse_command_line(list(options or ()) + ['--embed', input_file]) _debug('Using Cython %s to compile %s', Version.version, input_file) result = Main.compile(sources, options) if result.num_errors > 0: sys.exit(1) def exec_file(program_name, args=()): runcmd([os.path.abspath(program_name)] + list(args), shell=False) def build(input_file, compiler_args=(), force=False): """ Build an executable program from a Cython module. Returns the name of the executable file. """ basename = os.path.splitext(input_file)[0] exe_file = basename + EXE_EXT if not force and os.path.abspath(exe_file) == os.path.abspath(input_file): raise ValueError("Input and output file names are the same, refusing to overwrite") if (not force and os.path.exists(exe_file) and os.path.exists(input_file) and os.path.getmtime(input_file) <= os.path.getmtime(exe_file)): _debug("File is up to date, not regenerating %s", exe_file) return exe_file cycompile(input_file, compiler_args) ccompile(basename) clink(basename) return exe_file def build_and_run(args): """ Build an executable program from a Cython module and runs it. Arguments after the module name will be passed verbatimely to the program. 
""" cy_args = [] last_arg = None for i, arg in enumerate(args): if arg.startswith('-'): cy_args.append(arg) elif last_arg in ('-X', '--directive'): cy_args.append(arg) else: input_file = arg args = args[i+1:] break last_arg = arg else: raise ValueError('no input file provided') program_name = build(input_file, cy_args) exec_file(program_name, args) if __name__ == '__main__': build_and_run(sys.argv[1:])
bsd-3-clause
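The option/input-file split in build_and_run() is the subtle part: leading '-' arguments (plus the value following -X/--directive) go to Cython, the first bare argument is the module, and everything after it is passed to the built program. A standalone copy of just that loop, under an invented helper name, makes the behaviour checkable:

def split_args(args):
    # mirrors the argument loop in build_and_run() above
    cy_args = []
    last_arg = None
    for i, arg in enumerate(args):
        if arg.startswith('-'):
            cy_args.append(arg)
        elif last_arg in ('-X', '--directive'):
            cy_args.append(arg)
        else:
            return cy_args, arg, args[i+1:]
        last_arg = arg
    raise ValueError('no input file provided')

print(split_args(['-X', 'boundscheck=False', 'prog.py', '--verbose']))
# (['-X', 'boundscheck=False'], 'prog.py', ['--verbose'])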
debsankha/networkx
networkx/readwrite/multiline_adjlist.py
13
11887
# -*- coding: utf-8 -*- """ ************************* Multi-line Adjacency List ************************* Read and write NetworkX graphs as multi-line adjacency lists. The multi-line adjacency list format is useful for graphs with nodes that can be meaningfully represented as strings. With this format simple edge data can be stored but node or graph data is not. Format ------ The first label in a line is the source node label followed by the node degree d. The next d lines are target node labels and optional edge data. That pattern repeats for all nodes in the graph. The graph with edges a-b, a-c, d-e can be represented as the following adjacency list (anything following the # in a line is a comment):: # example.multiline-adjlist a 2 b c d 1 e """ __author__ = '\n'.join(['Aric Hagberg <hagberg@lanl.gov>', 'Dan Schult <dschult@colgate.edu>', 'Loïc Séguin-C. <loicseguin@gmail.com>']) # Copyright (C) 2004-2015 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. __all__ = ['generate_multiline_adjlist', 'write_multiline_adjlist', 'parse_multiline_adjlist', 'read_multiline_adjlist'] from networkx.utils import make_str, open_file import networkx as nx def generate_multiline_adjlist(G, delimiter=' '): """Generate a single line of the graph G in multiline adjacency list format. Parameters ---------- G : NetworkX graph delimiter : string, optional Separator for node labels Returns ------- lines : string Lines of data in multiline adjlist format. Examples -------- >>> G = nx.lollipop_graph(4, 3) >>> for line in nx.generate_multiline_adjlist(G): ... print(line) 0 3 1 {} 2 {} 3 {} 1 2 2 {} 3 {} 2 1 3 {} 3 1 4 {} 4 1 5 {} 5 1 6 {} 6 0 See Also -------- write_multiline_adjlist, read_multiline_adjlist """ if G.is_directed(): if G.is_multigraph(): for s, nbrs in G.adjacency_iter(): nbr_edges = [(u, data) for u, datadict in nbrs.items() for key, data in datadict.items()] deg = len(nbr_edges) yield make_str(s) + delimiter + str(deg) for u, d in nbr_edges: if d is None: yield make_str(u) else: yield make_str(u) + delimiter + make_str(d) else: # directed single edges for s, nbrs in G.adjacency_iter(): deg = len(nbrs) yield make_str(s) + delimiter + str(deg) for u, d in nbrs.items(): if d is None: yield make_str(u) else: yield make_str(u) + delimiter + make_str(d) else: # undirected if G.is_multigraph(): seen = set() # helper dict used to avoid duplicate edges for s, nbrs in G.adjacency_iter(): nbr_edges = [(u, data) for u, datadict in nbrs.items() if u not in seen for key, data in datadict.items()] deg = len(nbr_edges) yield make_str(s) + delimiter + str(deg) for u, d in nbr_edges: if d is None: yield make_str(u) else: yield make_str(u) + delimiter + make_str(d) seen.add(s) else: # undirected single edges seen = set() # helper dict used to avoid duplicate edges for s, nbrs in G.adjacency_iter(): nbr_edges = [(u, d) for u, d in nbrs.items() if u not in seen] deg = len(nbr_edges) yield make_str(s) + delimiter + str(deg) for u, d in nbr_edges: if d is None: yield make_str(u) else: yield make_str(u) + delimiter + make_str(d) seen.add(s) @open_file(1, mode='wb') def write_multiline_adjlist(G, path, delimiter=' ', comments='#', encoding='utf-8'): """ Write the graph G in multiline adjacency list format to path Parameters ---------- G : NetworkX graph comments : string, optional Marker for comment lines delimiter : string, optional Separator for node labels encoding : string, optional Text encoding. 
Examples -------- >>> G=nx.path_graph(4) >>> nx.write_multiline_adjlist(G,"test.adjlist") The path can be a file handle or a string with the name of the file. If a file handle is provided, it has to be opened in 'wb' mode. >>> fh=open("test.adjlist",'wb') >>> nx.write_multiline_adjlist(G,fh) Filenames ending in .gz or .bz2 will be compressed. >>> nx.write_multiline_adjlist(G,"test.adjlist.gz") See Also -------- read_multiline_adjlist """ import sys import time pargs = comments + " ".join(sys.argv) header = ("{}\n".format(pargs) + comments + " GMT {}\n".format(time.asctime(time.gmtime())) + comments + " {}\n".format(G.name)) path.write(header.encode(encoding)) for multiline in generate_multiline_adjlist(G, delimiter): multiline += '\n' path.write(multiline.encode(encoding)) def parse_multiline_adjlist(lines, comments='#', delimiter=None, create_using=None, nodetype=None, edgetype=None): """Parse lines of a multiline adjacency list representation of a graph. Parameters ---------- lines : list or iterator of strings Input data in multiline adjlist format create_using: NetworkX graph container Use given NetworkX graph for holding nodes or edges. nodetype : Python type, optional Convert nodes to this type. comments : string, optional Marker for comment lines delimiter : string, optional Separator for node labels. The default is whitespace. Returns ------- G: NetworkX graph The graph corresponding to the lines in multiline adjacency list format. Examples -------- >>> lines = ['1 2', ... "2 {'weight':3, 'name': 'Frodo'}", ... "3 {}", ... "2 1", ... "5 {'weight':6, 'name': 'Saruman'}"] >>> G = nx.parse_multiline_adjlist(iter(lines), nodetype = int) >>> G.nodes() [1, 2, 3, 5] """ from ast import literal_eval if create_using is None: G = nx.Graph() else: try: G = create_using G.clear() except: raise TypeError("Input graph is not a networkx graph type") for line in lines: p = line.find(comments) if p >= 0: line = line[:p] if not line: continue try: (u, deg) = line.strip().split(delimiter) deg = int(deg) except: raise TypeError("Failed to read node and degree on line ({})".format(line)) if nodetype is not None: try: u = nodetype(u) except: raise TypeError("Failed to convert node ({}) to type {}" .format(u, nodetype)) G.add_node(u) for i in range(deg): while True: try: line = next(lines) except StopIteration: msg = "Failed to find neighbor for node ({})".format(u) raise TypeError(msg) p = line.find(comments) if p >= 0: line = line[:p] if line: break vlist = line.strip().split(delimiter) numb = len(vlist) if numb < 1: continue # isolated node v = vlist.pop(0) data = ''.join(vlist) if nodetype is not None: try: v = nodetype(v) except: raise TypeError( "Failed to convert node ({}) to type {}" .format(v, nodetype)) if edgetype is not None: try: edgedata = {'weight': edgetype(data)} except: raise TypeError( "Failed to convert edge data ({}) to type {}" .format(data, edgetype)) else: try: # try to evaluate edgedata = literal_eval(data) except: edgedata = {} G.add_edge(u, v, attr_dict=edgedata) return G @open_file(0, mode='rb') def read_multiline_adjlist(path, comments="#", delimiter=None, create_using=None, nodetype=None, edgetype=None, encoding='utf-8'): """Read graph in multi-line adjacency list format from path. Parameters ---------- path : string or file Filename or file handle to read. Filenames ending in .gz or .bz2 will be uncompressed. create_using: NetworkX graph container Use given NetworkX graph for holding nodes or edges. nodetype : Python type, optional Convert nodes to this type. 
edgetype : Python type, optional Convert edge data to this type. comments : string, optional Marker for comment lines delimiter : string, optional Separator for node labels. The default is whitespace. Returns ------- G: NetworkX graph Examples -------- >>> G=nx.path_graph(4) >>> nx.write_multiline_adjlist(G,"test.adjlist") >>> G=nx.read_multiline_adjlist("test.adjlist") The path can be a file or a string with the name of the file. If a file is provided, it has to be opened in 'rb' mode. >>> fh=open("test.adjlist", 'rb') >>> G=nx.read_multiline_adjlist(fh) Filenames ending in .gz or .bz2 will be uncompressed. >>> nx.write_multiline_adjlist(G,"test.adjlist.gz") >>> G=nx.read_multiline_adjlist("test.adjlist.gz") The optional nodetype is a function to convert node strings to nodetype. For example >>> G=nx.read_multiline_adjlist("test.adjlist", nodetype=int) will attempt to convert all nodes to integer type. The optional edgetype is a function to convert edge data strings to edgetype. >>> G=nx.read_multiline_adjlist("test.adjlist") The optional create_using parameter is a NetworkX graph container. The default is Graph(), an undirected graph. To read the data as a directed graph use >>> G=nx.read_multiline_adjlist("test.adjlist", create_using=nx.DiGraph()) Notes ----- This format does not store graph, node, or edge data. See Also -------- write_multiline_adjlist """ lines = (line.decode(encoding) for line in path) return parse_multiline_adjlist(lines, comments=comments, delimiter=delimiter, create_using=create_using, nodetype=nodetype, edgetype=edgetype) # fixture for nose tests def teardown_module(module): import os for fname in ['test.adjlist', 'test.adjlist.gz']: if os.path.isfile(fname): os.unlink(fname)
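# A minimal round-trip sketch of the format described above; a hedged
# example that relies only on the two public functions documented in this
# module:
import networkx as nx

G = nx.Graph()
G.add_edges_from([('a', 'b'), ('a', 'c'), ('d', 'e')])
nx.write_multiline_adjlist(G, 'example.multiline-adjlist')
H = nx.read_multiline_adjlist('example.multiline-adjlist')
# Compare edges orientation-insensitively, since the graph is undirected.
assert set(map(frozenset, G.edges())) == set(map(frozenset, H.edges()))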
bsd-3-clause
catapult-project/catapult
dependency_manager/dependency_manager/exceptions.py
14
1695
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from py_utils import cloud_storage CloudStorageError = cloud_storage.CloudStorageError class UnsupportedConfigFormatError(ValueError): def __init__(self, config_type, config_file): if not config_type: message = ('The json file at %s is unsupported by the dependency_manager ' 'due to no specified config type' % config_file) else: message = ('The json file at %s has config type %s, which is unsupported ' 'by the dependency manager.' % (config_file, config_type)) super(UnsupportedConfigFormatError, self).__init__(message) class EmptyConfigError(ValueError): def __init__(self, file_path): super(EmptyConfigError, self).__init__('Empty config at %s.' % file_path) class FileNotFoundError(Exception): def __init__(self, file_path): super(FileNotFoundError, self).__init__('No file found at %s' % file_path) class NoPathFoundError(Exception): def __init__(self, dependency, platform): super(NoPathFoundError, self).__init__( 'No file could be found locally, and no file to download from cloud ' 'storage for %s on platform %s' % (dependency, platform)) class ReadWriteError(Exception): pass class CloudStorageUploadConflictError(CloudStorageError): def __init__(self, bucket, path): super(CloudStorageUploadConflictError, self).__init__( 'File location %s already exists in bucket %s' % (path, bucket)) class ArchiveError(Exception): def __init__(self, msg): super(ArchiveError, self).__init__(msg)
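# A hedged usage sketch of the exception classes above; the helper function
# and the 'base_config' type name are hypothetical, invented for illustration.
def _check_config_type(config_type, config_file, supported=('base_config',)):
    # Raise the module's dedicated error for a missing or unknown type.
    if config_type not in supported:
        raise UnsupportedConfigFormatError(config_type, config_file)

try:
    _check_config_type(None, 'deps.json')
except UnsupportedConfigFormatError as e:
    print(e)  # names the file and notes that no config type was specified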
bsd-3-clause
quantumlib/Cirq
cirq-core/cirq/experiments/purity_estimation.py
1
2467
# Copyright 2020 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Sequence import numpy as np def purity_from_probabilities( hilbert_space_dimension: int, probabilities: Sequence[float], ) -> float: """Purity estimator from speckle purity benchmarking. Estimates purity from empirical probabilities of observed bitstrings. This estimator assumes that the circuit used in experiment is sufficiently scrambling that its output probabilities follow the Porter-Thomas distribution. This assumption holds for typical instances of random quantum circuits of sufficient depth. The state resulting from the experimental implementation of the circuit is modeled as ρ = p |𝜓⟩⟨𝜓| + (1 - p) I / D where |𝜓⟩ is a pure state, I / D is the maximally mixed state, and p is between 0 and 1. The purity of this state is given by p**2. If p = 1, then the bitstring probabilities are modeled as being drawn from the Porter-Thomas distribution, with probability density function given by f(x) = (D - 1) (1 - x)**(D - 2). The mean of this distribution is 1 / D and its variance is (D - 1) / [D**2 (D + 1)]. In general, the variance of the distribution is multiplied by p**2. Therefore, the purity can be computed by dividing the variance of the empirical probabilities by the Porter-Thomas variance (D - 1) / [D**2 (D + 1)]. Args: hilbert_space_dimension: Dimension of the Hilbert space on which the quantum circuit acts. probabilities: Empirical probabilities of bitstrings observed in experiment. Returns: Estimate of the purity of the state resulting from the experimental implementation of a quantum circuit. """ D = hilbert_space_dimension porter_thomas_variance = (D - 1) / (D + 1) / D ** 2 return np.var(probabilities) / porter_thomas_variance
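# A numerical sanity check (a sketch, not from the source): sampling the
# uniform Dirichlet distribution over D outcomes makes each entry
# Beta(1, D - 1) distributed, which is exactly the Porter-Thomas density
# above, so the estimator should return a value near 1.
import numpy as np

D = 2 ** 10
probs = np.random.dirichlet(np.ones(D))     # Porter-Thomas-like probabilities
print(purity_from_probabilities(D, probs))  # expected to be close to 1.0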
apache-2.0
ActiveState/code
recipes/Python/578414_Takuzu_solver/recipe-578414.py
1
4263
# Copyright 2013 Eviatar Bach, eviatarbach@gmail.com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Implementation of a Takuzu solver. A Takuzu board consists of a square grid of binary cells. There must be an equal number of 0s and 1s in every row and column, no duplicate rows or columns, and no more than two of the same bit consecutive in every row and column. """ from constraint_solver import pywrapcp N = None BOARD1 = [[N, 1, N, 0], [N, N, 0, N], [N, 0, N, N], [1, 1, N, 0]] BOARD2 = [[N, 1, N, N, N, 0], [1, N, N, N, N, 1], [N, N, 0, N, N, N], [1, N, N, N, N, N], [N, N, N, 0, N, 0], [N, N, N, N, 1, N]] BOARD3 = [[N, N, N, 1, N, N, N, N, N, N], [N, 0, N, N, N, 0, N, N, N, 1], [1, N, 1, 1, N, N, N, 1, N, N], [N, N, N, N, N, 0, N, N, N, N], [N, 1, N, N, N, N, N, N, 0, N], [0, N, N, N, 0, N, N, N, 0, N], [N, 1, N, N, N, 0, N, N, N, N], [1, N, N, N, 1, N, 1, N, N, N], [1, 1, N, 0, N, N, N, N, N, N], [N, N, N, N, N, N, N, 1, N, N]] def valid(board): ''' Checks whether a board has no duplicate rows or columns. This is needed to filter out invalid solutions from the constraint solver. ''' return ((len(set(map(tuple, board))) == len(board)) and (len(set(zip(*board))) == len(board))) def solve(board): ''' Solves a Takuzu board, with None for empty (unsolved) spaces ''' assert len(set(map(len, board))) == 1 # all row lengths are the same assert len(board) == len(board[0]) # width and height are the same assert len(board) % 2 == 0 # board has even dimensions line_sum = len(board) / 2 # the number to which all rows and columns sum line = range(len(board)) # line and row indices solver = pywrapcp.Solver('takuzu') grid = {} for i in line: for j in line: grid[(i, j)] = solver.IntVar(0, 1, 'grid %i %i' % (i, j)) # initial values for i in line: for j in line: if board[i][j] is not None: solver.Add(grid[(i, j)] == board[i][j]) # no three consecutive elements in rows or columns for i in line: for j in range(len(board) - 2): solver.Add(solver.SumGreaterOrEqual([grid[(i, jl)] for jl in line[j:j + 3]], 1)) solver.Add(solver.SumLessOrEqual([grid[(i, jl)] for jl in line[j:j + 3]], 2)) solver.Add(solver.SumGreaterOrEqual([grid[(jl, i)] for jl in line[j:j + 3]], 1)) solver.Add(solver.SumLessOrEqual([grid[(jl, i)] for jl in line[j:j + 3]], 2)) # rows and columns sum to half the size for i in line: solver.Add(solver.SumEquality([grid[(i, j)] for j in line], line_sum)) for j in line: solver.Add(solver.SumEquality([grid[(i, j)] for i in line], line_sum)) # regroup all variables into a list all_vars = [grid[(i, j)] for i in line for j in line] # create search phases vars_phase = solver.Phase(all_vars, solver.INT_VAR_SIMPLE, solver.INT_VALUE_SIMPLE) # search for all solutions and remove those with duplicate rows or columns solver.NewSearch(vars_phase) solutions = [] while solver.NextSolution(): solutions.append([[int(grid[(i, j)].Value()) for j in line] for i in line]) solver.EndSearch() solutions = filter(valid, solutions) assert len(solutions) == 1 # there should be only one solution return solutions[0] for row in 
solve(BOARD3): print row
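# An independent sketch of the per-line rules stated in the module docstring,
# without the or-tools solver; handy for spot-checking a returned solution.
def line_ok(line):
    # Equal numbers of 0s and 1s, and no three equal bits in a row.
    balanced = line.count(0) == line.count(1)
    no_triples = not any(line[i] == line[i + 1] == line[i + 2]
                         for i in range(len(line) - 2))
    return balanced and no_triples

def board_ok(board):
    rows = [list(r) for r in board]
    cols = [list(c) for c in zip(*board)]
    # valid() above already checks for duplicate rows/columns.
    return valid(board) and all(line_ok(l) for l in rows + cols)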
mit
quantumlib/OpenFermion
src/openfermion/testing/performance_benchmarks.py
1
11534
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This file contains tests of code performance to reveal bottlenecks.""" import time import logging import numpy from openfermion.utils import commutator, Grid from openfermion.ops import FermionOperator, QubitOperator from openfermion.hamiltonians import jellium_model from openfermion.transforms.opconversions import (jordan_wigner, get_fermion_operator, normal_ordered) from openfermion.linalg import (jordan_wigner_sparse, LinearQubitOperatorOptions, LinearQubitOperator, ParallelLinearQubitOperator) from openfermion.testing.testing_utils import random_interaction_operator from openfermion.transforms import \ commutator_ordered_diagonal_coulomb_with_two_body_operator def benchmark_molecular_operator_jordan_wigner(n_qubits): """Test speed with which molecular operators transform to qubit operators. Args: n_qubits: The size of the molecular operator instance. Ideally, we would be able to transform to a qubit operator for 50 qubit instances in less than a minute. We are way too slow right now. Returns: runtime: The number of seconds required to make the conversion. """ # Get an instance of InteractionOperator. molecular_operator = random_interaction_operator(n_qubits) # Convert to a qubit operator. start = time.time() _ = jordan_wigner(molecular_operator) end = time.time() # Return runtime. runtime = end - start return runtime def benchmark_fermion_math_and_normal_order(n_qubits, term_length, power): """Benchmark both arithmetic operators and normal ordering on fermions. The idea is we generate two random FermionTerms, A and B, each acting on n_qubits with term_length operators. We then compute (A + B) ** power. This is costly; that is the first benchmark. The second benchmark is in normal ordering whatever comes out. Args: n_qubits: The number of qubits on which these terms act. term_length: The number of operators in each term. power: Int, the exponent to which to raise the sum of the two terms. Returns: runtime_math: The time it takes to perform (A + B) ** power runtime_normal_order: The time it takes to perform FermionOperator.normal_order() """ # Generate random operator strings. operators_a = [(numpy.random.randint(n_qubits), numpy.random.randint(2))] operators_b = [(numpy.random.randint(n_qubits), numpy.random.randint(2))] for _ in range(term_length): # Make sure the operator is not trivially zero. operator_a = (numpy.random.randint(n_qubits), numpy.random.randint(2)) while operator_a == operators_a[-1]: operator_a = (numpy.random.randint(n_qubits), numpy.random.randint(2)) operators_a += [operator_a] # Do the same for the other operator. operator_b = (numpy.random.randint(n_qubits), numpy.random.randint(2)) while operator_b == operators_b[-1]: operator_b = (numpy.random.randint(n_qubits), numpy.random.randint(2)) operators_b += [operator_b] # Initialize FermionTerms and then sum them together. 
fermion_term_a = FermionOperator(tuple(operators_a), float(numpy.random.randn())) fermion_term_b = FermionOperator(tuple(operators_b), float(numpy.random.randn())) fermion_operator = fermion_term_a + fermion_term_b # Exponentiate. start_time = time.time() fermion_operator **= power runtime_math = time.time() - start_time # Normal order. start_time = time.time() normal_ordered(fermion_operator) runtime_normal_order = time.time() - start_time # Return. return runtime_math, runtime_normal_order def benchmark_jordan_wigner_sparse(n_qubits): """Benchmark the speed at which a FermionOperator is mapped to a matrix. Args: n_qubits: The number of qubits in the example. Returns: runtime: The time in seconds that the benchmark took. """ # Initialize a random FermionOperator. molecular_operator = random_interaction_operator(n_qubits) fermion_operator = get_fermion_operator(molecular_operator) # Map to SparseOperator class. start_time = time.time() _ = jordan_wigner_sparse(fermion_operator) runtime = time.time() - start_time return runtime def benchmark_linear_qubit_operator(n_qubits, n_terms, processes=None): """Test speed with getting a linear operator from a Qubit Operator. Args: n_qubits: The number of qubits, implying the dimension of the operator is 2 ** n_qubits. n_terms: The number of terms in a qubit operator. processes: The number of processors to use. Returns: runtime_operator: The time it takes to get the linear operator. runtime_matvec: The time it takes to perform matrix multiplication. """ # Generates Qubit Operator with specified number of terms. map_int_to_operator = { 0: 'X', 1: 'Y', 2: 'Z', } qubit_operator = QubitOperator.zero() for _ in range(n_terms): tuples = [] for i in range(n_qubits): operator = numpy.random.randint(4) # 3 is 'I', so just skip. if operator > 2: continue tuples.append((i, map_int_to_operator[operator])) if tuples: qubit_operator += QubitOperator(tuples, 1.00) # Gets an instance of (Parallel)LinearQubitOperator. start = time.time() if processes is None: linear_operator = LinearQubitOperator(qubit_operator, n_qubits) else: linear_operator = ParallelLinearQubitOperator( qubit_operator, n_qubits, LinearQubitOperatorOptions(processes=processes)) end = time.time() runtime_operator = end - start vec = numpy.random.rand(2**n_qubits) # Performs matrix multiplication. start = time.time() _ = linear_operator * vec end = time.time() runtime_matvec = end - start return runtime_operator, runtime_matvec def benchmark_commutator_diagonal_coulomb_operators_2D_spinless_jellium( side_length): """Test speed of computing commutators using specialized functions. Args: side_length: The side length of the 2D jellium grid. There are side_length ** 2 qubits, and O(side_length ** 4) terms in the Hamiltonian. Returns: runtime_commutator: The time it takes to compute a commutator, after partitioning the terms and normal ordering, using the regular commutator function. runtime_diagonal_commutator: The time it takes to compute the same commutator using methods restricted to diagonal Coulomb operators. 
""" hamiltonian = normal_ordered( jellium_model(Grid(2, side_length, 1.), plane_wave=False)) part_a = FermionOperator.zero() part_b = FermionOperator.zero() add_to_a_or_b = 0 # add to a if 0; add to b if 1 for term, coeff in hamiltonian.terms.items(): # Partition terms in the Hamiltonian into part_a or part_b if add_to_a_or_b: part_a += FermionOperator(term, coeff) else: part_b += FermionOperator(term, coeff) add_to_a_or_b ^= 1 start = time.time() _ = normal_ordered(commutator(part_a, part_b)) end = time.time() runtime_commutator = end - start start = time.time() _ = commutator_ordered_diagonal_coulomb_with_two_body_operator( part_a, part_b) end = time.time() runtime_diagonal_commutator = end - start return runtime_commutator, runtime_diagonal_commutator # Sets up each benchmark run. def run_molecular_operator_jordan_wigner(n_qubits=18): """Run InteractionOperator.jordan_wigner_transform() benchmark.""" logging.info('Starting test on ' 'InteractionOperator.jordan_wigner_transform()') logging.info('n_qubits = %d.', n_qubits) runtime = benchmark_molecular_operator_jordan_wigner(n_qubits) logging.info( 'InteractionOperator.jordan_wigner_transform() takes %f ' 'seconds.\n', runtime) return runtime def run_fermion_math_and_normal_order(n_qubits=20, term_length=10, power=15): """Run benchmark on FermionOperator math and normal-ordering.""" logging.info('Starting test on FermionOperator math and normal ordering.') logging.info('(n_qubits, term_length, power) = (%d, %d, %d).', n_qubits, term_length, power) runtime_math, runtime_normal = benchmark_fermion_math_and_normal_order( n_qubits, term_length, power) logging.info('Math took %f seconds. Normal ordering took %f seconds.\n', runtime_math, runtime_normal) return runtime_math, runtime_normal def run_jordan_wigner_sparse(n_qubits=10): """Run FermionOperator.jordan_wigner_sparse() benchmark.""" logging.info('Starting test on FermionOperator.jordan_wigner_sparse().') logging.info('n_qubits = %d.', n_qubits) runtime = benchmark_jordan_wigner_sparse(n_qubits) logging.info('Construction of SparseOperator took %f seconds.\n', runtime) return runtime def run_linear_qubit_operator(n_qubits=16, n_terms=10, processes=10): """Run linear_qubit_operator benchmark.""" logging.info('Starting test on linear_qubit_operator().') logging.info('(n_qubits, n_terms) = (%d, %d).', n_qubits, n_terms) _, runtime_sequential = benchmark_linear_qubit_operator(n_qubits, n_terms) _, runtime_parallel = benchmark_linear_qubit_operator( n_qubits, n_terms, processes) logging.info( 'LinearQubitOperator took %f seconds, while ' 'ParallelQubitOperator took %f seconds with %d processes, ' 'and ratio is %.2f.\n', runtime_sequential, runtime_parallel, processes, runtime_sequential / runtime_parallel) return runtime_sequential, runtime_parallel def run_diagonal_commutator(side_length=4): """Run commutator_diagonal_coulomb_operators benchmark.""" logging.info( 'Starting test on ' 'commutator_ordered_diagonal_coulomb_with_two_body_operator().') runtime_commutator, runtime_diagonal_commutator = ( benchmark_commutator_diagonal_coulomb_operators_2D_spinless_jellium( side_length=side_length)) logging.info( 'Regular commutator computation took %f seconds, while ' 'commutator_ordered_diagonal_coulomb_with_two_body_operator' ' took %f seconds. Ratio is %.2f.\n', runtime_commutator, runtime_diagonal_commutator, runtime_commutator / runtime_diagonal_commutator) return runtime_commutator, runtime_diagonal_commutator
apache-2.0
jtopjian/st2
st2common/tests/unit/test_reference.py
2
3724
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import mock import mongoengine from st2common.exceptions import db from st2common.models.db.trigger import TriggerDB from st2common.persistence.trigger import Trigger from st2common.transport.publishers import PoolPublisher from st2common.util import reference from st2tests import DbTestCase @mock.patch.object(PoolPublisher, 'publish', mock.MagicMock()) class ReferenceTest(DbTestCase): __model = None __ref = None @classmethod @mock.patch.object(PoolPublisher, 'publish', mock.MagicMock()) def setUpClass(cls): super(ReferenceTest, cls).setUpClass() trigger = TriggerDB() trigger.name = 'trigger-1' trigger.pack = 'dummy_pack_1' cls.__model = Trigger.add_or_update(trigger) cls.__ref = {'id': str(cls.__model.id), 'name': cls.__model.name} @classmethod @mock.patch.object(PoolPublisher, 'publish', mock.MagicMock()) def tearDownClass(cls): Trigger.delete(cls.__model) super(ReferenceTest, cls).tearDownClass() def test_to_reference(self): ref = reference.get_ref_from_model(self.__model) self.assertEqual(ref, self.__ref, 'Failed to generated equivalent ref.') def test_to_reference_no_model(self): try: reference.get_ref_from_model(None) self.assertTrue(False, 'Exception expected.') except ValueError: self.assertTrue(True) def test_to_reference_no_model_id(self): try: model = copy.copy(self.__model) model.id = None reference.get_ref_from_model(model) self.assertTrue(False, 'Exception expected.') except db.StackStormDBObjectMalformedError: self.assertTrue(True) def test_to_model_with_id(self): model = reference.get_model_from_ref(Trigger, self.__ref) self.assertEqual(model, self.__model, 'Failed to return correct model.') def test_to_model_with_name(self): ref = copy.copy(self.__ref) ref['id'] = None model = reference.get_model_from_ref(Trigger, ref) self.assertEqual(model, self.__model, 'Failed to return correct model.') def test_to_model_no_name_no_id(self): try: reference.get_model_from_ref(Trigger, {}) self.assertTrue(False, 'Exception expected.') except db.StackStormDBObjectNotFoundError: self.assertTrue(True) def test_to_model_unknown_id(self): try: reference.get_model_from_ref(Trigger, {'id': '1'}) self.assertTrue(False, 'Exception expected.') except mongoengine.ValidationError: self.assertTrue(True) def test_to_model_unknown_name(self): try: reference.get_model_from_ref(Trigger, {'name': 'unknown'}) self.assertTrue(False, 'Exception expected.') except ValueError: self.assertTrue(True)
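# The try/except/assertTrue(False) pattern above works, but a sketch of the
# more idiomatic unittest form for the same checks would be:
#
#     def test_to_reference_no_model(self):
#         with self.assertRaises(ValueError):
#             reference.get_ref_from_model(None)
#
#     def test_to_model_unknown_name(self):
#         with self.assertRaises(ValueError):
#             reference.get_model_from_ref(Trigger, {'name': 'unknown'})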
apache-2.0
vbraun/oxford-strings
app/calendar_view.py
1
6003
# -*- coding: utf-8 -*- """ Calendaring Page Views """ import sys import os import uuid import logging from datetime import date, datetime, timedelta from webapp2 import uri_for from google.appengine.api import users import app.config as config from app.base_view import RequestHandler from app.decorators import cached_property, requires_login, requires_admin from app.event_model import Event class CalendarAdmin(RequestHandler): def get_events(self): """ Return all future events """ now = datetime.combine(date.today(), datetime.min.time()) return Event.query(Event.start_date >= now).order(Event.start_date).fetch(100) def get(self): self.cache_must_revalidate() values = dict() values['sync_url'] = uri_for('cron-sync') values['full_url'] = uri_for('calendar-admin') values['calendar_admin_url'] = self.request.uri values['calendar'] = self.get_events() self.render_response('calendar_admin.html', **values) @requires_admin def post(self): key_id = self.request.get('key_id') active = (self.request.get('active') == u'true') ev = Event.get_by_id(int(key_id)) ev.active = active ev.put() class EventListing(RequestHandler): def get_events(self): """ Return all future events """ now = datetime.combine(date.today(), datetime.min.time()) query = Event.query(Event.start_date >= now, Event.active == True) return query.order(Event.start_date).fetch(100) def get_template(self): raise NotImplementedError def get(self): self.cache_must_revalidate() values = dict() # values['edit_url'] = uri_for('calendar-new') values['sync_url'] = uri_for('cron-sync') values['calendar_admin_url'] = uri_for('calendar-admin') values['calendar'] = self.get_events() values['abstract_intro'] = config.abstract_intro self.render_response(self.get_template(), **values) self.response.md5_etag() class IcalExport(EventListing): def _ical_time(self, dt): import pytz import time dt = pytz.utc.localize(dt) return time.strftime('%Y%m%dT%H%M%SZ', dt.timetuple()) def get(self): from icalendar import Calendar, Event, vCalAddress, vText cal = Calendar() cal.add('prodid', '-//Strings Oxford Calendaring//strings.ox.ac.uk//') cal.add('version', '2.0') cal.add('X-WR-CALNAME', 'Strings Oxford') for ev in self.get_events(): event = Event() event['uid'] = vText(ev.uid) event['location'] = vText(ev.location) event['summary'] = ev.title event['dtstart'] = self._ical_time(ev.start_date) event['dtend'] = self._ical_time(ev.end_date) desc = u'Speaker: {}\n'.format(ev.speaker) desc += u'Location: {}\n'.format(ev.location) desc += u'Series: {}\n'.format(ev.series) desc += ev.description event['description'] = vText(desc) cal.add_component(event) #self.response.headers['Content-Type'] = 'text/plain' self.response.headers['Content-Type'] = 'text/calendar' self.response.write(cal.to_ical()) class Seminars(EventListing): def get_template(self): return 'calendar.html' class JuniorSeminar(EventListing): def get_events(self): """ Return all future events in the string theory junior seminar series """ now = datetime.combine(date.today(), datetime.min.time()) query = Event.query( Event.series == 'Strings Junior Seminar', Event.start_date >= now, Event.active == True) return query.order(Event.start_date).fetch(100) def get_template(self): return 'junior_seminar.html' class ThisWeek(EventListing): def get_template(self): return 'this_week.html' def get_start_date(self): """ Return the date of the last Saturday """ today = date.today() # today.weekday in {0, ..., 6} switches to "0" on Monday key_day = today + timedelta(days=2) # we want to switch calendar on saturday 
return today - timedelta(days=key_day.weekday()) def get_events(self): last_saturday = self.get_start_date() next_saturday = last_saturday + timedelta(weeks=1) t0 = datetime.combine(last_saturday, datetime.min.time()) t1 = datetime.combine(next_saturday, datetime.max.time()) # allow for week-spanning events would be ideally: # query = Event.query(Event.start_date <= t1, Event.end_date >= t0) # but inequality queries can currently be only on one property query = Event.query( Event.start_date >= t0, Event.start_date < t1, Event.active == True) return query.order(Event.start_date).fetch(100) class NextWeek(ThisWeek): def get_template(self): return 'next_week.html' def get_start_date(self): """ Return the date of the next Saturday """ return ThisWeek.get_start_date(self) + timedelta(weeks=1) class ThisWeekEmail(ThisWeek): def get_template(self): return 'this_week_email.html' class CalendarEdit(EventListing): """ TODO: do we really want to edit events ourselves? """ def get_event(self, key_id): if key_id is not None: return Event.get_by_id(int(key_id)) uid = str(uuid.uuid4()) ev = Event(uid=uid, editable=True, active=True) ev.start_date = datetime.utcnow() ev.end_date = datetime.utcnow() ev.put() return ev def get(self, uid=None): values = dict() values['calendar'] = [self.get_event(uid)] self.render_response('calendar.html', **values)
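# A quick check of the Saturday-rollover logic above (a sketch; the dates are
# chosen only for illustration): shifting today by two days before calling
# weekday() makes the computed week start switch on Saturday.
from datetime import date, timedelta

for today in [date(2024, 1, 5), date(2024, 1, 6)]:   # a Friday, a Saturday
    key_day = today + timedelta(days=2)
    print(today, '->', today - timedelta(days=key_day.weekday()))
# 2024-01-05 -> 2023-12-30  (Friday still maps to the previous Saturday)
# 2024-01-06 -> 2024-01-06  (the window rolls over on Saturday itself)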
gpl-2.0
jjmiranda/edx-platform
common/lib/xmodule/xmodule/tests/test_export.py
35
6802
""" Tests of XML export """ import ddt import lxml.etree import mock import pytz import shutil import unittest from datetime import datetime, timedelta, tzinfo from fs.osfs import OSFS from path import Path as path from tempfile import mkdtemp from textwrap import dedent from xblock.core import XBlock from xblock.fields import String, Scope, Integer from xblock.test.tools import blocks_are_equivalent from opaque_keys.edx.locations import Location from xmodule.modulestore import EdxJSONEncoder from xmodule.modulestore.xml import XMLModuleStore from xmodule.tests import DATA_DIR from xmodule.x_module import XModuleMixin def strip_filenames(descriptor): """ Recursively strips 'filename' from all children's definitions. """ print "strip filename from {desc}".format(desc=descriptor.location.to_deprecated_string()) if descriptor._field_data.has(descriptor, 'filename'): descriptor._field_data.delete(descriptor, 'filename') if hasattr(descriptor, 'xml_attributes'): if 'filename' in descriptor.xml_attributes: del descriptor.xml_attributes['filename'] for child in descriptor.get_children(): strip_filenames(child) descriptor.save() class PureXBlock(XBlock): """Class for testing pure XBlocks.""" has_children = True field1 = String(default="something", scope=Scope.user_state) field2 = Integer(scope=Scope.user_state) @ddt.ddt class RoundTripTestCase(unittest.TestCase): """ Check that our test courses roundtrip properly. Same course imported , than exported, then imported again. And we compare original import with second import (after export). Thus we make sure that export and import work properly. """ def setUp(self): super(RoundTripTestCase, self).setUp() self.maxDiff = None self.temp_dir = mkdtemp() self.addCleanup(shutil.rmtree, self.temp_dir) @mock.patch('xmodule.course_module.requests.get') @ddt.data( "toy", "simple", "conditional_and_poll", "conditional", "self_assessment", "test_exam_registration", "word_cloud", "pure_xblock", ) @XBlock.register_temp_plugin(PureXBlock, 'pure') def test_export_roundtrip(self, course_dir, mock_get): # Patch network calls to retrieve the textbook TOC mock_get.return_value.text = dedent(""" <?xml version="1.0"?><table_of_contents> <entry page="5" page_label="ii" name="Table of Contents"/> </table_of_contents> """).strip() root_dir = path(self.temp_dir) print "Copying test course to temp dir {0}".format(root_dir) data_dir = path(DATA_DIR) shutil.copytree(data_dir / course_dir, root_dir / course_dir) print "Starting import" initial_import = XMLModuleStore(root_dir, source_dirs=[course_dir], xblock_mixins=(XModuleMixin,)) courses = initial_import.get_courses() self.assertEquals(len(courses), 1) initial_course = courses[0] # export to the same directory--that way things like the custom_tags/ folder # will still be there. print "Starting export" file_system = OSFS(root_dir) initial_course.runtime.export_fs = file_system.makeopendir(course_dir) root = lxml.etree.Element('root') initial_course.add_xml_to_node(root) with initial_course.runtime.export_fs.open('course.xml', 'w') as course_xml: lxml.etree.ElementTree(root).write(course_xml) print "Starting second import" second_import = XMLModuleStore(root_dir, source_dirs=[course_dir], xblock_mixins=(XModuleMixin,)) courses2 = second_import.get_courses() self.assertEquals(len(courses2), 1) exported_course = courses2[0] print "Checking course equality" # HACK: filenames change when changing file formats # during imports from old-style courses. Ignore them. 
strip_filenames(initial_course) strip_filenames(exported_course) self.assertTrue(blocks_are_equivalent(initial_course, exported_course)) self.assertEquals(initial_course.id, exported_course.id) course_id = initial_course.id print "Checking key equality" self.assertItemsEqual( initial_import.modules[course_id].keys(), second_import.modules[course_id].keys() ) print "Checking module equality" for location in initial_import.modules[course_id].keys(): print("Checking", location) self.assertTrue(blocks_are_equivalent( initial_import.modules[course_id][location], second_import.modules[course_id][location] )) class TestEdxJsonEncoder(unittest.TestCase): """ Tests for xml_exporter.EdxJSONEncoder """ def setUp(self): super(TestEdxJsonEncoder, self).setUp() self.encoder = EdxJSONEncoder() class OffsetTZ(tzinfo): """A timezone with non-None utcoffset""" def utcoffset(self, _dt): return timedelta(hours=4) self.offset_tz = OffsetTZ() class NullTZ(tzinfo): """A timezone with None as its utcoffset""" def utcoffset(self, _dt): return None self.null_utc_tz = NullTZ() def test_encode_location(self): loc = Location('org', 'course', 'run', 'category', 'name', None) self.assertEqual(loc.to_deprecated_string(), self.encoder.default(loc)) loc = Location('org', 'course', 'run', 'category', 'name', 'version') self.assertEqual(loc.to_deprecated_string(), self.encoder.default(loc)) def test_encode_naive_datetime(self): self.assertEqual( "2013-05-03T10:20:30.000100", self.encoder.default(datetime(2013, 5, 3, 10, 20, 30, 100)) ) self.assertEqual( "2013-05-03T10:20:30", self.encoder.default(datetime(2013, 5, 3, 10, 20, 30)) ) def test_encode_utc_datetime(self): self.assertEqual( "2013-05-03T10:20:30+00:00", self.encoder.default(datetime(2013, 5, 3, 10, 20, 30, 0, pytz.UTC)) ) self.assertEqual( "2013-05-03T10:20:30+04:00", self.encoder.default(datetime(2013, 5, 3, 10, 20, 30, 0, self.offset_tz)) ) self.assertEqual( "2013-05-03T10:20:30Z", self.encoder.default(datetime(2013, 5, 3, 10, 20, 30, 0, self.null_utc_tz)) ) def test_fallthrough(self): with self.assertRaises(TypeError): self.encoder.default(None) with self.assertRaises(TypeError): self.encoder.default({})
agpl-3.0
jiangzhuo/kbengine
kbe/src/lib/python/Lib/test/test_mimetypes.py
111
4279
import io import locale import mimetypes import sys import unittest from test import support # Tell it we don't know about external files: mimetypes.knownfiles = [] mimetypes.inited = False mimetypes._default_mime_types() class MimeTypesTestCase(unittest.TestCase): def setUp(self): self.db = mimetypes.MimeTypes() def test_default_data(self): eq = self.assertEqual eq(self.db.guess_type("foo.html"), ("text/html", None)) eq(self.db.guess_type("foo.tgz"), ("application/x-tar", "gzip")) eq(self.db.guess_type("foo.tar.gz"), ("application/x-tar", "gzip")) eq(self.db.guess_type("foo.tar.Z"), ("application/x-tar", "compress")) eq(self.db.guess_type("foo.tar.bz2"), ("application/x-tar", "bzip2")) eq(self.db.guess_type("foo.tar.xz"), ("application/x-tar", "xz")) def test_data_urls(self): eq = self.assertEqual guess_type = self.db.guess_type eq(guess_type("data:,thisIsTextPlain"), ("text/plain", None)) eq(guess_type("data:;base64,thisIsTextPlain"), ("text/plain", None)) eq(guess_type("data:text/x-foo,thisIsTextXFoo"), ("text/x-foo", None)) def test_file_parsing(self): eq = self.assertEqual sio = io.StringIO("x-application/x-unittest pyunit\n") self.db.readfp(sio) eq(self.db.guess_type("foo.pyunit"), ("x-application/x-unittest", None)) eq(self.db.guess_extension("x-application/x-unittest"), ".pyunit") def test_non_standard_types(self): eq = self.assertEqual # First try strict eq(self.db.guess_type('foo.xul', strict=True), (None, None)) eq(self.db.guess_extension('image/jpg', strict=True), None) # And then non-strict eq(self.db.guess_type('foo.xul', strict=False), ('text/xul', None)) eq(self.db.guess_extension('image/jpg', strict=False), '.jpg') def test_guess_all_types(self): eq = self.assertEqual unless = self.assertTrue # First try strict. Use a set here for testing the results because if # test_urllib2 is run before test_mimetypes, global state is modified # such that the 'all' set will have more items in it. all = set(self.db.guess_all_extensions('text/plain', strict=True)) unless(all >= set(['.bat', '.c', '.h', '.ksh', '.pl', '.txt'])) # And now non-strict all = self.db.guess_all_extensions('image/jpg', strict=False) all.sort() eq(all, ['.jpg']) # And now for no hits all = self.db.guess_all_extensions('image/jpg', strict=True) eq(all, []) def test_encoding(self): getpreferredencoding = locale.getpreferredencoding self.addCleanup(setattr, locale, 'getpreferredencoding', getpreferredencoding) locale.getpreferredencoding = lambda: 'ascii' filename = support.findfile("mime.types") mimes = mimetypes.MimeTypes([filename]) exts = mimes.guess_all_extensions('application/vnd.geocube+xml', strict=True) self.assertEqual(exts, ['.g3', '.g\xb3']) @unittest.skipUnless(sys.platform.startswith("win"), "Windows only") class Win32MimeTypesTestCase(unittest.TestCase): def setUp(self): # ensure all entries actually come from the Windows registry self.original_types_map = mimetypes.types_map.copy() mimetypes.types_map.clear() mimetypes.init() self.db = mimetypes.MimeTypes() def tearDown(self): # restore default settings mimetypes.types_map.clear() mimetypes.types_map.update(self.original_types_map) def test_registry_parsing(self): # the original, minimum contents of the MIME database in the # Windows registry is undocumented AFAIK. 
# Use file types that should *always* exist: eq = self.assertEqual eq(self.db.guess_type("foo.txt"), ("text/plain", None)) eq(self.db.guess_type("image.jpg"), ("image/jpeg", None)) eq(self.db.guess_type("image.png"), ("image/png", None)) def test_main(): support.run_unittest(MimeTypesTestCase, Win32MimeTypesTestCase ) if __name__ == "__main__": test_main()
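# A minimal interactive sketch of the stdlib calls the tests above exercise:
import mimetypes

mimetypes.init()
print(mimetypes.guess_type('archive.tar.gz'))         # ('application/x-tar', 'gzip')
print(mimetypes.guess_extension('text/html'))         # '.html'
print(mimetypes.guess_type('foo.xul', strict=False))  # non-strict mapping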
lgpl-3.0
mbrukman/flocker
vagrant/tutorial/post-reboot-bootstrap.py
4
5139
#!/usr/bin/python # This script builds the base flocker-tutorial box. import sys import os from subprocess import check_call, check_output from textwrap import dedent from urlparse import urljoin if len(sys.argv) != 4: print "Wrong number of arguments." raise SystemExit(1) rpm_version = sys.argv[1] branch = sys.argv[2] build_server = sys.argv[3] or 'http://build.clusterhq.com/' rpm_dist = check_output(['rpm', '-E', '%dist']).strip() clusterhq_repo_url = ( 'https://s3.amazonaws.com/' 'clusterhq-archive/' 'centos/' 'clusterhq-release%s.noarch.rpm') % (rpm_dist,) check_call(['yum', 'install', '-y', clusterhq_repo_url]) if branch: # If a branch is specified, add a repo pointing at the # buildserver repository corresponding to that branch. # This repo will be disabled by default. with open('/etc/yum.repos.d/clusterhq-build.repo', 'w') as repo: result_path = os.path.join('/results/omnibus', branch, 'centos-$releasever') base_url = urljoin(build_server, result_path) repo.write(dedent(b""" [clusterhq-build] name=clusterhq-build baseurl=%s gpgcheck=0 enabled=0 """) % (base_url,)) branch_opt = ['--enablerepo=clusterhq-build'] else: branch_opt = [] # The Flocker packages don't explicitly depend on ZFS because it is only # required when using the ZFS storage driver. That's exactly what this box # wants to do. check_call(['yum', 'install', '-y', 'zfs']) # If a version is specified, install that version. # Otherwise install whatever yum decides. if rpm_version: # The buildserver doesn't build dirty versions, # so strip that. if rpm_version.endswith('.dirty'): rpm_version = rpm_version[:-len('.dirty')] package = 'clusterhq-flocker-node-%s' % (rpm_version,) else: package = 'clusterhq-flocker-node' # Install flocker-node check_call(['yum', 'install', '-y'] + branch_opt + [package]) # Install ZFS. check_call(['yum', 'install', '-y', 'zfs']) # Install Docker. check_call(["curl", "-o", "/tmp/install-docker.sh", "https://get.docker.com/"]) check_call(["sh", "/tmp/install-docker.sh"]) # And enable it. We don't need to start it, since when the box is packaged, # the machine will be reset. check_call(['systemctl', 'enable', 'docker']) # Enable firewalld # Typical deployments will have a firewall enabled, so enable it on vagrant to # make the environment more realistic. # We need to unmask it, since the base box has it masked. check_call(['systemctl', 'unmask', 'firewalld']) check_call(['systemctl', 'enable', 'firewalld']) check_call(['systemctl', 'start', 'firewalld']) # We need to open the firewall for the flocker-control for service in ['flocker-control-api', 'flocker-control-agent']: check_call(['firewall-cmd', '--permanent', '--add-service', service]) # Make it easy to authenticate as root check_call(['mkdir', '-p', '/root/.ssh']) check_call( ['cp', os.path.expanduser('~vagrant/.ssh/authorized_keys'), '/root/.ssh']) # Configure GRUB2 to boot kernel with elevator=noop to workaround # https://clusterhq.atlassian.net/browse/FLOC-235 with open('/etc/default/grub', 'a') as f: f.write('GRUB_CMDLINE_LINUX="${GRUB_CMDLINE_LINUX} elevator=noop"\n') check_call(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg']) # Create a ZFS storage pool backed by a normal filesystem file. This # is a bad way to configure ZFS for production use but it is # convenient for a demo in a VM. check_call(['mkdir', '-p', '/var/opt/flocker']) check_call(['truncate', '--size', '1G', '/var/opt/flocker/pool-vdev']) # ZFS 0.6.5 stopped loading the module stack. This additional environment # variable makes it follow the old behaviour. 
However, support for this # will be removed in a future release. See FLOC-3018 environ = os.environ.copy() environ['ZFS_MODULE_LOADING'] = 'yes' check_call( ['zpool', 'create', 'flocker', '/var/opt/flocker/pool-vdev'], env=environ) # Move SSH private key into place so ZFS agent can use it until we remove # SSH completely in FLOC-1665. The Vagrantfile copied it over, and it's # the same one we already have in root's authorized_keys (see above). check_call(['mkdir', '/etc/flocker']) check_call(['chmod', 'u=rwx,g=,o=', '/etc/flocker']) check_call(["ssh-keygen", "-N", "", "-f", "/etc/flocker/id_rsa_flocker"]) with file("/root/.ssh/authorized_keys", "a") as f: f.write(file("/etc/flocker/id_rsa_flocker.pub").read()) # Workaround https://github.com/mitchellh/vagrant/issues/5590 # New versions of CentOS have a NetworkManager that is aggressive about # configuring network devices. Vagrant tells NetworkManager to not # manage the device it uses for a static IP, but doesn't tell NetworkManager # that it has done so. Teach NetworkManager to automatically reload the # configuration. with file("/etc/NetworkManager/conf.d/auto-reload.conf", "a") as f: f.write("""\ # Created by Flocker (https://clusterhq.atlassian.net/browse/FLOC-2052) # Workaround https://github.com/mitchellh/vagrant/issues/5590 [main] monitor-connection-files=true """)
apache-2.0
xiaojimao18/shadowsocks
shadowsocks/eventloop.py
949
7288
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2013-2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from ssloop # https://github.com/clowwindy/ssloop from __future__ import absolute_import, division, print_function, \ with_statement import os import time import socket import select import errno import logging from collections import defaultdict from shadowsocks import shell __all__ = ['EventLoop', 'POLL_NULL', 'POLL_IN', 'POLL_OUT', 'POLL_ERR', 'POLL_HUP', 'POLL_NVAL', 'EVENT_NAMES'] POLL_NULL = 0x00 POLL_IN = 0x01 POLL_OUT = 0x04 POLL_ERR = 0x08 POLL_HUP = 0x10 POLL_NVAL = 0x20 EVENT_NAMES = { POLL_NULL: 'POLL_NULL', POLL_IN: 'POLL_IN', POLL_OUT: 'POLL_OUT', POLL_ERR: 'POLL_ERR', POLL_HUP: 'POLL_HUP', POLL_NVAL: 'POLL_NVAL', } # we check timeouts every TIMEOUT_PRECISION seconds TIMEOUT_PRECISION = 10 class KqueueLoop(object): MAX_EVENTS = 1024 def __init__(self): self._kqueue = select.kqueue() self._fds = {} def _control(self, fd, mode, flags): events = [] if mode & POLL_IN: events.append(select.kevent(fd, select.KQ_FILTER_READ, flags)) if mode & POLL_OUT: events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags)) for e in events: self._kqueue.control([e], 0) def poll(self, timeout): if timeout < 0: timeout = None # kqueue behaviour events = self._kqueue.control(None, KqueueLoop.MAX_EVENTS, timeout) results = defaultdict(lambda: POLL_NULL) for e in events: fd = e.ident if e.filter == select.KQ_FILTER_READ: results[fd] |= POLL_IN elif e.filter == select.KQ_FILTER_WRITE: results[fd] |= POLL_OUT return results.items() def register(self, fd, mode): self._fds[fd] = mode self._control(fd, mode, select.KQ_EV_ADD) def unregister(self, fd): self._control(fd, self._fds[fd], select.KQ_EV_DELETE) del self._fds[fd] def modify(self, fd, mode): self.unregister(fd) self.register(fd, mode) def close(self): self._kqueue.close() class SelectLoop(object): def __init__(self): self._r_list = set() self._w_list = set() self._x_list = set() def poll(self, timeout): r, w, x = select.select(self._r_list, self._w_list, self._x_list, timeout) results = defaultdict(lambda: POLL_NULL) for p in [(r, POLL_IN), (w, POLL_OUT), (x, POLL_ERR)]: for fd in p[0]: results[fd] |= p[1] return results.items() def register(self, fd, mode): if mode & POLL_IN: self._r_list.add(fd) if mode & POLL_OUT: self._w_list.add(fd) if mode & POLL_ERR: self._x_list.add(fd) def unregister(self, fd): if fd in self._r_list: self._r_list.remove(fd) if fd in self._w_list: self._w_list.remove(fd) if fd in self._x_list: self._x_list.remove(fd) def modify(self, fd, mode): self.unregister(fd) self.register(fd, mode) def close(self): pass class EventLoop(object): def __init__(self): if hasattr(select, 'epoll'): self._impl = select.epoll() model = 'epoll' elif hasattr(select, 'kqueue'): self._impl = KqueueLoop() model = 'kqueue' elif hasattr(select, 'select'): self._impl = SelectLoop() model = 'select' else: raise Exception('can not find any available functions in select ' 'package') self._fdmap = {} # (f, handler) self._last_time = 
time.time() self._periodic_callbacks = [] self._stopping = False logging.debug('using event model: %s', model) def poll(self, timeout=None): events = self._impl.poll(timeout) return [(self._fdmap[fd][0], fd, event) for fd, event in events] def add(self, f, mode, handler): fd = f.fileno() self._fdmap[fd] = (f, handler) self._impl.register(fd, mode) def remove(self, f): fd = f.fileno() del self._fdmap[fd] self._impl.unregister(fd) def add_periodic(self, callback): self._periodic_callbacks.append(callback) def remove_periodic(self, callback): self._periodic_callbacks.remove(callback) def modify(self, f, mode): fd = f.fileno() self._impl.modify(fd, mode) def stop(self): self._stopping = True def run(self): events = [] while not self._stopping: asap = False try: events = self.poll(TIMEOUT_PRECISION) except (OSError, IOError) as e: if errno_from_exception(e) in (errno.EPIPE, errno.EINTR): # EPIPE: Happens when the client closes the connection # EINTR: Happens when received a signal # handles them as soon as possible asap = True logging.debug('poll:%s', e) else: logging.error('poll:%s', e) import traceback traceback.print_exc() continue for sock, fd, event in events: handler = self._fdmap.get(fd, None) if handler is not None: handler = handler[1] try: handler.handle_event(sock, fd, event) except (OSError, IOError) as e: shell.print_exception(e) now = time.time() if asap or now - self._last_time >= TIMEOUT_PRECISION: for callback in self._periodic_callbacks: callback() self._last_time = now def __del__(self): self._impl.close() # from tornado def errno_from_exception(e): """Provides the errno from an Exception object. There are cases where the errno attribute was not set so we pull the errno out of the args but if someone instantiates an Exception without any args you will get a tuple error. So this function abstracts all that behavior to give you a safe way to get the errno. """ if hasattr(e, 'errno'): return e.errno elif e.args: return e.args[0] else: return None # from tornado def get_sock_error(sock): error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) return socket.error(error_number, os.strerror(error_number))
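# A hedged usage sketch of the EventLoop above: a UDP echo server. Handler
# objects only need a handle_event(sock, fd, event) method, matching the
# dispatch in run().
import socket

class UdpEcho(object):
    def __init__(self, loop, sock):
        self._sock = sock
        loop.add(sock, POLL_IN, self)

    def handle_event(self, sock, fd, event):
        if event & POLL_IN:
            data, addr = sock.recvfrom(4096)
            if data:
                sock.sendto(data, addr)

# loop = EventLoop()
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.setblocking(False)
# s.bind(('127.0.0.1', 9999))
# UdpEcho(loop, s)
# loop.run()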
apache-2.0
fxia22/ASM_xf
PythonD/lib/python2.4/lib-tk/ScrolledText.py
20
1638
# A ScrolledText widget feels like a text widget but also has a # vertical scroll bar on its right. (Later, options may be added to # add a horizontal bar as well, to make the bars disappear # automatically when not needed, to move them to the other side of the # window, etc.) # # Configuration options are passed to the Text widget. # A Frame widget is inserted between the master and the text, to hold # the Scrollbar widget. # Most method calls are inherited from the Text widget; Pack methods # are redirected to the Frame widget however. from Tkinter import * from Tkinter import _cnfmerge class ScrolledText(Text): def __init__(self, master=None, cnf=None, **kw): if cnf is None: cnf = {} if kw: cnf = _cnfmerge((cnf, kw)) fcnf = {} for k in cnf.keys(): if type(k) == ClassType or k == 'name': fcnf[k] = cnf[k] del cnf[k] self.frame = Frame(master, **fcnf) self.vbar = Scrollbar(self.frame, name='vbar') self.vbar.pack(side=RIGHT, fill=Y) cnf['name'] = 'text' Text.__init__(self, self.frame, **cnf) self.pack(side=LEFT, fill=BOTH, expand=1) self['yscrollcommand'] = self.vbar.set self.vbar['command'] = self.yview # Copy geometry methods of self.frame -- hack! methods = Pack.__dict__.keys() methods = methods + Grid.__dict__.keys() methods = methods + Place.__dict__.keys() for m in methods: if m[0] != '_' and m != 'config' and m != 'configure': setattr(self, m, getattr(self.frame, m))
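# A minimal usage sketch (Python 2 Tkinter, matching the module itself):
# geometry calls are redirected to the Frame, text calls go to the widget.
from Tkinter import Tk, END

root = Tk()
st = ScrolledText(root, width=40, height=10)
st.pack(fill='both', expand=1)        # pack() was copied from self.frame
for i in range(50):
    st.insert(END, 'line %d\n' % i)   # plain Text method
root.mainloop()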
gpl-2.0
Havate/havate-openstack
proto-build/gui/horizon/Horizon_GUI/horizon/management/commands/startdash.py
79
2888
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob from optparse import make_option # noqa import os from django.core.management.base import CommandError # noqa from django.core.management.templates import TemplateCommand # noqa from django.utils.importlib import import_module # noqa import horizon class Command(TemplateCommand): template = os.path.join(horizon.__path__[0], "conf", "dash_template") option_list = TemplateCommand.option_list + ( make_option('--target', dest='target', action='store', default=None, help='The directory in which the panel ' 'should be created. Defaults to the ' 'current directory. The value "auto" ' 'may also be used to automatically ' 'create the panel inside the specified ' 'dashboard module.'),) help = ("Creates a Django app directory structure for a new dashboard " "with the given name in the current directory or optionally in " "the given directory.") def handle(self, dash_name=None, **options): if dash_name is None: raise CommandError("You must provide a dashboard name.") # Use our default template if one isn't specified. if not options.get("template", None): options["template"] = self.template # We have html templates as well, so make sure those are included. options["extensions"].extend(["tmpl", "html", "js", "css"]) # Check that the app_name cannot be imported. try: import_module(dash_name) except ImportError: pass else: raise CommandError("%r conflicts with the name of an existing " "Python module and cannot be used as an app " "name. Please try another name." % dash_name) super(Command, self).handle('dash', dash_name, **options) target = options.pop("target", None) if not target: target = os.path.join(os.curdir, dash_name) # Rename our python template files. file_names = glob.glob(os.path.join(target, "*.py.tmpl")) for filename in file_names: os.rename(filename, filename[:-5])
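# A hedged CLI sketch for the command above ('mydash' is an illustrative
# name, not from the source). From a Horizon project checkout:
#
#     python manage.py startdash mydash
#     python manage.py startdash mydash --target auto
#
# or programmatically through Django's management API:
#
#     from django.core.management import call_command
#     call_command('startdash', 'mydash', target='auto')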
apache-2.0
n-west/gnuradio-volk
gr-digital/python/digital/qa_constellation_receiver.py
52
8241
#!/usr/bin/env python # # Copyright 2011,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import random import math import pmt from gnuradio import gr, gr_unittest, filter, analog, blocks, digital from gnuradio.digital.utils import mod_codes, alignment from gnuradio.digital import packet_utils from gnuradio.digital.generic_mod_demod import generic_mod, generic_demod from qa_constellation import tested_constellations, twod_constell # Set a seed so that if errors turn up they are reproducible. SEED = 1239 # TESTING PARAMETERS # The number of symbols to test with. # We need this many to let the frequency recovery block converge. DATA_LENGTH = 1000 # Test fails if fraction of output that is correct is less than this. EASY_REQ_CORRECT = 0.9 # For constellations that aren't expected to work so well. MEDIUM_REQ_CORRECT = 0.8 # CHANNEL PARAMETERS NOISE_VOLTAGE = 0.01 FREQUENCY_OFFSET = 0.01 TIMING_OFFSET = 1.0 # RECEIVER PARAMETERS FREQ_BW = 2*math.pi/100.0 PHASE_BW = 2*math.pi/100.0 class channel_model(gr.hier_block2): def __init__(self, noise_voltage, freq, timing): gr.hier_block2.__init__(self, "channel_model", gr.io_signature(1, 1, gr.sizeof_gr_complex), gr.io_signature(1, 1, gr.sizeof_gr_complex)) timing_offset = filter.fractional_resampler_cc(0, timing) noise_adder = blocks.add_cc() noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_voltage, 0) freq_offset = analog.sig_source_c(1, analog.GR_SIN_WAVE, freq, 1.0, 0.0) mixer_offset = blocks.multiply_cc(); self.connect(self, timing_offset) self.connect(timing_offset, (mixer_offset,0)) self.connect(freq_offset, (mixer_offset,1)) self.connect(mixer_offset, (noise_adder,1)) self.connect(noise, (noise_adder,0)) self.connect(noise_adder, self) class test_constellation_receiver(gr_unittest.TestCase): # We ignore the first half of the output data since often it takes # a while for the receiver to lock on. ignore_fraction = 0.8 max_data_length = DATA_LENGTH * 6 max_num_samples = 1000 def test_basic(self): """ Tests a bunch of different constellations by using generic modulation, a channel, and generic demodulation. The generic demodulation uses constellation_receiver which is what we're really trying to test. """ rndm = random.Random() rndm.seed(SEED) # Assumes not more than 64 points in a constellation # Generates some random input data to use. self.src_data = tuple( [rndm.randint(0,1) for i in range(0, self.max_data_length)]) # Generates some random indices to use for comparing input and # output data (a full comparison is too slow in python). 
self.indices = alignment.random_sample( self.max_data_length, self.max_num_samples, SEED) requirements = ( (EASY_REQ_CORRECT, tested_constellations(easy=True, medium=False, difficult=False)), (MEDIUM_REQ_CORRECT, tested_constellations(easy=False, medium=True, difficult=False)), ) for req_correct, tcs in requirements: for constellation, differential in tcs: # The constellation_receiver doesn't work for constellations # of multiple dimensions (i.e. multiple complex numbers to a # single symbol). # That is not implemented since the receiver has no way of # knowing where the beginning of a symbol is. # It also doesn't work for non-differential modulation. if constellation.dimensionality() != 1 or not differential: continue data_length = DATA_LENGTH * constellation.bits_per_symbol() tb = rec_test_tb(constellation, differential, src_data=self.src_data[:data_length]) tb.run() data = tb.dst.data() d1 = tb.src_data[:int(len(tb.src_data)*self.ignore_fraction)] d2 = data[:int(len(data)*self.ignore_fraction)] correct, overlap, offset, indices = alignment.align_sequences( d1, d2, indices=self.indices) if correct <= req_correct: print("Constellation is {0}. Differential is {1}. Required correct is {2}. Correct is {3}. FAIL.". format(constellation, differential, req_correct, correct)) self.assertTrue(correct > req_correct) def test_tag(self): # Send data through bpsk receiver # followed by qpsk receiver data = [0.9+0j, 0.1+0.9j, -1-0.1j, -0.1-0.6j]*2 bpsk_data = [1, 1, 0, 0] qpsk_data = [1, 3, 0, 0] first_tag = gr.tag_t() first_tag.key = pmt.intern("set_constellation") first_tag.value = digital.bpsk_constellation().as_pmt() first_tag.offset = 0 second_tag = gr.tag_t() second_tag.key = pmt.intern("set_constellation") second_tag.value = digital.qpsk_constellation().as_pmt() second_tag.offset = 4 src = blocks.vector_source_c(data, False, 1, [first_tag, second_tag]) decoder = digital.constellation_receiver_cb( digital.bpsk_constellation().base(), 0, 0, 0) snk = blocks.vector_sink_b() tb = gr.top_block() tb.connect(src, decoder, snk) tb.run() self.assertEqual(list(snk.data()), bpsk_data+qpsk_data) class rec_test_tb(gr.top_block): """ Takes a constellation and runs a generic modulation, channel, and generic demodulation. """ def __init__(self, constellation, differential, data_length=None, src_data=None, freq_offset=True): """ Args: constellation: a constellation object differential: whether differential encoding is used data_length: the number of bits of data to use src_data: a list of the bits to use freq_offset: whether to use a frequency offset in the channel """ super(rec_test_tb, self).__init__() # Transmission Blocks if src_data is None: rndm = random.Random(SEED) # local RNG so the data is reproducible self.src_data = tuple([rndm.randint(0,1) for i in range(0, data_length)]) else: self.src_data = src_data packer = blocks.unpacked_to_packed_bb(1, gr.GR_MSB_FIRST) src = blocks.vector_source_b(self.src_data) mod = generic_mod(constellation, differential=differential) # Channel if freq_offset: channel = channel_model(NOISE_VOLTAGE, FREQUENCY_OFFSET, TIMING_OFFSET) else: channel = channel_model(NOISE_VOLTAGE, 0, TIMING_OFFSET) # Receiver Blocks if freq_offset: demod = generic_demod(constellation, differential=differential, freq_bw=FREQ_BW, phase_bw=PHASE_BW) else: demod = generic_demod(constellation, differential=differential, freq_bw=0, phase_bw=0) self.dst = blocks.vector_sink_b() self.connect(src, packer, mod, channel, demod, self.dst) if __name__ == '__main__': gr_unittest.run(test_constellation_receiver, "test_constellation_receiver.xml")
gpl-3.0
watspidererik/testenv
flask/lib/python2.7/site-packages/setuptools/command/upload_docs.py
71
6811
# -*- coding: utf-8 -*- """upload_docs Implements a Distutils 'upload_docs' subcommand (upload documentation to PyPI's pythonhosted.org). """ from base64 import standard_b64encode from distutils import log from distutils.errors import DistutilsOptionError from distutils.command.upload import upload import os import socket import zipfile import tempfile import sys import shutil from setuptools.compat import httplib, urlparse, unicode, iteritems, PY3 from pkg_resources import iter_entry_points errors = 'surrogateescape' if PY3 else 'strict' # This is not just a replacement for byte literals # but works as a general purpose encoder def b(s, encoding='utf-8'): if isinstance(s, unicode): return s.encode(encoding, errors) return s class upload_docs(upload): description = 'Upload documentation to PyPI' user_options = [ ('repository=', 'r', "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY), ('show-response', None, 'display full response text from server'), ('upload-dir=', None, 'directory to upload'), ] boolean_options = upload.boolean_options def has_sphinx(self): if self.upload_dir is None: for ep in iter_entry_points('distutils.commands', 'build_sphinx'): return True sub_commands = [('build_sphinx', has_sphinx)] def initialize_options(self): upload.initialize_options(self) self.upload_dir = None self.target_dir = None def finalize_options(self): upload.finalize_options(self) if self.upload_dir is None: if self.has_sphinx(): build_sphinx = self.get_finalized_command('build_sphinx') self.target_dir = build_sphinx.builder_target_dir else: build = self.get_finalized_command('build') self.target_dir = os.path.join(build.build_base, 'docs') else: self.ensure_dirname('upload_dir') self.target_dir = self.upload_dir self.announce('Using upload directory %s' % self.target_dir) def create_zipfile(self, filename): zip_file = zipfile.ZipFile(filename, "w") try: self.mkpath(self.target_dir) # just in case for root, dirs, files in os.walk(self.target_dir): if root == self.target_dir and not files: raise DistutilsOptionError( "no files found in upload directory '%s'" % self.target_dir) for name in files: full = os.path.join(root, name) relative = root[len(self.target_dir):].lstrip(os.path.sep) dest = os.path.join(relative, name) zip_file.write(full, dest) finally: zip_file.close() def run(self): # Run sub commands for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) tmp_dir = tempfile.mkdtemp() name = self.distribution.metadata.get_name() zip_file = os.path.join(tmp_dir, "%s.zip" % name) try: self.create_zipfile(zip_file) self.upload_file(zip_file) finally: shutil.rmtree(tmp_dir) def upload_file(self, filename): f = open(filename, 'rb') content = f.read() f.close() meta = self.distribution.metadata data = { ':action': 'doc_upload', 'name': meta.get_name(), 'content': (os.path.basename(filename), content), } # set up the authentication credentials = b(self.username + ':' + self.password) credentials = standard_b64encode(credentials) if PY3: credentials = credentials.decode('ascii') auth = "Basic " + credentials # Build up the MIME payload for the POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = b('\n--') + b(boundary) end_boundary = sep_boundary + b('--') body = [] for key, values in iteritems(data): title = '\nContent-Disposition: form-data; name="%s"' % key # handle multiple entries for the same name if not isinstance(values, list): values = [values] for value in values: if type(value) is tuple: title += '; filename="%s"' % value[0] value = 
value[1] else: value = b(value) body.append(sep_boundary) body.append(b(title)) body.append(b("\n\n")) body.append(value) if value and value[-1:] == b('\r'): body.append(b('\n')) # write an extra newline (lurve Macs) body.append(end_boundary) body.append(b("\n")) body = b('').join(body) self.announce("Submitting documentation to %s" % (self.repository), log.INFO) # build the Request # We can't use urllib2 since we need to send the Basic # auth right with the first request schema, netloc, url, params, query, fragments = \ urlparse(self.repository) assert not params and not query and not fragments if schema == 'http': conn = httplib.HTTPConnection(netloc) elif schema == 'https': conn = httplib.HTTPSConnection(netloc) else: raise AssertionError("unsupported schema " + schema) data = '' try: conn.connect() conn.putrequest("POST", url) content_type = 'multipart/form-data; boundary=%s' % boundary conn.putheader('Content-type', content_type) conn.putheader('Content-length', str(len(body))) conn.putheader('Authorization', auth) conn.endheaders() conn.send(body) except socket.error: e = sys.exc_info()[1] self.announce(str(e), log.ERROR) return r = conn.getresponse() if r.status == 200: self.announce('Server response (%s): %s' % (r.status, r.reason), log.INFO) elif r.status == 301: location = r.getheader('Location') if location is None: location = 'https://pythonhosted.org/%s/' % meta.get_name() self.announce('Upload successful. Visit %s' % location, log.INFO) else: self.announce('Upload failed (%s): %s' % (r.status, r.reason), log.ERROR) if self.show_response: print('-' * 75, r.read(), '-' * 75)
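# ----------------------------------------------------------------------
# A hedged usage sketch (not part of the original module).  Typically run
# from a project root; the upload-dir value is an assumed Sphinx output
# path, not something this module defines:
#
#   python setup.py upload_docs --upload-dir=build/sphinx/html
#
# When build_sphinx is available it is run first via sub_commands, and
# credentials come from ~/.pypirc through the inherited `upload` command.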
mit
Mchakravartula/rockstor-core
src/rockstor/storageadmin/models/iscsi_target.py
6
1257
""" Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com> This file is part of RockStor. RockStor is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. RockStor is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from django.db import models from storageadmin.models import Share """ Iscsi target """ class IscsiTarget(models.Model): share = models.ForeignKey(Share) """target id""" tid = models.IntegerField(unique=True) """target name""" tname = models.CharField(max_length=128, unique=True) """for now, this is the file created inside the share""" dev_name = models.CharField(max_length=128, unique=True) """size. this is static for now""" dev_size = models.IntegerField() class Meta: app_label = 'storageadmin'
gpl-3.0
KhronosGroup/COLLADA-CTS
StandardDataSets/xml/uri/external_references/geometry/geometry.py
2
4559
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and/or associated documentation files (the "Materials"), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.

# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.

# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.

# We import an assistant script that includes the common verification
# methods.  The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant

# Please feed your node list here:
tagLst = [['library_geometry', 'geometry'],
          ['library_visual_scenes', 'visual_scene', 'node', 'instance_geometry']]
attrName = ''
attrVal = 'library_geometries.dae#cube'
dataToCheck = ''


class SimpleJudgingObject:
    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        # No step should crash.
        self.__assistant.CheckCrashes(context)

        # Import/export/validate must exist and pass, while Render must
        # only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"],
                                    ["Render"])

        self.status_baseline = self.__assistant.GetResults()
        return self.status_baseline

    # To pass intermediate you need to pass basic; this object could also
    # include additional tests that were specific to the intermediate badge.
    def JudgeSuperior(self, context):
        self.status_superior = self.status_baseline
        return self.status_superior

    # To pass advanced you need to pass intermediate; this object could also
    # include additional tests that were specific to the advanced badge.
    def JudgeExemplary(self, context):
        # If superior fails, there is no point in further checking.
        if (self.status_superior == False):
            self.status_exemplary = self.status_superior
            return self.status_exemplary

        # Compare the rendered images between import and export.
        # Compare images against the reference test.
        # Check for the url term.
        if (self.__assistant.CompareRenderedImages(context)):
            self.__assistant.CompareImagesAgainst(context, "_reference_geometry")
            self.__assistant.CheckForURLTermInAttr(context, self.tagList[1],
                                                   self.attrVal)

        if (self.__assistant.GetResults() == False):
            self.status_exemplary = False
            return self.status_exemplary
        else:
            self.status_exemplary = True

        # Check that the external reference element hasn't been baked into
        # the export.
        if (self.__assistant.ElementPreserved(context, self.tagList[0], False)):
            self.status_exemplary = False

        return self.status_exemplary


# This is where all the work occurs: "judgingObject" is an absolutely
# necessary token.  The dynamic loader looks very specifically for a class
# instance named "judgingObject".
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck)
mit
trtd/faker
faker/providers/date_time/__init__.py
2
38341
# coding=utf-8 from __future__ import unicode_literals from datetime import timedelta import re from time import time, mktime from dateutil import relativedelta from dateutil.tz import tzlocal from faker.generator import random from faker.utils.datetime_safe import date, datetime, real_date, real_datetime from faker.utils import is_string from .. import BaseProvider localized = True def datetime_to_timestamp(dt): if getattr(dt, 'tzinfo', None) is not None: dt = dt.astimezone(tzlocal()) return mktime(dt.timetuple()) timedelta_pattern = r'' for name, sym in [('years', 'y'), ('weeks', 'w'), ('days', 'd'), ('hours', 'h'), ('minutes', 'm'), ('seconds', 's')]: timedelta_pattern += r'((?P<{0}>(?:\+|-)\d+?){1})?'.format(name, sym) class Provider(BaseProvider): centuries = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI', 'XVII', 'XVIII', 'XIX', 'XX', 'XXI'] countries = [ {'timezones': ['Europe/Andorra'], 'code': 'AD', 'continent': 'Europe', 'name': 'Andorra', 'capital': 'Andorra la Vella'}, {'timezones': ['Asia/Kabul'], 'code': 'AF', 'continent': 'Asia', 'name': 'Afghanistan', 'capital': 'Kabul'}, {'timezones': ['America/Antigua'], 'code': 'AG', 'continent': 'North America', 'name': 'Antigua and Barbuda', 'capital': "St. John's"}, {'timezones': ['Europe/Tirane'], 'code': 'AL', 'continent': 'Europe', 'name': 'Albania', 'capital': 'Tirana'}, {'timezones': ['Asia/Yerevan'], 'code': 'AM', 'continent': 'Asia', 'name': 'Armenia', 'capital': 'Yerevan'}, {'timezones': ['Africa/Luanda'], 'code': 'AO', 'continent': 'Africa', 'name': 'Angola', 'capital': 'Luanda'}, {'timezones': ['America/Argentina/Buenos_Aires', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/Tucuman', 'America/Argentina/Catamarca', 'America/Argentina/La_Rioja', 'America/Argentina/San_Juan', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Ushuaia'], 'code': 'AR', 'continent': 'South America', 'name': 'Argentina', 'capital': 'Buenos Aires'}, {'timezones': ['Europe/Vienna'], 'code': 'AT', 'continent': 'Europe', 'name': 'Austria', 'capital': 'Vienna'}, {'timezones': ['Australia/Lord_Howe', 'Australia/Hobart', 'Australia/Currie', 'Australia/Melbourne', 'Australia/Sydney', 'Australia/Broken_Hill', 'Australia/Brisbane', 'Australia/Lindeman', 'Australia/Adelaide', 'Australia/Darwin', 'Australia/Perth'], 'code': 'AU', 'continent': 'Oceania', 'name': 'Australia', 'capital': 'Canberra'}, {'timezones': ['Asia/Baku'], 'code': 'AZ', 'continent': 'Asia', 'name': 'Azerbaijan', 'capital': 'Baku'}, {'timezones': ['America/Barbados'], 'code': 'BB', 'continent': 'North America', 'name': 'Barbados', 'capital': 'Bridgetown'}, {'timezones': ['Asia/Dhaka'], 'code': 'BD', 'continent': 'Asia', 'name': 'Bangladesh', 'capital': 'Dhaka'}, {'timezones': ['Europe/Brussels'], 'code': 'BE', 'continent': 'Europe', 'name': 'Belgium', 'capital': 'Brussels'}, {'timezones': ['Africa/Ouagadougou'], 'code': 'BF', 'continent': 'Africa', 'name': 'Burkina Faso', 'capital': 'Ouagadougou'}, {'timezones': ['Europe/Sofia'], 'code': 'BG', 'continent': 'Europe', 'name': 'Bulgaria', 'capital': 'Sofia'}, {'timezones': ['Asia/Bahrain'], 'code': 'BH', 'continent': 'Asia', 'name': 'Bahrain', 'capital': 'Manama'}, {'timezones': ['Africa/Bujumbura'], 'code': 'BI', 'continent': 'Africa', 'name': 'Burundi', 'capital': 'Bujumbura'}, {'timezones': ['Africa/Porto-Novo'], 'code': 'BJ', 'continent': 'Africa', 'name': 'Benin', 'capital': 'Porto-Novo'}, {'timezones': ['Asia/Brunei'], 'code': 
'BN', 'continent': 'Asia', 'name': 'Brunei Darussalam', 'capital': 'Bandar Seri Begawan'}, {'timezones': ['America/La_Paz'], 'code': 'BO', 'continent': 'South America', 'name': 'Bolivia', 'capital': 'Sucre'}, {'timezones': ['America/Noronha', 'America/Belem', 'America/Fortaleza', 'America/Recife', 'America/Araguaina', 'America/Maceio', 'America/Bahia', 'America/Sao_Paulo', 'America/Campo_Grande', 'America/Cuiaba', 'America/Porto_Velho', 'America/Boa_Vista', 'America/Manaus', 'America/Eirunepe', 'America/Rio_Branco'], 'code': 'BR', 'continent': 'South America', 'name': 'Brazil', 'capital': 'Bras\xc3\xadlia'}, {'timezones': ['America/Nassau'], 'code': 'BS', 'continent': 'North America', 'name': 'Bahamas', 'capital': 'Nassau'}, {'timezones': ['Asia/Thimphu'], 'code': 'BT', 'continent': 'Asia', 'name': 'Bhutan', 'capital': 'Thimphu'}, {'timezones': ['Africa/Gaborone'], 'code': 'BW', 'continent': 'Africa', 'name': 'Botswana', 'capital': 'Gaborone'}, {'timezones': ['Europe/Minsk'], 'code': 'BY', 'continent': 'Europe', 'name': 'Belarus', 'capital': 'Minsk'}, {'timezones': ['America/Belize'], 'code': 'BZ', 'continent': 'North America', 'name': 'Belize', 'capital': 'Belmopan'}, {'timezones': ['America/St_Johns', 'America/Halifax', 'America/Glace_Bay', 'America/Moncton', 'America/Goose_Bay', 'America/Blanc-Sablon', 'America/Montreal', 'America/Toronto', 'America/Nipigon', 'America/Thunder_Bay', 'America/Pangnirtung', 'America/Iqaluit', 'America/Atikokan', 'America/Rankin_Inlet', 'America/Winnipeg', 'America/Rainy_River', 'America/Cambridge_Bay', 'America/Regina', 'America/Swift_Current', 'America/Edmonton', 'America/Yellowknife', 'America/Inuvik', 'America/Dawson_Creek', 'America/Vancouver', 'America/Whitehorse', 'America/Dawson'], 'code': 'CA', 'continent': 'North America', 'name': 'Canada', 'capital': 'Ottawa'}, {'timezones': ['Africa/Kinshasa', 'Africa/Lubumbashi'], 'code': 'CD', 'continent': 'Africa', 'name': 'Democratic Republic of the Congo', 'capital': 'Kinshasa'}, {'timezones': ['Africa/Brazzaville'], 'code': 'CG', 'continent': 'Africa', 'name': 'Republic of the Congo', 'capital': 'Brazzaville'}, {'timezones': ['Africa/Abidjan'], 'code': 'CI', 'continent': 'Africa', 'name': "C\xc3\xb4te d'Ivoire", 'capital': 'Yamoussoukro'}, {'timezones': ['America/Santiago', 'Pacific/Easter'], 'code': 'CL', 'continent': 'South America', 'name': 'Chile', 'capital': 'Santiago'}, {'timezones': ['Africa/Douala'], 'code': 'CM', 'continent': 'Africa', 'name': 'Cameroon', 'capital': 'Yaound\xc3\xa9'}, {'timezones': ['Asia/Shanghai', 'Asia/Harbin', 'Asia/Chongqing', 'Asia/Urumqi', 'Asia/Kashgar'], 'code': 'CN', 'continent': 'Asia', 'name': "People's Republic of China", 'capital': 'Beijing'}, {'timezones': ['America/Bogota'], 'code': 'CO', 'continent': 'South America', 'name': 'Colombia', 'capital': 'Bogot\xc3\xa1'}, {'timezones': ['America/Costa_Rica'], 'code': 'CR', 'continent': 'North America', 'name': 'Costa Rica', 'capital': 'San Jos\xc3\xa9'}, {'timezones': ['America/Havana'], 'code': 'CU', 'continent': 'North America', 'name': 'Cuba', 'capital': 'Havana'}, {'timezones': ['Atlantic/Cape_Verde'], 'code': 'CV', 'continent': 'Africa', 'name': 'Cape Verde', 'capital': 'Praia'}, {'timezones': ['Asia/Nicosia'], 'code': 'CY', 'continent': 'Asia', 'name': 'Cyprus', 'capital': 'Nicosia'}, {'timezones': ['Europe/Prague'], 'code': 'CZ', 'continent': 'Europe', 'name': 'Czech Republic', 'capital': 'Prague'}, {'timezones': ['Europe/Berlin'], 'code': 'DE', 'continent': 'Europe', 'name': 'Germany', 'capital': 'Berlin'}, 
{'timezones': ['Africa/Djibouti'], 'code': 'DJ', 'continent': 'Africa', 'name': 'Djibouti', 'capital': 'Djibouti City'},
        {'timezones': ['Europe/Copenhagen'], 'code': 'DK', 'continent': 'Europe', 'name': 'Denmark', 'capital': 'Copenhagen'},
        {'timezones': ['America/Dominica'], 'code': 'DM', 'continent': 'North America', 'name': 'Dominica', 'capital': 'Roseau'},
        {'timezones': ['America/Santo_Domingo'], 'code': 'DO', 'continent': 'North America', 'name': 'Dominican Republic', 'capital': 'Santo Domingo'},
        {'timezones': ['America/Guayaquil', 'Pacific/Galapagos'], 'code': 'EC', 'continent': 'South America', 'name': 'Ecuador', 'capital': 'Quito'},
        {'timezones': ['Europe/Tallinn'], 'code': 'EE', 'continent': 'Europe', 'name': 'Estonia', 'capital': 'Tallinn'},
        {'timezones': ['Africa/Cairo'], 'code': 'EG', 'continent': 'Africa', 'name': 'Egypt', 'capital': 'Cairo'},
        {'timezones': ['Africa/Asmera'], 'code': 'ER', 'continent': 'Africa', 'name': 'Eritrea', 'capital': 'Asmara'},
        {'timezones': ['Africa/Addis_Ababa'], 'code': 'ET', 'continent': 'Africa', 'name': 'Ethiopia', 'capital': 'Addis Ababa'},
        {'timezones': ['Europe/Helsinki'], 'code': 'FI', 'continent': 'Europe', 'name': 'Finland', 'capital': 'Helsinki'},
        {'timezones': ['Pacific/Fiji'], 'code': 'FJ', 'continent': 'Oceania', 'name': 'Fiji', 'capital': 'Suva'},
        {'timezones': ['Europe/Paris'], 'code': 'FR', 'continent': 'Europe', 'name': 'France', 'capital': 'Paris'},
        {'timezones': ['Africa/Libreville'], 'code': 'GA', 'continent': 'Africa', 'name': 'Gabon', 'capital': 'Libreville'},
        {'timezones': ['Asia/Tbilisi'], 'code': 'GE', 'continent': 'Asia', 'name': 'Georgia', 'capital': 'Tbilisi'},
        {'timezones': ['Africa/Accra'], 'code': 'GH', 'continent': 'Africa', 'name': 'Ghana', 'capital': 'Accra'},
        {'timezones': ['Africa/Banjul'], 'code': 'GM', 'continent': 'Africa', 'name': 'The Gambia', 'capital': 'Banjul'},
        {'timezones': ['Africa/Conakry'], 'code': 'GN', 'continent': 'Africa', 'name': 'Guinea', 'capital': 'Conakry'},
        {'timezones': ['Europe/Athens'], 'code': 'GR', 'continent': 'Europe', 'name': 'Greece', 'capital': 'Athens'},
        {'timezones': ['America/Guatemala'], 'code': 'GT', 'continent': 'North America', 'name': 'Guatemala', 'capital': 'Guatemala City'},
        {'timezones': ['America/Port-au-Prince'], 'code': 'HT', 'continent': 'North America', 'name': 'Haiti', 'capital': 'Port-au-Prince'},
        {'timezones': ['Africa/Bissau'], 'code': 'GW', 'continent': 'Africa', 'name': 'Guinea-Bissau', 'capital': 'Bissau'},
        {'timezones': ['America/Guyana'], 'code': 'GY', 'continent': 'South America', 'name': 'Guyana', 'capital': 'Georgetown'},
        {'timezones': ['America/Tegucigalpa'], 'code': 'HN', 'continent': 'North America', 'name': 'Honduras', 'capital': 'Tegucigalpa'},
        {'timezones': ['Europe/Budapest'], 'code': 'HU', 'continent': 'Europe', 'name': 'Hungary', 'capital': 'Budapest'},
        {'timezones': ['Asia/Jakarta', 'Asia/Pontianak', 'Asia/Makassar', 'Asia/Jayapura'], 'code': 'ID', 'continent': 'Asia', 'name': 'Indonesia', 'capital': 'Jakarta'},
        {'timezones': ['Europe/Dublin'], 'code': 'IE', 'continent': 'Europe', 'name': 'Republic of Ireland', 'capital': 'Dublin'},
        {'timezones': ['Asia/Jerusalem'], 'code': 'IL', 'continent': 'Asia', 'name': 'Israel', 'capital': 'Jerusalem'},
        {'timezones': ['Asia/Calcutta'], 'code': 'IN', 'continent': 'Asia', 'name': 'India', 'capital': 'New Delhi'},
        {'timezones': ['Asia/Baghdad'], 'code': 'IQ', 'continent': 'Asia', 'name': 'Iraq', 'capital': 'Baghdad'},
        {'timezones': ['Asia/Tehran'], 'code': 'IR', 'continent': 'Asia', 'name': 'Iran', 'capital':
'Tehran'}, {'timezones': ['Atlantic/Reykjavik'], 'code': 'IS', 'continent': 'Europe', 'name': 'Iceland', 'capital': 'Reykjav\xc3\xadk'}, {'timezones': ['Europe/Rome'], 'code': 'IT', 'continent': 'Europe', 'name': 'Italy', 'capital': 'Rome'}, {'timezones': ['America/Jamaica'], 'code': 'JM', 'continent': 'North America', 'name': 'Jamaica', 'capital': 'Kingston'}, {'timezones': ['Asia/Amman'], 'code': 'JO', 'continent': 'Asia', 'name': 'Jordan', 'capital': 'Amman'}, {'timezones': ['Asia/Tokyo'], 'code': 'JP', 'continent': 'Asia', 'name': 'Japan', 'capital': 'Tokyo'}, {'timezones': ['Africa/Nairobi'], 'code': 'KE', 'continent': 'Africa', 'name': 'Kenya', 'capital': 'Nairobi'}, {'timezones': ['Asia/Bishkek'], 'code': 'KG', 'continent': 'Asia', 'name': 'Kyrgyzstan', 'capital': 'Bishkek'}, {'timezones': ['Pacific/Tarawa', 'Pacific/Enderbury', 'Pacific/Kiritimati'], 'code': 'KI', 'continent': 'Oceania', 'name': 'Kiribati', 'capital': 'Tarawa'}, {'timezones': ['Asia/Pyongyang'], 'code': 'KP', 'continent': 'Asia', 'name': 'North Korea', 'capital': 'Pyongyang'}, {'timezones': ['Asia/Seoul'], 'code': 'KR', 'continent': 'Asia', 'name': 'South Korea', 'capital': 'Seoul'}, {'timezones': ['Asia/Kuwait'], 'code': 'KW', 'continent': 'Asia', 'name': 'Kuwait', 'capital': 'Kuwait City'}, {'timezones': ['Asia/Beirut'], 'code': 'LB', 'continent': 'Asia', 'name': 'Lebanon', 'capital': 'Beirut'}, {'timezones': ['Europe/Vaduz'], 'code': 'LI', 'continent': 'Europe', 'name': 'Liechtenstein', 'capital': 'Vaduz'}, {'timezones': ['Africa/Monrovia'], 'code': 'LR', 'continent': 'Africa', 'name': 'Liberia', 'capital': 'Monrovia'}, {'timezones': ['Africa/Maseru'], 'code': 'LS', 'continent': 'Africa', 'name': 'Lesotho', 'capital': 'Maseru'}, {'timezones': ['Europe/Vilnius'], 'code': 'LT', 'continent': 'Europe', 'name': 'Lithuania', 'capital': 'Vilnius'}, {'timezones': ['Europe/Luxembourg'], 'code': 'LU', 'continent': 'Europe', 'name': 'Luxembourg', 'capital': 'Luxembourg City'}, {'timezones': ['Europe/Riga'], 'code': 'LV', 'continent': 'Europe', 'name': 'Latvia', 'capital': 'Riga'}, {'timezones': ['Africa/Tripoli'], 'code': 'LY', 'continent': 'Africa', 'name': 'Libya', 'capital': 'Tripoli'}, {'timezones': ['Indian/Antananarivo'], 'code': 'MG', 'continent': 'Africa', 'name': 'Madagascar', 'capital': 'Antananarivo'}, {'timezones': ['Pacific/Majuro', 'Pacific/Kwajalein'], 'code': 'MH', 'continent': 'Oceania', 'name': 'Marshall Islands', 'capital': 'Majuro'}, {'timezones': ['Europe/Skopje'], 'code': 'MK', 'continent': 'Europe', 'name': 'Macedonia', 'capital': 'Skopje'}, {'timezones': ['Africa/Bamako'], 'code': 'ML', 'continent': 'Africa', 'name': 'Mali', 'capital': 'Bamako'}, {'timezones': ['Asia/Rangoon'], 'code': 'MM', 'continent': 'Asia', 'name': 'Myanmar', 'capital': 'Naypyidaw'}, {'timezones': ['Asia/Ulaanbaatar', 'Asia/Hovd', 'Asia/Choibalsan'], 'code': 'MN', 'continent': 'Asia', 'name': 'Mongolia', 'capital': 'Ulaanbaatar'}, {'timezones': ['Africa/Nouakchott'], 'code': 'MR', 'continent': 'Africa', 'name': 'Mauritania', 'capital': 'Nouakchott'}, {'timezones': ['Europe/Malta'], 'code': 'MT', 'continent': 'Europe', 'name': 'Malta', 'capital': 'Valletta'}, {'timezones': ['Indian/Mauritius'], 'code': 'MU', 'continent': 'Africa', 'name': 'Mauritius', 'capital': 'Port Louis'}, {'timezones': ['Indian/Maldives'], 'code': 'MV', 'continent': 'Asia', 'name': 'Maldives', 'capital': 'Mal\xc3\xa9'}, {'timezones': ['Africa/Blantyre'], 'code': 'MW', 'continent': 'Africa', 'name': 'Malawi', 'capital': 'Lilongwe'}, {'timezones': 
['America/Mexico_City', 'America/Cancun', 'America/Merida', 'America/Monterrey', 'America/Mazatlan', 'America/Chihuahua', 'America/Hermosillo', 'America/Tijuana'], 'code': 'MX', 'continent': 'North America', 'name': 'Mexico', 'capital': 'Mexico City'}, {'timezones': ['Asia/Kuala_Lumpur', 'Asia/Kuching'], 'code': 'MY', 'continent': 'Asia', 'name': 'Malaysia', 'capital': 'Kuala Lumpur'}, {'timezones': ['Africa/Maputo'], 'code': 'MZ', 'continent': 'Africa', 'name': 'Mozambique', 'capital': 'Maputo'}, {'timezones': ['Africa/Windhoek'], 'code': 'NA', 'continent': 'Africa', 'name': 'Namibia', 'capital': 'Windhoek'}, {'timezones': ['Africa/Niamey'], 'code': 'NE', 'continent': 'Africa', 'name': 'Niger', 'capital': 'Niamey'}, {'timezones': ['Africa/Lagos'], 'code': 'NG', 'continent': 'Africa', 'name': 'Nigeria', 'capital': 'Abuja'}, {'timezones': ['America/Managua'], 'code': 'NI', 'continent': 'North America', 'name': 'Nicaragua', 'capital': 'Managua'}, {'timezones': ['Europe/Amsterdam'], 'code': 'NL', 'continent': 'Europe', 'name': 'Kingdom of the Netherlands', 'capital': 'Amsterdam'}, {'timezones': ['Europe/Oslo'], 'code': 'NO', 'continent': 'Europe', 'name': 'Norway', 'capital': 'Oslo'}, {'timezones': ['Asia/Katmandu'], 'code': 'NP', 'continent': 'Asia', 'name': 'Nepal', 'capital': 'Kathmandu'}, {'timezones': ['Pacific/Nauru'], 'code': 'NR', 'continent': 'Oceania', 'name': 'Nauru', 'capital': 'Yaren'}, {'timezones': ['Pacific/Auckland', 'Pacific/Chatham'], 'code': 'NZ', 'continent': 'Oceania', 'name': 'New Zealand', 'capital': 'Wellington'}, {'timezones': ['Asia/Muscat'], 'code': 'OM', 'continent': 'Asia', 'name': 'Oman', 'capital': 'Muscat'}, {'timezones': ['America/Panama'], 'code': 'PA', 'continent': 'North America', 'name': 'Panama', 'capital': 'Panama City'}, {'timezones': ['America/Lima'], 'code': 'PE', 'continent': 'South America', 'name': 'Peru', 'capital': 'Lima'}, {'timezones': ['Pacific/Port_Moresby'], 'code': 'PG', 'continent': 'Oceania', 'name': 'Papua New Guinea', 'capital': 'Port Moresby'}, {'timezones': ['Asia/Manila'], 'code': 'PH', 'continent': 'Asia', 'name': 'Philippines', 'capital': 'Manila'}, {'timezones': ['Asia/Karachi'], 'code': 'PK', 'continent': 'Asia', 'name': 'Pakistan', 'capital': 'Islamabad'}, {'timezones': ['Europe/Warsaw'], 'code': 'PL', 'continent': 'Europe', 'name': 'Poland', 'capital': 'Warsaw'}, {'timezones': ['Europe/Lisbon', 'Atlantic/Madeira', 'Atlantic/Azores'], 'code': 'PT', 'continent': 'Europe', 'name': 'Portugal', 'capital': 'Lisbon'}, {'timezones': ['Pacific/Palau'], 'code': 'PW', 'continent': 'Oceania', 'name': 'Palau', 'capital': 'Ngerulmud'}, {'timezones': ['America/Asuncion'], 'code': 'PY', 'continent': 'South America', 'name': 'Paraguay', 'capital': 'Asunci\xc3\xb3n'}, {'timezones': ['Asia/Qatar'], 'code': 'QA', 'continent': 'Asia', 'name': 'Qatar', 'capital': 'Doha'}, {'timezones': ['Europe/Bucharest'], 'code': 'RO', 'continent': 'Europe', 'name': 'Romania', 'capital': 'Bucharest'}, {'timezones': ['Europe/Kaliningrad', 'Europe/Moscow', 'Europe/Volgograd', 'Europe/Samara', 'Asia/Yekaterinburg', 'Asia/Omsk', 'Asia/Novosibirsk', 'Asia/Krasnoyarsk', 'Asia/Irkutsk', 'Asia/Yakutsk', 'Asia/Vladivostok', 'Asia/Sakhalin', 'Asia/Magadan', 'Asia/Kamchatka', 'Asia/Anadyr'], 'code': 'RU', 'continent': 'Europe', 'name': 'Russia', 'capital': 'Moscow'}, {'timezones': ['Africa/Kigali'], 'code': 'RW', 'continent': 'Africa', 'name': 'Rwanda', 'capital': 'Kigali'}, {'timezones': ['Asia/Riyadh'], 'code': 'SA', 'continent': 'Asia', 'name': 'Saudi Arabia', 'capital': 
'Riyadh'}, {'timezones': ['Pacific/Guadalcanal'], 'code': 'SB', 'continent': 'Oceania', 'name': 'Solomon Islands', 'capital': 'Honiara'}, {'timezones': ['Indian/Mahe'], 'code': 'SC', 'continent': 'Africa', 'name': 'Seychelles', 'capital': 'Victoria'}, {'timezones': ['Africa/Khartoum'], 'code': 'SD', 'continent': 'Africa', 'name': 'Sudan', 'capital': 'Khartoum'}, {'timezones': ['Europe/Stockholm'], 'code': 'SE', 'continent': 'Europe', 'name': 'Sweden', 'capital': 'Stockholm'}, {'timezones': ['Asia/Singapore'], 'code': 'SG', 'continent': 'Asia', 'name': 'Singapore', 'capital': 'Singapore'}, {'timezones': ['Europe/Ljubljana'], 'code': 'SI', 'continent': 'Europe', 'name': 'Slovenia', 'capital': 'Ljubljana'}, {'timezones': ['Europe/Bratislava'], 'code': 'SK', 'continent': 'Europe', 'name': 'Slovakia', 'capital': 'Bratislava'}, {'timezones': ['Africa/Freetown'], 'code': 'SL', 'continent': 'Africa', 'name': 'Sierra Leone', 'capital': 'Freetown'}, {'timezones': ['Europe/San_Marino'], 'code': 'SM', 'continent': 'Europe', 'name': 'San Marino', 'capital': 'San Marino'}, {'timezones': ['Africa/Dakar'], 'code': 'SN', 'continent': 'Africa', 'name': 'Senegal', 'capital': 'Dakar'}, {'timezones': ['Africa/Mogadishu'], 'code': 'SO', 'continent': 'Africa', 'name': 'Somalia', 'capital': 'Mogadishu'}, {'timezones': ['America/Paramaribo'], 'code': 'SR', 'continent': 'South America', 'name': 'Suriname', 'capital': 'Paramaribo'}, {'timezones': ['Africa/Sao_Tome'], 'code': 'ST', 'continent': 'Africa', 'name': 'S\xc3\xa3o Tom\xc3\xa9 and Pr\xc3\xadncipe', 'capital': 'S\xc3\xa3o Tom\xc3\xa9'}, {'timezones': ['Asia/Damascus'], 'code': 'SY', 'continent': 'Asia', 'name': 'Syria', 'capital': 'Damascus'}, {'timezones': ['Africa/Lome'], 'code': 'TG', 'continent': 'Africa', 'name': 'Togo', 'capital': 'Lom\xc3\xa9'}, {'timezones': ['Asia/Bangkok'], 'code': 'TH', 'continent': 'Asia', 'name': 'Thailand', 'capital': 'Bangkok'}, {'timezones': ['Asia/Dushanbe'], 'code': 'TJ', 'continent': 'Asia', 'name': 'Tajikistan', 'capital': 'Dushanbe'}, {'timezones': ['Asia/Ashgabat'], 'code': 'TM', 'continent': 'Asia', 'name': 'Turkmenistan', 'capital': 'Ashgabat'}, {'timezones': ['Africa/Tunis'], 'code': 'TN', 'continent': 'Africa', 'name': 'Tunisia', 'capital': 'Tunis'}, {'timezones': ['Pacific/Tongatapu'], 'code': 'TO', 'continent': 'Oceania', 'name': 'Tonga', 'capital': 'Nuku\xca\xbbalofa'}, {'timezones': ['Europe/Istanbul'], 'code': 'TR', 'continent': 'Asia', 'name': 'Turkey', 'capital': 'Ankara'}, {'timezones': ['America/Port_of_Spain'], 'code': 'TT', 'continent': 'North America', 'name': 'Trinidad and Tobago', 'capital': 'Port of Spain'}, {'timezones': ['Pacific/Funafuti'], 'code': 'TV', 'continent': 'Oceania', 'name': 'Tuvalu', 'capital': 'Funafuti'}, {'timezones': ['Africa/Dar_es_Salaam'], 'code': 'TZ', 'continent': 'Africa', 'name': 'Tanzania', 'capital': 'Dodoma'}, {'timezones': ['Europe/Kiev', 'Europe/Uzhgorod', 'Europe/Zaporozhye', 'Europe/Simferopol'], 'code': 'UA', 'continent': 'Europe', 'name': 'Ukraine', 'capital': 'Kiev'}, {'timezones': ['Africa/Kampala'], 'code': 'UG', 'continent': 'Africa', 'name': 'Uganda', 'capital': 'Kampala'}, {'timezones': ['America/New_York', 'America/Detroit', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Indiana/Indianapolis', 'America/Indiana/Marengo', 'America/Indiana/Knox', 'America/Indiana/Vevay', 'America/Chicago', 'America/Indiana/Vincennes', 'America/Indiana/Petersburg', 'America/Menominee', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 
'America/Denver', 'America/Boise', 'America/Shiprock', 'America/Phoenix', 'America/Los_Angeles', 'America/Anchorage', 'America/Juneau', 'America/Yakutat', 'America/Nome', 'America/Adak', 'Pacific/Honolulu'], 'code': 'US', 'continent': 'North America', 'name': 'United States', 'capital': 'Washington, D.C.'}, {'timezones': ['America/Montevideo'], 'code': 'UY', 'continent': 'South America', 'name': 'Uruguay', 'capital': 'Montevideo'}, {'timezones': ['Asia/Samarkand', 'Asia/Tashkent'], 'code': 'UZ', 'continent': 'Asia', 'name': 'Uzbekistan', 'capital': 'Tashkent'}, {'timezones': ['Europe/Vatican'], 'code': 'VA', 'continent': 'Europe', 'name': 'Vatican City', 'capital': 'Vatican City'}, {'timezones': ['America/Caracas'], 'code': 'VE', 'continent': 'South America', 'name': 'Venezuela', 'capital': 'Caracas'}, {'timezones': ['Asia/Saigon'], 'code': 'VN', 'continent': 'Asia', 'name': 'Vietnam', 'capital': 'Hanoi'}, {'timezones': ['Pacific/Efate'], 'code': 'VU', 'continent': 'Oceania', 'name': 'Vanuatu', 'capital': 'Port Vila'}, {'timezones': ['Asia/Aden'], 'code': 'YE', 'continent': 'Asia', 'name': 'Yemen', 'capital': "Sana'a"}, {'timezones': ['Africa/Lusaka'], 'code': 'ZM', 'continent': 'Africa', 'name': 'Zambia', 'capital': 'Lusaka'}, {'timezones': ['Africa/Harare'], 'code': 'ZW', 'continent': 'Africa', 'name': 'Zimbabwe', 'capital': 'Harare'}, {'timezones': ['Africa/Algiers'], 'code': 'DZ', 'continent': 'Africa', 'name': 'Algeria', 'capital': 'Algiers'}, {'timezones': ['Europe/Sarajevo'], 'code': 'BA', 'continent': 'Europe', 'name': 'Bosnia and Herzegovina', 'capital': 'Sarajevo'}, {'timezones': ['Asia/Phnom_Penh'], 'code': 'KH', 'continent': 'Asia', 'name': 'Cambodia', 'capital': 'Phnom Penh'}, {'timezones': ['Africa/Bangui'], 'code': 'CF', 'continent': 'Africa', 'name': 'Central African Republic', 'capital': 'Bangui'}, {'timezones': ['Africa/Ndjamena'], 'code': 'TD', 'continent': 'Africa', 'name': 'Chad', 'capital': "N'Djamena"}, {'timezones': ['Indian/Comoro'], 'code': 'KM', 'continent': 'Africa', 'name': 'Comoros', 'capital': 'Moroni'}, {'timezones': ['Europe/Zagreb'], 'code': 'HR', 'continent': 'Europe', 'name': 'Croatia', 'capital': 'Zagreb'}, {'timezones': ['Asia/Dili'], 'code': 'TL', 'continent': 'Asia', 'name': 'East Timor', 'capital': 'Dili'}, {'timezones': ['America/El_Salvador'], 'code': 'SV', 'continent': 'North America', 'name': 'El Salvador', 'capital': 'San Salvador'}, {'timezones': ['Africa/Malabo'], 'code': 'GQ', 'continent': 'Africa', 'name': 'Equatorial Guinea', 'capital': 'Malabo'}, {'timezones': ['America/Grenada'], 'code': 'GD', 'continent': 'North America', 'name': 'Grenada', 'capital': "St. 
George's"}, {'timezones': ['Asia/Almaty', 'Asia/Qyzylorda', 'Asia/Aqtobe', 'Asia/Aqtau', 'Asia/Oral'], 'code': 'KZ', 'continent': 'Asia', 'name': 'Kazakhstan', 'capital': 'Astana'}, {'timezones': ['Asia/Vientiane'], 'code': 'LA', 'continent': 'Asia', 'name': 'Laos', 'capital': 'Vientiane'}, {'timezones': ['Pacific/Truk', 'Pacific/Ponape', 'Pacific/Kosrae'], 'code': 'FM', 'continent': 'Oceania', 'name': 'Federated States of Micronesia', 'capital': 'Palikir'}, {'timezones': ['Europe/Chisinau'], 'code': 'MD', 'continent': 'Europe', 'name': 'Moldova', 'capital': 'Chi\xc5\x9fin\xc4\x83u'}, {'timezones': ['Europe/Monaco'], 'code': 'MC', 'continent': 'Europe', 'name': 'Monaco', 'capital': 'Monaco'}, {'timezones': ['Europe/Podgorica'], 'code': 'ME', 'continent': 'Europe', 'name': 'Montenegro', 'capital': 'Podgorica'}, {'timezones': ['Africa/Casablanca'], 'code': 'MA', 'continent': 'Africa', 'name': 'Morocco', 'capital': 'Rabat'}, {'timezones': ['America/St_Kitts'], 'code': 'KN', 'continent': 'North America', 'name': 'Saint Kitts and Nevis', 'capital': 'Basseterre'}, {'timezones': ['America/St_Lucia'], 'code': 'LC', 'continent': 'North America', 'name': 'Saint Lucia', 'capital': 'Castries'}, {'timezones': ['America/St_Vincent'], 'code': 'VC', 'continent': 'North America', 'name': 'Saint Vincent and the Grenadines', 'capital': 'Kingstown'}, {'timezones': ['Pacific/Apia'], 'code': 'WS', 'continent': 'Oceania', 'name': 'Samoa', 'capital': 'Apia'}, {'timezones': ['Europe/Belgrade'], 'code': 'RS', 'continent': 'Europe', 'name': 'Serbia', 'capital': 'Belgrade'}, {'timezones': ['Africa/Johannesburg'], 'code': 'ZA', 'continent': 'Africa', 'name': 'South Africa', 'capital': 'Pretoria'}, {'timezones': ['Europe/Madrid', 'Africa/Ceuta', 'Atlantic/Canary'], 'code': 'ES', 'continent': 'Europe', 'name': 'Spain', 'capital': 'Madrid'}, {'timezones': ['Asia/Colombo'], 'code': 'LK', 'continent': 'Asia', 'name': 'Sri Lanka', 'capital': 'Sri Jayewardenepura Kotte'}, {'timezones': ['Africa/Mbabane'], 'code': 'SZ', 'continent': 'Africa', 'name': 'Swaziland', 'capital': 'Mbabane'}, {'timezones': ['Europe/Zurich'], 'code': 'CH', 'continent': 'Europe', 'name': 'Switzerland', 'capital': 'Bern'}, {'timezones': ['Asia/Dubai'], 'code': 'AE', 'continent': 'Asia', 'name': 'United Arab Emirates', 'capital': 'Abu Dhabi'}, {'timezones': ['Europe/London'], 'code': 'GB', 'continent': 'Europe', 'name': 'United Kingdom', 'capital': 'London'}, ] regex = re.compile(timedelta_pattern) @classmethod def unix_time(cls): """ Get a timestamp between January 1, 1970 and now :example 1061306726 """ return random.randint(0, int(time())) @classmethod def time_delta(cls): """ Get a timedelta object """ ts = random.randint(0, int(time())) return timedelta(seconds=ts) @classmethod def date_time(cls, tzinfo=None): """ Get a datetime object for a date between January 1, 1970 and now :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2005-08-16 20:39:21') :return datetime """ return datetime.fromtimestamp(cls.unix_time(), tzinfo) @classmethod def date_time_ad(cls, tzinfo=None): """ Get a datetime object for a date between January 1, 001 and now :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1265-03-22 21:15:52') :return datetime """ ts = random.randint(-62135600400, int(time())) # NOTE: using datetime.fromtimestamp(ts) directly will raise # a "ValueError: timestamp out of range for platform time_t" # on some platforms due to system C functions; # see 
http://stackoverflow.com/a/10588133/2315612 return datetime.fromtimestamp(0, tzinfo) + timedelta(seconds=ts) @classmethod def iso8601(cls, tzinfo=None): """ :param tzinfo: timezone, instance of datetime.tzinfo subclass :example '2003-10-21T16:05:52+0000' """ return cls.date_time(tzinfo).isoformat() @classmethod def date(cls, pattern='%Y-%m-%d'): """ Get a date string between January 1, 1970 and now :param pattern format :example '2008-11-27' """ return cls.date_time().strftime(pattern) @classmethod def date_object(cls): """ Get a date object between January 1, 1970 and now :example datetime.date(2016, 9, 20) """ return cls.date_time().date() @classmethod def time(cls, pattern='%H:%M:%S'): """ Get a time string (24h format by default) :param pattern format :example '15:02:34' """ return cls.date_time().time().strftime(pattern) @classmethod def time_object(cls): """ Get a time object :example datetime.time(15, 56, 56, 772876) """ return cls.date_time().time() @classmethod def _parse_date_time(cls, text, tzinfo=None): if isinstance(text, (datetime, date, real_datetime, real_date)): return datetime_to_timestamp(text) now = datetime.now(tzinfo) if isinstance(text, timedelta): return datetime_to_timestamp(now - text) if is_string(text): if text == 'now': return datetime_to_timestamp(datetime.now(tzinfo)) parts = cls.regex.match(text) if not parts: return parts = parts.groupdict() time_params = {} for (name, param) in parts.items(): if param: time_params[name] = int(param) if 'years' in time_params: if 'days' not in time_params: time_params['days'] = 0 time_params['days'] += 365.24 * time_params.pop('years') return datetime_to_timestamp(now + timedelta(**time_params)) if isinstance(text, int): return datetime_to_timestamp(now + timedelta(text)) raise ValueError("Invalid format for date '{0}'".format(text)) @classmethod def date_time_between(cls, start_date='-30y', end_date='now', tzinfo=None): """ Get a DateTime object based on a random date between two given dates. Accepts date strings that can be recognized by strtotime(). :param start_date Defaults to 30 years ago :param end_date Defaults to "now" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ start_date = cls._parse_date_time(start_date) end_date = cls._parse_date_time(end_date) timestamp = random.randint(start_date, end_date) return datetime.fromtimestamp(timestamp, tzinfo) @classmethod def date_time_between_dates(cls, datetime_start=None, datetime_end=None, tzinfo=None): """ Takes two DateTime objects and returns a random date between the two given dates. Accepts DateTime objects. :param datetime_start DateTime :param datetime_end DateTime :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ if datetime_start is None: datetime_start = datetime.now(tzinfo) if datetime_end is None: datetime_end = datetime.now(tzinfo) timestamp = random.randint( datetime_to_timestamp(datetime_start), datetime_to_timestamp(datetime_end), ) return datetime.fromtimestamp(timestamp, tzinfo) @classmethod def date_time_this_century(cls, before_now=True, after_now=False, tzinfo=None): """ Gets a DateTime object for the current century. 
:param before_now: include days in current century before today :param after_now: include days in current century after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime """ now = datetime.now(tzinfo) this_century_start = datetime(now.year - (now.year % 100), 1, 1, tzinfo=tzinfo) next_century_start = datetime(this_century_start.year + 100, 1, 1, tzinfo=tzinfo) if before_now and after_now: return cls.date_time_between_dates(this_century_start, next_century_start, tzinfo) elif not before_now and after_now: return cls.date_time_between_dates(now, next_century_start, tzinfo) elif not after_now and before_now: return cls.date_time_between_dates(this_century_start, now, tzinfo) else: return now @classmethod def date_time_this_decade(cls, before_now=True, after_now=False, tzinfo=None): """ Gets a DateTime object for the decade year. :param before_now: include days in current decade before today :param after_now: include days in current decade after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime """ now = datetime.now(tzinfo) this_decade_start = datetime(now.year - (now.year % 10), 1, 1, tzinfo=tzinfo) next_decade_start = datetime(this_decade_start.year + 10, 1, 1, tzinfo=tzinfo) if before_now and after_now: return cls.date_time_between_dates(this_decade_start, next_decade_start, tzinfo) elif not before_now and after_now: return cls.date_time_between_dates(now, next_decade_start, tzinfo) elif not after_now and before_now: return cls.date_time_between_dates(this_decade_start, now, tzinfo) else: return now @classmethod def date_time_this_year(cls, before_now=True, after_now=False, tzinfo=None): """ Gets a DateTime object for the current year. :param before_now: include days in current year before today :param after_now: include days in current year after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime """ now = datetime.now(tzinfo) this_year_start = now.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) next_year_start = datetime(now.year + 1, 1, 1, tzinfo=tzinfo) if before_now and after_now: return cls.date_time_between_dates(this_year_start, next_year_start, tzinfo) elif not before_now and after_now: return cls.date_time_between_dates(now, next_year_start, tzinfo) elif not after_now and before_now: return cls.date_time_between_dates(this_year_start, now, tzinfo) else: return now @classmethod def date_time_this_month(cls, before_now=True, after_now=False, tzinfo=None): """ Gets a DateTime object for the current month. 
:param before_now: include days in current month before today :param after_now: include days in current month after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime """ now = datetime.now(tzinfo) this_month_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) next_month_start = this_month_start + relativedelta.relativedelta(months=1) if before_now and after_now: return cls.date_time_between_dates(this_month_start, next_month_start, tzinfo) elif not before_now and after_now: return cls.date_time_between_dates(now, next_month_start, tzinfo) elif not after_now and before_now: return cls.date_time_between_dates(this_month_start, now, tzinfo) else: return now @classmethod def am_pm(cls): return cls.date('%p') @classmethod def day_of_month(cls): return cls.date('%d') @classmethod def day_of_week(cls): return cls.date('%A') @classmethod def month(cls): return cls.date('%m') @classmethod def month_name(cls): return cls.date('%B') @classmethod def year(cls): return cls.date('%Y') @classmethod def century(cls): """ :example 'XVII' """ return cls.random_element(cls.centuries) @classmethod def timezone(cls): return random.choice(cls.random_element(cls.countries)['timezones'])
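# ----------------------------------------------------------------------
# A short usage sketch (not part of the original provider).  Assumes the
# faker package is installed; all outputs are random and shown only as
# illustration:
#
#   from faker import Faker
#   fake = Faker()
#   fake.iso8601()                                  # '2003-10-21T16:05:52'
#   fake.date_time_between(start_date='-1y', end_date='now')
#   fake.timezone()                                 # 'Europe/Paris'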
mit
takeshineshiro/django
tests/auth_tests/test_context_processors.py
269
6773
import datetime from django.contrib.auth import authenticate from django.contrib.auth.context_processors import PermLookupDict, PermWrapper from django.contrib.auth.models import Permission, User from django.contrib.contenttypes.models import ContentType from django.db.models import Q from django.test import SimpleTestCase, TestCase, override_settings from .settings import AUTH_MIDDLEWARE_CLASSES, AUTH_TEMPLATES class MockUser(object): def has_module_perms(self, perm): if perm == 'mockapp': return True return False def has_perm(self, perm): if perm == 'mockapp.someperm': return True return False class PermWrapperTests(SimpleTestCase): """ Test some details of the PermWrapper implementation. """ class EQLimiterObject(object): """ This object makes sure __eq__ will not be called endlessly. """ def __init__(self): self.eq_calls = 0 def __eq__(self, other): if self.eq_calls > 0: return True self.eq_calls += 1 return False def test_permwrapper_in(self): """ Test that 'something' in PermWrapper works as expected. """ perms = PermWrapper(MockUser()) # Works for modules and full permissions. self.assertIn('mockapp', perms) self.assertNotIn('nonexisting', perms) self.assertIn('mockapp.someperm', perms) self.assertNotIn('mockapp.nonexisting', perms) def test_permlookupdict_in(self): """ No endless loops if accessed with 'in' - refs #18979. """ pldict = PermLookupDict(MockUser(), 'mockapp') with self.assertRaises(TypeError): self.EQLimiterObject() in pldict @override_settings( PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'], ROOT_URLCONF='auth_tests.urls', TEMPLATES=AUTH_TEMPLATES, USE_TZ=False, # required for loading the fixture ) class AuthContextProcessorTests(TestCase): """ Tests for the ``django.contrib.auth.context_processors.auth`` processor """ @classmethod def setUpTestData(cls): # password = "secret" cls.u1 = User.objects.create( id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super', first_name='Super', last_name='User', email='super@example.com', is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10) ) @override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES) def test_session_not_accessed(self): """ Tests that the session is not accessed simply by including the auth context processor """ response = self.client.get('/auth_processor_no_attr_access/') self.assertContains(response, "Session not accessed") @override_settings(MIDDLEWARE_CLASSES=AUTH_MIDDLEWARE_CLASSES) def test_session_is_accessed(self): """ Tests that the session is accessed if the auth context processor is used and relevant attributes accessed. 
""" response = self.client.get('/auth_processor_attr_access/') self.assertContains(response, "Session accessed") def test_perms_attrs(self): u = User.objects.create_user(username='normal', password='secret') u.user_permissions.add( Permission.objects.get( content_type=ContentType.objects.get_for_model(Permission), codename='add_permission')) self.client.login(username='normal', password='secret') response = self.client.get('/auth_processor_perms/') self.assertContains(response, "Has auth permissions") self.assertContains(response, "Has auth.add_permission permissions") self.assertNotContains(response, "nonexisting") def test_perm_in_perms_attrs(self): u = User.objects.create_user(username='normal', password='secret') u.user_permissions.add( Permission.objects.get( content_type=ContentType.objects.get_for_model(Permission), codename='add_permission')) self.client.login(username='normal', password='secret') response = self.client.get('/auth_processor_perm_in_perms/') self.assertContains(response, "Has auth permissions") self.assertContains(response, "Has auth.add_permission permissions") self.assertNotContains(response, "nonexisting") def test_message_attrs(self): self.client.login(username='super', password='secret') response = self.client.get('/auth_processor_messages/') self.assertContains(response, "Message 1") def test_user_attrs(self): """ Test that the lazy objects returned behave just like the wrapped objects. """ # These are 'functional' level tests for common use cases. Direct # testing of the implementation (SimpleLazyObject) is in the 'utils' # tests. self.client.login(username='super', password='secret') user = authenticate(username='super', password='secret') response = self.client.get('/auth_processor_user/') self.assertContains(response, "unicode: super") self.assertContains(response, "id: 100") self.assertContains(response, "username: super") # bug #12037 is tested by the {% url %} in the template: self.assertContains(response, "url: /userpage/super/") # See if this object can be used for queries where a Q() comparing # a user can be used with another Q() (in an AND or OR fashion). # This simulates what a template tag might do with the user from the # context. Note that we don't need to execute a query, just build it. # # The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped # User is a fatal TypeError: "function() takes at least 2 arguments # (0 given)" deep inside deepcopy(). # # Python 2.5 and 2.6 succeeded, but logged internally caught exception # spew: # # Exception RuntimeError: 'maximum recursion depth exceeded while # calling a Python object' in <type 'exceptions.AttributeError'> # ignored" Q(user=response.context['user']) & Q(someflag=True) # Tests for user equality. This is hard because User defines # equality in a non-duck-typing way # See bug #12060 self.assertEqual(response.context['user'], user) self.assertEqual(user, response.context['user'])
bsd-3-clause
sysadminmatmoz/ingadhoc
stock_picking_locations/stock.py
8
1503
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api


class stock_picking(models.Model):
    _inherit = 'stock.picking'

    new_location_id = fields.Many2one(
        'stock.location', 'Source Location', readonly=True,
        states={
            'draft': [('readonly', False)],
            'waiting': [('readonly', False)],
            'confirmed': [('readonly', False)],
        },
        help="Sets a location if you produce at a fixed location. This can "
             "be a partner location if you subcontract the manufacturing "
             "operations. This will be the default of the associated stock "
             "moves.")
    new_location_dest_id = fields.Many2one(
        'stock.location', 'Destination Location', readonly=True,
        states={
            'draft': [('readonly', False)],
            'waiting': [('readonly', False)],
            'confirmed': [('readonly', False)],
        },
        help="Location where the system will stock the finished products. "
             "This will be the default of the associated stock moves.")

    @api.one
    def update_locations(self):
        vals = {
            'location_id': self.new_location_id.id,
            'location_dest_id': self.new_location_dest_id.id
        }
        self.move_lines.write(vals)
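# ----------------------------------------------------------------------
# A minimal usage sketch (assumptions: a draft picking and two location
# records exist); the names are illustrative only, not from the module:
#
#   picking = env['stock.picking'].browse(picking_id)
#   picking.write({'new_location_id': src_loc.id,
#                  'new_location_dest_id': dest_loc.id})
#   picking.update_locations()  # pushes both locations onto move_lines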
agpl-3.0
funbaker/astropy
astropy/table/meta.py
2
11013
import textwrap import copy from collections import OrderedDict __all__ = ['get_header_from_yaml', 'get_yaml_from_header', 'get_yaml_from_table'] class ColumnOrderList(list): """ List of tuples that sorts in a specific order that makes sense for astropy table column attributes. """ def sort(self, *args, **kwargs): super().sort() column_keys = ['name', 'unit', 'datatype', 'format', 'description', 'meta'] in_dict = dict(self) out_list = [] for key in column_keys: if key in in_dict: out_list.append((key, in_dict[key])) for key, val in self: if key not in column_keys: out_list.append((key, val)) # Clear list in-place del self[:] self.extend(out_list) class ColumnDict(dict): """ Specialized dict subclass to represent attributes of a Column and return items() in a preferred order. This is only for use in generating a YAML map representation that has a fixed order. """ def items(self): """ Return items as a ColumnOrderList, which sorts in the preferred way for column attributes. """ return ColumnOrderList(super().items()) def _construct_odict(load, node): """ Construct OrderedDict from !!omap in yaml safe load. Source: https://gist.github.com/weaver/317164 License: Unspecified This is the same as SafeConstructor.construct_yaml_omap(), except the data type is changed to OrderedDict() and setitem is used instead of append in the loop Examples -------- :: >>> yaml.load(''' # doctest: +SKIP ... !!omap ... - foo: bar ... - mumble: quux ... - baz: gorp ... ''') OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) >>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) """ import yaml omap = OrderedDict() yield omap if not isinstance(node, yaml.SequenceNode): raise yaml.constructor.ConstructorError( "while constructing an ordered map", node.start_mark, "expected a sequence, but found {}".format(node.id), node.start_mark) for subnode in node.value: if not isinstance(subnode, yaml.MappingNode): raise yaml.constructor.ConstructorError( "while constructing an ordered map", node.start_mark, "expected a mapping of length 1, but found {}".format(subnode.id), subnode.start_mark) if len(subnode.value) != 1: raise yaml.constructor.ConstructorError( "while constructing an ordered map", node.start_mark, "expected a single mapping item, but found {} items".format(len(subnode.value)), subnode.start_mark) key_node, value_node = subnode.value[0] key = load.construct_object(key_node) value = load.construct_object(value_node) omap[key] = value def _repr_pairs(dump, tag, sequence, flow_style=None): """ This is the same code as BaseRepresenter.represent_sequence(), but the value passed to dump.represent_data() in the loop is a dictionary instead of a tuple. Source: https://gist.github.com/weaver/317164 License: Unspecified """ import yaml value = [] node = yaml.SequenceNode(tag, value, flow_style=flow_style) if dump.alias_key is not None: dump.represented_objects[dump.alias_key] = node best_style = True for (key, val) in sequence: item = dump.represent_data({key: val}) if not (isinstance(item, yaml.ScalarNode) and not item.style): best_style = False value.append(item) if flow_style is None: if dump.default_flow_style is not None: node.flow_style = dump.default_flow_style else: node.flow_style = best_style return node def _repr_odict(dumper, data): """ Represent OrderedDict in yaml dump. 
Source: https://gist.github.com/weaver/317164 License: Unspecified >>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) >>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP '!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n' >>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP '!!omap [foo: bar, mumble: quux, baz: gorp]\\n' """ return _repr_pairs(dumper, u'tag:yaml.org,2002:omap', data.items()) def _repr_column_dict(dumper, data): """ Represent ColumnDict in yaml dump. This is the same as an ordinary mapping except that the keys are written in a fixed order that makes sense for astropy table columns. """ return dumper.represent_mapping(u'tag:yaml.org,2002:map', data) def _get_col_attributes(col): """ Extract information from a column (apart from the values) that is required to fully serialize the column. """ attrs = ColumnDict() attrs['name'] = col.info.name type_name = col.info.dtype.type.__name__ if type_name.startswith(('bytes', 'str')): type_name = 'string' if type_name.endswith('_'): type_name = type_name[:-1] # string_ and bool_ lose the final _ for ECSV attrs['datatype'] = type_name # Set the output attributes for attr, nontrivial, xform in (('unit', lambda x: x is not None, str), ('format', lambda x: x is not None, None), ('description', lambda x: x is not None, None), ('meta', lambda x: x, None)): col_attr = getattr(col.info, attr) if nontrivial(col_attr): attrs[attr] = xform(col_attr) if xform else col_attr return attrs def get_yaml_from_table(table): """ Return lines with a YAML representation of header content from the ``table``. Parameters ---------- table : `~astropy.table.Table` object Table for which header content is output Returns ------- lines : list List of text lines with YAML header content """ header = {'cols': list(table.columns.values())} if table.meta: header['meta'] = table.meta return get_yaml_from_header(header) def get_yaml_from_header(header): """ Return lines with a YAML representation of header content from a Table. The ``header`` dict must contain these keys: - 'cols' : list of table column objects (required) - 'meta' : table 'meta' attribute (optional) Other keys included in ``header`` will be serialized in the output YAML representation. Parameters ---------- header : dict Table header content Returns ------- lines : list List of text lines with YAML header content """ try: import yaml except ImportError: raise ImportError('`import yaml` failed, PyYAML package is ' 'required for serializing mixin columns') from ..io.misc.yaml import AstropyDumper class TableDumper(AstropyDumper): """ Custom Dumper that represents OrderedDict as an !!omap object. """ def represent_mapping(self, tag, mapping, flow_style=None): """ This is a combination of the Python 2 and 3 versions of this method in the PyYAML library to allow the required key ordering via the ColumnOrderList object. The Python 3 version insists on turning the items() mapping into a list object and sorting, which results in alphabetical order for the column keys. 
""" value = [] node = yaml.MappingNode(tag, value, flow_style=flow_style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node best_style = True if hasattr(mapping, 'items'): mapping = mapping.items() if hasattr(mapping, 'sort'): mapping.sort() else: mapping = list(mapping) try: mapping = sorted(mapping) except TypeError: pass for item_key, item_value in mapping: node_key = self.represent_data(item_key) node_value = self.represent_data(item_value) if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style): best_style = False if not (isinstance(node_value, yaml.ScalarNode) and not node_value.style): best_style = False value.append((node_key, node_value)) if flow_style is None: if self.default_flow_style is not None: node.flow_style = self.default_flow_style else: node.flow_style = best_style return node TableDumper.add_representer(OrderedDict, _repr_odict) TableDumper.add_representer(ColumnDict, _repr_column_dict) header = copy.copy(header) # Don't overwrite original header['datatype'] = [_get_col_attributes(col) for col in header['cols']] del header['cols'] lines = yaml.dump(header, Dumper=TableDumper, width=130).splitlines() return lines class YamlParseError(Exception): pass def get_header_from_yaml(lines): """ Get a header dict from input ``lines`` which should be valid YAML. This input will typically be created by get_yaml_from_header. The output is a dictionary which describes all the table and column meta. The get_cols() method in the io/ascii/ecsv.py file should be used as a guide to using the information when constructing a table using this header dict information. Parameters ---------- lines : list List of text lines with YAML header content Returns ------- header : dict Dictionary describing table and column meta """ try: import yaml except ImportError: raise ImportError('`import yaml` failed, PyYAML package ' 'is required for serializing mixin columns') from ..io.misc.yaml import AstropyLoader class TableLoader(AstropyLoader): """ Custom Loader that constructs OrderedDict from an !!omap object. This does nothing but provide a namespace for adding the custom odict constructor. """ TableLoader.add_constructor(u'tag:yaml.org,2002:omap', _construct_odict) # Now actually load the YAML data structure into `meta` header_yaml = textwrap.dedent('\n'.join(lines)) try: header = yaml.load(header_yaml, Loader=TableLoader) except Exception as err: raise YamlParseError(str(err)) return header
bsd-3-clause
hellobbn/android_kernel_htc_msm8974
tools/perf/util/setup.py
4998
1330
#!/usr/bin/python2

from distutils.core import setup, Extension
from os import getenv

from distutils.command.build_ext   import build_ext   as _build_ext
from distutils.command.install_lib import install_lib as _install_lib

class build_ext(_build_ext):
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib  = build_lib
        self.build_temp = build_tmp

class install_lib(_install_lib):
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib

cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
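# --- Hedged usage sketch (not part of the kernel tree). The perf Makefile
# --- normally drives this script; the staging directories below are made up
# --- for illustration and 'python2' is assumed to be on PATH.
import os
import subprocess

env = dict(os.environ)
env['PYTHON_EXTBUILD_LIB'] = '/tmp/perf-ext/lib/'  # hypothetical staging dir
env['PYTHON_EXTBUILD_TMP'] = '/tmp/perf-ext/tmp/'  # hypothetical staging dir

# Run from tools/perf/ so that util/python-ext-sources resolves correctly.
subprocess.check_call(['python2', 'util/setup.py', '--quiet', 'build_ext'],
                      env=env)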
gpl-2.0
gajim/python-nbxmpp
nbxmpp/modules/rsm.py
1
1846
# Copyright (C) 2020 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.

from nbxmpp.namespaces import Namespace
from nbxmpp.structs import RSMData


def parse_rsm(stanza):
    stanza = stanza.getTag('set', namespace=Namespace.RSM)
    if stanza is None:
        return None

    after = stanza.getTagData('after') or None
    before = stanza.getTagData('before') or None
    last = stanza.getTagData('last') or None

    first_index = None
    first = stanza.getTagData('first') or None
    if first is not None:
        # getTagData() returns the text content, so the index attribute has
        # to be read from the <first/> element itself.
        try:
            first_index = int(stanza.getTag('first').getAttr('index'))
        except Exception:
            pass

    try:
        count = int(stanza.getTagData('count'))
    except Exception:
        count = None

    try:
        max_ = int(stanza.getTagData('max'))
    except Exception:
        max_ = None

    try:
        index = int(stanza.getTagData('index'))
    except Exception:
        index = None

    return RSMData(after=after,
                   before=before,
                   last=last,
                   first=first,
                   first_index=first_index,
                   count=count,
                   max=max_,
                   index=index)
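# --- Hedged self-test sketch (not part of the module). It builds a parent
# --- stanza with nbxmpp's simplexml Node API and relies on the <first/>
# --- index attribute being read from the element, as done above.
from nbxmpp.simplexml import Node

parent = Node('iq')
rsm_set = parent.addChild('set', namespace=Namespace.RSM)
first = rsm_set.addChild('first', attrs={'index': '0'})
first.setData('msg-id-1')
rsm_set.setTagData('last', 'msg-id-20')
rsm_set.setTagData('count', '20')

data = parse_rsm(parent)
assert data.first == 'msg-id-1'
assert data.first_index == 0
assert data.count == 20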
gpl-3.0
sorenh/cc
vendor/tornado/website/markdown/blockprocessors.py
20
17769
""" CORE MARKDOWN BLOCKPARSER ============================================================================= This parser handles basic parsing of Markdown blocks. It doesn't concern itself with inline elements such as **bold** or *italics*, but rather just catches blocks, lists, quotes, etc. The BlockParser is made up of a bunch of BlockProssors, each handling a different type of block. Extensions may add/replace/remove BlockProcessors as they need to alter how markdown blocks are parsed. """ import re import markdown class BlockProcessor: """ Base class for block processors. Each subclass will provide the methods below to work with the source and tree. Each processor will need to define it's own ``test`` and ``run`` methods. The ``test`` method should return True or False, to indicate whether the current block should be processed by this processor. If the test passes, the parser will call the processors ``run`` method. """ def __init__(self, parser=None): self.parser = parser def lastChild(self, parent): """ Return the last child of an etree element. """ if len(parent): return parent[-1] else: return None def detab(self, text): """ Remove a tab from the front of each line of the given text. """ newtext = [] lines = text.split('\n') for line in lines: if line.startswith(' '*markdown.TAB_LENGTH): newtext.append(line[markdown.TAB_LENGTH:]) elif not line.strip(): newtext.append('') else: break return '\n'.join(newtext), '\n'.join(lines[len(newtext):]) def looseDetab(self, text, level=1): """ Remove a tab from front of lines but allowing dedented lines. """ lines = text.split('\n') for i in range(len(lines)): if lines[i].startswith(' '*markdown.TAB_LENGTH*level): lines[i] = lines[i][markdown.TAB_LENGTH*level:] return '\n'.join(lines) def test(self, parent, block): """ Test for block type. Must be overridden by subclasses. As the parser loops through processors, it will call the ``test`` method on each to determine if the given block of text is of that type. This method must return a boolean ``True`` or ``False``. The actual method of testing is left to the needs of that particular block type. It could be as simple as ``block.startswith(some_string)`` or a complex regular expression. As the block type may be different depending on the parent of the block (i.e. inside a list), the parent etree element is also provided and may be used as part of the test. Keywords: * ``parent``: A etree element which will be the parent of the block. * ``block``: A block of text from the source which has been split at blank lines. """ pass def run(self, parent, blocks): """ Run processor. Must be overridden by subclasses. When the parser determines the appropriate type of a block, the parser will call the corresponding processor's ``run`` method. This method should parse the individual lines of the block and append them to the etree. Note that both the ``parent`` and ``etree`` keywords are pointers to instances of the objects which should be edited in place. Each processor must make changes to the existing objects as there is no mechanism to return new/different objects to replace them. This means that this method should be adding SubElements or adding text to the parent, and should remove (``pop``) or add (``insert``) items to the list of blocks. Keywords: * ``parent``: A etree element which is the parent of the current block. * ``blocks``: A list of all remaining blocks of the document. """ pass class ListIndentProcessor(BlockProcessor): """ Process children of list items. 
Example: * a list item process this part or this part """ INDENT_RE = re.compile(r'^(([ ]{%s})+)'% markdown.TAB_LENGTH) ITEM_TYPES = ['li'] LIST_TYPES = ['ul', 'ol'] def test(self, parent, block): return block.startswith(' '*markdown.TAB_LENGTH) and \ not self.parser.state.isstate('detabbed') and \ (parent.tag in self.ITEM_TYPES or \ (len(parent) and parent[-1] and \ (parent[-1].tag in self.LIST_TYPES) ) ) def run(self, parent, blocks): block = blocks.pop(0) level, sibling = self.get_level(parent, block) block = self.looseDetab(block, level) self.parser.state.set('detabbed') if parent.tag in self.ITEM_TYPES: # The parent is already a li. Just parse the child block. self.parser.parseBlocks(parent, [block]) elif sibling.tag in self.ITEM_TYPES: # The sibling is a li. Use it as parent. self.parser.parseBlocks(sibling, [block]) elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES: # The parent is a list (``ol`` or ``ul``) which has children. # Assume the last child li is the parent of this block. if sibling[-1].text: # If the parent li has text, that text needs to be moved to a p block = '%s\n\n%s' % (sibling[-1].text, block) sibling[-1].text = '' self.parser.parseChunk(sibling[-1], block) else: self.create_item(sibling, block) self.parser.state.reset() def create_item(self, parent, block): """ Create a new li and parse the block with it as the parent. """ li = markdown.etree.SubElement(parent, 'li') self.parser.parseBlocks(li, [block]) def get_level(self, parent, block): """ Get level of indent based on list level. """ # Get indent level m = self.INDENT_RE.match(block) if m: indent_level = len(m.group(1))/markdown.TAB_LENGTH else: indent_level = 0 if self.parser.state.isstate('list'): # We're in a tightlist - so we already are at correct parent. level = 1 else: # We're in a looselist - so we need to find parent. level = 0 # Step through children of tree to find matching indent level. while indent_level > level: child = self.lastChild(parent) if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES): if child.tag in self.LIST_TYPES: level += 1 parent = child else: # No more child levels. If we're short of indent_level, # we have a code block. So we stop here. break return level, parent class CodeBlockProcessor(BlockProcessor): """ Process code blocks. """ def test(self, parent, block): return block.startswith(' '*markdown.TAB_LENGTH) def run(self, parent, blocks): sibling = self.lastChild(parent) block = blocks.pop(0) theRest = '' if sibling and sibling.tag == "pre" and len(sibling) \ and sibling[0].tag == "code": # The previous block was a code block. As blank lines do not start # new code blocks, append this block to the previous, adding back # linebreaks removed from the split into a list. code = sibling[0] block, theRest = self.detab(block) code.text = markdown.AtomicString('%s\n%s\n' % (code.text, block.rstrip())) else: # This is a new codeblock. Create the elements and insert text. pre = markdown.etree.SubElement(parent, 'pre') code = markdown.etree.SubElement(pre, 'code') block, theRest = self.detab(block) code.text = markdown.AtomicString('%s\n' % block.rstrip()) if theRest: # This block contained unindented line(s) after the first indented # line. Insert these lines as the first block of the master blocks # list for future processing. 
blocks.insert(0, theRest) class BlockQuoteProcessor(BlockProcessor): RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)') def test(self, parent, block): return bool(self.RE.search(block)) def run(self, parent, blocks): block = blocks.pop(0) m = self.RE.search(block) if m: before = block[:m.start()] # Lines before blockquote # Pass lines before blockquote in recursively for parsing forst. self.parser.parseBlocks(parent, [before]) # Remove ``> `` from begining of each line. block = '\n'.join([self.clean(line) for line in block[m.start():].split('\n')]) sibling = self.lastChild(parent) if sibling and sibling.tag == "blockquote": # Previous block was a blockquote so set that as this blocks parent quote = sibling else: # This is a new blockquote. Create a new parent element. quote = markdown.etree.SubElement(parent, 'blockquote') # Recursively parse block with blockquote as parent. self.parser.parseChunk(quote, block) def clean(self, line): """ Remove ``>`` from beginning of a line. """ m = self.RE.match(line) if line.strip() == ">": return "" elif m: return m.group(2) else: return line class OListProcessor(BlockProcessor): """ Process ordered list blocks. """ TAG = 'ol' # Detect an item (``1. item``). ``group(1)`` contains contents of item. RE = re.compile(r'^[ ]{0,3}\d+\.[ ](.*)') # Detect items on secondary lines. they can be of either list type. CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ](.*)') # Detect indented (nested) items of either type INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ].*') def test(self, parent, block): return bool(self.RE.match(block)) def run(self, parent, blocks): # Check fr multiple items in one block. items = self.get_items(blocks.pop(0)) sibling = self.lastChild(parent) if sibling and sibling.tag in ['ol', 'ul']: # Previous block was a list item, so set that as parent lst = sibling # make sure previous item is in a p. if len(lst) and lst[-1].text and not len(lst[-1]): p = markdown.etree.SubElement(lst[-1], 'p') p.text = lst[-1].text lst[-1].text = '' # parse first block differently as it gets wrapped in a p. li = markdown.etree.SubElement(lst, 'li') self.parser.state.set('looselist') firstitem = items.pop(0) self.parser.parseBlocks(li, [firstitem]) self.parser.state.reset() else: # This is a new list so create parent with appropriate tag. lst = markdown.etree.SubElement(parent, self.TAG) self.parser.state.set('list') # Loop through items in block, recursively parsing each with the # appropriate parent. for item in items: if item.startswith(' '*markdown.TAB_LENGTH): # Item is indented. Parse with last item as parent self.parser.parseBlocks(lst[-1], [item]) else: # New item. Create li and parse with it as parent li = markdown.etree.SubElement(lst, 'li') self.parser.parseBlocks(li, [item]) self.parser.state.reset() def get_items(self, block): """ Break a block into list items. """ items = [] for line in block.split('\n'): m = self.CHILD_RE.match(line) if m: # This is a new item. Append items.append(m.group(3)) elif self.INDENT_RE.match(line): # This is an indented (possibly nested) item. if items[-1].startswith(' '*markdown.TAB_LENGTH): # Previous item was indented. Append to that item. items[-1] = '%s\n%s' % (items[-1], line) else: items.append(line) else: # This is another line of previous item. Append to that item. items[-1] = '%s\n%s' % (items[-1], line) return items class UListProcessor(OListProcessor): """ Process unordered list blocks. 
""" TAG = 'ul' RE = re.compile(r'^[ ]{0,3}[*+-][ ](.*)') class HashHeaderProcessor(BlockProcessor): """ Process Hash Headers. """ # Detect a header at start of any line in block RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)') def test(self, parent, block): return bool(self.RE.search(block)) def run(self, parent, blocks): block = blocks.pop(0) m = self.RE.search(block) if m: before = block[:m.start()] # All lines before header after = block[m.end():] # All lines after header if before: # As the header was not the first line of the block and the # lines before the header must be parsed first, # recursively parse this lines as a block. self.parser.parseBlocks(parent, [before]) # Create header using named groups from RE h = markdown.etree.SubElement(parent, 'h%d' % len(m.group('level'))) h.text = m.group('header').strip() if after: # Insert remaining lines as first block for future parsing. blocks.insert(0, after) else: # This should never happen, but just in case... message(CRITICAL, "We've got a problem header!") class SetextHeaderProcessor(BlockProcessor): """ Process Setext-style Headers. """ # Detect Setext-style header. Must be first 2 lines of block. RE = re.compile(r'^.*?\n[=-]{3,}', re.MULTILINE) def test(self, parent, block): return bool(self.RE.match(block)) def run(self, parent, blocks): lines = blocks.pop(0).split('\n') # Determine level. ``=`` is 1 and ``-`` is 2. if lines[1].startswith('='): level = 1 else: level = 2 h = markdown.etree.SubElement(parent, 'h%d' % level) h.text = lines[0].strip() if len(lines) > 2: # Block contains additional lines. Add to master blocks for later. blocks.insert(0, '\n'.join(lines[2:])) class HRProcessor(BlockProcessor): """ Process Horizontal Rules. """ RE = r'[ ]{0,3}(?P<ch>[*_-])[ ]?((?P=ch)[ ]?){2,}[ ]*' # Detect hr on any line of a block. SEARCH_RE = re.compile(r'(^|\n)%s(\n|$)' % RE) # Match a hr on a single line of text. MATCH_RE = re.compile(r'^%s$' % RE) def test(self, parent, block): return bool(self.SEARCH_RE.search(block)) def run(self, parent, blocks): lines = blocks.pop(0).split('\n') prelines = [] # Check for lines in block before hr. for line in lines: m = self.MATCH_RE.match(line) if m: break else: prelines.append(line) if len(prelines): # Recursively parse lines before hr so they get parsed first. self.parser.parseBlocks(parent, ['\n'.join(prelines)]) # create hr hr = markdown.etree.SubElement(parent, 'hr') # check for lines in block after hr. lines = lines[len(prelines)+1:] if len(lines): # Add lines after hr to master blocks for later parsing. blocks.insert(0, '\n'.join(lines)) class EmptyBlockProcessor(BlockProcessor): """ Process blocks and start with an empty line. """ # Detect a block that only contains whitespace # or only whitespace on the first line. RE = re.compile(r'^\s*\n') def test(self, parent, block): return bool(self.RE.match(block)) def run(self, parent, blocks): block = blocks.pop(0) m = self.RE.match(block) if m: # Add remaining line to master blocks for later. blocks.insert(0, block[m.end():]) sibling = self.lastChild(parent) if sibling and sibling.tag == 'pre' and sibling[0] and \ sibling[0].tag == 'code': # Last block is a codeblock. Append to preserve whitespace. sibling[0].text = markdown.AtomicString('%s/n/n/n' % sibling[0].text ) class ParagraphProcessor(BlockProcessor): """ Process Paragraph blocks. """ def test(self, parent, block): return True def run(self, parent, blocks): block = blocks.pop(0) if block.strip(): # Not a blank block. Add to parent, otherwise throw it away. 
if self.parser.state.isstate('list'): # The parent is a tight-list. Append to parent.text if parent.text: parent.text = '%s\n%s' % (parent.text, block) else: parent.text = block.lstrip() else: # Create a regular paragraph p = markdown.etree.SubElement(parent, 'p') p.text = block.lstrip()
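# --- Hedged usage sketch (not part of the module). The block processors above
# --- are exercised indirectly through markdown.markdown(); this assumes the
# --- bundled markdown 2.0-era package this file belongs to.
import markdown

src = ("# Title\n"
       "\n"
       "> quoted line\n"
       "\n"
       "1. first\n"
       "2. second\n"
       "    * nested\n")

# HashHeaderProcessor -> <h1>, BlockQuoteProcessor -> <blockquote>,
# OListProcessor/UListProcessor/ListIndentProcessor -> the nested lists.
print markdown.markdown(src)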
apache-2.0
pigeonflight/strider-plone
docker/appengine/lib/django-1.4/django/conf/locale/hr/formats.py
99
1758
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y.'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. E Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%Y-%m-%d',                     # '2006-10-25'
    '%d.%m.%Y.', '%d.%m.%y.',       # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.',   # '25. 10. 2006.', '25. 10. 06.'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',     # '14:30:59'
    '%H:%M',        # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',        # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',           # '2006-10-25 14:30'
    '%Y-%m-%d',                 # '2006-10-25'
    '%d.%m.%Y. %H:%M:%S',       # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M',          # '25.10.2006. 14:30'
    '%d.%m.%Y.',                # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S',       # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M',          # '25.10.06. 14:30'
    '%d.%m.%y.',                # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S',     # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M',        # '25. 10. 2006. 14:30'
    '%d. %m. %Y.',              # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S',     # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M',        # '25. 10. 06. 14:30'
    '%d. %m. %y.',              # '25. 10. 06.'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
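# --- Hedged sketch (not part of the locale file). Django tries each input
# --- format in order with strptime(); this mirrors that loop with the plain
# --- standard library, using the tuples defined above.
from datetime import datetime

def parse_hr_date(value):
    for fmt in DATE_INPUT_FORMATS:
        try:
            return datetime.strptime(value, fmt).date()
        except ValueError:
            continue
    raise ValueError("no matching input format for %r" % value)

assert parse_hr_date('25.10.2006.').year == 2006
assert parse_hr_date('2006-10-25').month == 10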
mit
moondrop-entertainment/django-nonrel-drawp
django/test/testcases.py
157
24882
import re import sys from urlparse import urlsplit, urlunsplit from xml.dom.minidom import parseString, Node from django.conf import settings from django.core import mail from django.core.management import call_command from django.core.signals import request_started from django.core.urlresolvers import clear_url_caches from django.db import (transaction, connection, connections, DEFAULT_DB_ALIAS, reset_queries) from django.http import QueryDict from django.test import _doctest as doctest from django.test.client import Client from django.test.utils import get_warnings_state, restore_warnings_state from django.utils import simplejson, unittest as ut2 from django.utils.encoding import smart_str from django.utils.functional import wraps __all__ = ('DocTestRunner', 'OutputChecker', 'TestCase', 'TransactionTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature') try: all except NameError: from django.utils.itercompat import all normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s) normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)", lambda m: "Decimal(\"%s\")" % m.groups()[0], s) def to_list(value): """ Puts value into a list if it's not already one. Returns an empty list if value is None. """ if value is None: value = [] elif not isinstance(value, list): value = [value] return value real_commit = transaction.commit real_rollback = transaction.rollback real_enter_transaction_management = transaction.enter_transaction_management real_leave_transaction_management = transaction.leave_transaction_management real_managed = transaction.managed def nop(*args, **kwargs): return def disable_transaction_methods(): transaction.commit = nop transaction.rollback = nop transaction.enter_transaction_management = nop transaction.leave_transaction_management = nop transaction.managed = nop def restore_transaction_methods(): transaction.commit = real_commit transaction.rollback = real_rollback transaction.enter_transaction_management = real_enter_transaction_management transaction.leave_transaction_management = real_leave_transaction_management transaction.managed = real_managed class OutputChecker(doctest.OutputChecker): def check_output(self, want, got, optionflags): "The entry method for doctest output checking. Defers to a sequence of child checkers" checks = (self.check_output_default, self.check_output_numeric, self.check_output_xml, self.check_output_json) for check in checks: if check(want, got, optionflags): return True return False def check_output_default(self, want, got, optionflags): "The default comparator provided by doctest - not perfect, but good for most purposes" return doctest.OutputChecker.check_output(self, want, got, optionflags) def check_output_numeric(self, want, got, optionflags): """Doctest does an exact string comparison of output, which means that some numerically equivalent values aren't equal. This check normalizes * long integers (22L) so that they equal normal integers. (22) * Decimals so that they are comparable, regardless of the change made to __repr__ in Python 2.6. """ return doctest.OutputChecker.check_output(self, normalize_decimals(normalize_long_ints(want)), normalize_decimals(normalize_long_ints(got)), optionflags) def check_output_xml(self, want, got, optionsflags): """Tries to do a 'xml-comparision' of want and got. Plain string comparision doesn't always work because, for example, attribute ordering should not be important. 
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py """ _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+') def norm_whitespace(v): return _norm_whitespace_re.sub(' ', v) def child_text(element): return ''.join([c.data for c in element.childNodes if c.nodeType == Node.TEXT_NODE]) def children(element): return [c for c in element.childNodes if c.nodeType == Node.ELEMENT_NODE] def norm_child_text(element): return norm_whitespace(child_text(element)) def attrs_dict(element): return dict(element.attributes.items()) def check_element(want_element, got_element): if want_element.tagName != got_element.tagName: return False if norm_child_text(want_element) != norm_child_text(got_element): return False if attrs_dict(want_element) != attrs_dict(got_element): return False want_children = children(want_element) got_children = children(got_element) if len(want_children) != len(got_children): return False for want, got in zip(want_children, got_children): if not check_element(want, got): return False return True want, got = self._strip_quotes(want, got) want = want.replace('\\n','\n') got = got.replace('\\n','\n') # If the string is not a complete xml document, we may need to add a # root element. This allow us to compare fragments, like "<foo/><bar/>" if not want.startswith('<?xml'): wrapper = '<root>%s</root>' want = wrapper % want got = wrapper % got # Parse the want and got strings, and compare the parsings. try: want_root = parseString(want).firstChild got_root = parseString(got).firstChild except: return False return check_element(want_root, got_root) def check_output_json(self, want, got, optionsflags): "Tries to compare want and got as if they were JSON-encoded data" want, got = self._strip_quotes(want, got) try: want_json = simplejson.loads(want) got_json = simplejson.loads(got) except: return False return want_json == got_json def _strip_quotes(self, want, got): """ Strip quotes of doctests output values: >>> o = OutputChecker() >>> o._strip_quotes("'foo'") "foo" >>> o._strip_quotes('"foo"') "foo" >>> o._strip_quotes("u'foo'") "foo" >>> o._strip_quotes('u"foo"') "foo" """ def is_quoted_string(s): s = s.strip() return (len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'")) def is_quoted_unicode(s): s = s.strip() return (len(s) >= 3 and s[0] == 'u' and s[1] == s[-1] and s[1] in ('"', "'")) if is_quoted_string(want) and is_quoted_string(got): want = want.strip()[1:-1] got = got.strip()[1:-1] elif is_quoted_unicode(want) and is_quoted_unicode(got): want = want.strip()[2:-1] got = got.strip()[2:-1] return want, got class DocTestRunner(doctest.DocTestRunner): def __init__(self, *args, **kwargs): doctest.DocTestRunner.__init__(self, *args, **kwargs) self.optionflags = doctest.ELLIPSIS def report_unexpected_exception(self, out, test, example, exc_info): doctest.DocTestRunner.report_unexpected_exception(self, out, test, example, exc_info) # Rollback, in case of database errors. Otherwise they'd have # side effects on other tests. 
for conn in connections: transaction.rollback_unless_managed(using=conn) class _AssertNumQueriesContext(object): def __init__(self, test_case, num, connection): self.test_case = test_case self.num = num self.connection = connection def __enter__(self): self.old_debug_cursor = self.connection.use_debug_cursor self.connection.use_debug_cursor = True self.starting_queries = len(self.connection.queries) request_started.disconnect(reset_queries) return self def __exit__(self, exc_type, exc_value, traceback): self.connection.use_debug_cursor = self.old_debug_cursor request_started.connect(reset_queries) if exc_type is not None: return final_queries = len(self.connection.queries) executed = final_queries - self.starting_queries self.test_case.assertEqual( executed, self.num, "%d queries executed, %d expected" % ( executed, self.num ) ) class TransactionTestCase(ut2.TestCase): # The class we'll use for the test client self.client. # Can be overridden in derived classes. client_class = Client def _pre_setup(self): """Performs any pre-test setup. This includes: * Flushing the database. * If the Test Case class has a 'fixtures' member, installing the named fixtures. * If the Test Case class has a 'urls' member, replace the ROOT_URLCONF with it. * Clearing the mail test outbox. """ self._fixture_setup() self._urlconf_setup() mail.outbox = [] def _fixture_setup(self): # If the test case has a multi_db=True flag, flush all databases. # Otherwise, just flush default. if getattr(self, 'multi_db', False): databases = connections else: databases = [DEFAULT_DB_ALIAS] for db in databases: call_command('flush', verbosity=0, interactive=False, database=db) if hasattr(self, 'fixtures'): # We have to use this slightly awkward syntax due to the fact # that we're using *args and **kwargs together. call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db}) def _urlconf_setup(self): if hasattr(self, 'urls'): self._old_root_urlconf = settings.ROOT_URLCONF settings.ROOT_URLCONF = self.urls clear_url_caches() def __call__(self, result=None): """ Wrapper around default __call__ method to perform common Django test set up. This means that user-defined Test Cases aren't required to include a call to super().setUp(). """ self.client = self.client_class() try: self._pre_setup() except (KeyboardInterrupt, SystemExit): raise except Exception: import sys result.addError(self, sys.exc_info()) return super(TransactionTestCase, self).__call__(result) try: self._post_teardown() except (KeyboardInterrupt, SystemExit): raise except Exception: import sys result.addError(self, sys.exc_info()) return def _post_teardown(self): """ Performs any post-test things. This includes: * Putting back the original ROOT_URLCONF if it was changed. * Force closing the connection, so that the next test gets a clean cursor. """ self._fixture_teardown() self._urlconf_teardown() # Some DB cursors include SQL statements as part of cursor # creation. If you have a test that does rollback, the effect # of these statements is lost, which can effect the operation # of tests (e.g., losing a timezone setting causing objects to # be created with the wrong time). # To make sure this doesn't happen, get a clean connection at the # start of every test. 
for connection in connections.all(): connection.close() def _fixture_teardown(self): pass def _urlconf_teardown(self): if hasattr(self, '_old_root_urlconf'): settings.ROOT_URLCONF = self._old_root_urlconf clear_url_caches() def save_warnings_state(self): """ Saves the state of the warnings module """ self._warnings_state = get_warnings_state() def restore_warnings_state(self): """ Restores the sate of the warnings module to the state saved by save_warnings_state() """ restore_warnings_state(self._warnings_state) def assertRedirects(self, response, expected_url, status_code=302, target_status_code=200, host=None, msg_prefix=''): """Asserts that a response redirected to a specific URL, and that the redirect URL can be loaded. Note that assertRedirects won't work for external links since it uses TestClient to do a request. """ if msg_prefix: msg_prefix += ": " if hasattr(response, 'redirect_chain'): # The request was a followed redirect self.assertTrue(len(response.redirect_chain) > 0, msg_prefix + "Response didn't redirect as expected: Response" " code was %d (expected %d)" % (response.status_code, status_code)) self.assertEqual(response.redirect_chain[0][1], status_code, msg_prefix + "Initial response didn't redirect as expected:" " Response code was %d (expected %d)" % (response.redirect_chain[0][1], status_code)) url, status_code = response.redirect_chain[-1] self.assertEqual(response.status_code, target_status_code, msg_prefix + "Response didn't redirect as expected: Final" " Response code was %d (expected %d)" % (response.status_code, target_status_code)) else: # Not a followed redirect self.assertEqual(response.status_code, status_code, msg_prefix + "Response didn't redirect as expected: Response" " code was %d (expected %d)" % (response.status_code, status_code)) url = response['Location'] scheme, netloc, path, query, fragment = urlsplit(url) redirect_response = response.client.get(path, QueryDict(query)) # Get the redirection page, using the same client that was used # to obtain the original response. self.assertEqual(redirect_response.status_code, target_status_code, msg_prefix + "Couldn't retrieve redirection page '%s':" " response code was %d (expected %d)" % (path, redirect_response.status_code, target_status_code)) e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url) if not (e_scheme or e_netloc): expected_url = urlunsplit(('http', host or 'testserver', e_path, e_query, e_fragment)) self.assertEqual(url, expected_url, msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)) def assertContains(self, response, text, count=None, status_code=200, msg_prefix=''): """ Asserts that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected), and that ``text`` occurs ``count`` times in the content of the response. If ``count`` is None, the count doesn't matter - the assertion is true if the text occurs at least once in the response. 
""" if msg_prefix: msg_prefix += ": " self.assertEqual(response.status_code, status_code, msg_prefix + "Couldn't retrieve content: Response code was %d" " (expected %d)" % (response.status_code, status_code)) text = smart_str(text, response._charset) real_count = response.content.count(text) if count is not None: self.assertEqual(real_count, count, msg_prefix + "Found %d instances of '%s' in response" " (expected %d)" % (real_count, text, count)) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % text) def assertNotContains(self, response, text, status_code=200, msg_prefix=''): """ Asserts that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected), and that ``text`` doesn't occurs in the content of the response. """ if msg_prefix: msg_prefix += ": " self.assertEqual(response.status_code, status_code, msg_prefix + "Couldn't retrieve content: Response code was %d" " (expected %d)" % (response.status_code, status_code)) text = smart_str(text, response._charset) self.assertEqual(response.content.count(text), 0, msg_prefix + "Response should not contain '%s'" % text) def assertFormError(self, response, form, field, errors, msg_prefix=''): """ Asserts that a form used to render the response has a specific field error. """ if msg_prefix: msg_prefix += ": " # Put context(s) into a list to simplify processing. contexts = to_list(response.context) if not contexts: self.fail(msg_prefix + "Response did not use any contexts to " "render the response") # Put error(s) into a list to simplify processing. errors = to_list(errors) # Search all contexts for the error. found_form = False for i,context in enumerate(contexts): if form not in context: continue found_form = True for err in errors: if field: if field in context[form].errors: field_errors = context[form].errors[field] self.assertTrue(err in field_errors, msg_prefix + "The field '%s' on form '%s' in" " context %d does not contain the error '%s'" " (actual errors: %s)" % (field, form, i, err, repr(field_errors))) elif field in context[form].fields: self.fail(msg_prefix + "The field '%s' on form '%s'" " in context %d contains no errors" % (field, form, i)) else: self.fail(msg_prefix + "The form '%s' in context %d" " does not contain the field '%s'" % (form, i, field)) else: non_field_errors = context[form].non_field_errors() self.assertTrue(err in non_field_errors, msg_prefix + "The form '%s' in context %d does not" " contain the non-field error '%s'" " (actual errors: %s)" % (form, i, err, non_field_errors)) if not found_form: self.fail(msg_prefix + "The form '%s' was not used to render the" " response" % form) def assertTemplateUsed(self, response, template_name, msg_prefix=''): """ Asserts that the template with the provided name was used in rendering the response. """ if msg_prefix: msg_prefix += ": " template_names = [t.name for t in response.templates] if not template_names: self.fail(msg_prefix + "No templates used to render the response") self.assertTrue(template_name in template_names, msg_prefix + "Template '%s' was not a template used to render" " the response. Actual template(s) used: %s" % (template_name, u', '.join(template_names))) def assertTemplateNotUsed(self, response, template_name, msg_prefix=''): """ Asserts that the template with the provided name was NOT used in rendering the response. 
""" if msg_prefix: msg_prefix += ": " template_names = [t.name for t in response.templates] self.assertFalse(template_name in template_names, msg_prefix + "Template '%s' was used unexpectedly in rendering" " the response" % template_name) def assertQuerysetEqual(self, qs, values, transform=repr): return self.assertEqual(map(transform, qs), values) def assertNumQueries(self, num, func=None, *args, **kwargs): using = kwargs.pop("using", DEFAULT_DB_ALIAS) connection = connections[using] context = _AssertNumQueriesContext(self, num, connection) if func is None: return context # Basically emulate the `with` statement here. context.__enter__() try: func(*args, **kwargs) except: context.__exit__(*sys.exc_info()) raise else: context.__exit__(*sys.exc_info()) def connections_support_transactions(): """ Returns True if all connections support transactions. This is messy because 2.4 doesn't support any or all. """ return all(conn.features.supports_transactions for conn in connections.all()) class TestCase(TransactionTestCase): """ Does basically the same as TransactionTestCase, but surrounds every test with a transaction, monkey-patches the real transaction management routines to do nothing, and rollsback the test transaction at the end of the test. You have to use TransactionTestCase, if you need transaction management inside a test. """ def _fixture_setup(self): if not connections_support_transactions(): return super(TestCase, self)._fixture_setup() # If the test case has a multi_db=True flag, setup all databases. # Otherwise, just use default. if getattr(self, 'multi_db', False): databases = connections else: databases = [DEFAULT_DB_ALIAS] for db in databases: transaction.enter_transaction_management(using=db) transaction.managed(True, using=db) disable_transaction_methods() from django.contrib.sites.models import Site Site.objects.clear_cache() for db in databases: if hasattr(self, 'fixtures'): call_command('loaddata', *self.fixtures, **{ 'verbosity': 0, 'commit': False, 'database': db }) def _fixture_teardown(self): if not connections_support_transactions(): return super(TestCase, self)._fixture_teardown() # If the test case has a multi_db=True flag, teardown all databases. # Otherwise, just teardown default. if getattr(self, 'multi_db', False): databases = connections else: databases = [DEFAULT_DB_ALIAS] restore_transaction_methods() for db in databases: transaction.rollback(using=db) transaction.leave_transaction_management(using=db) def _deferredSkip(condition, reason): def decorator(test_func): if not (isinstance(test_func, type) and issubclass(test_func, TestCase)): @wraps(test_func) def skip_wrapper(*args, **kwargs): if condition(): raise ut2.SkipTest(reason) return test_func(*args, **kwargs) test_item = skip_wrapper else: test_item = test_func test_item.__unittest_skip_why__ = reason return test_item return decorator def skipIfDBFeature(feature): "Skip a test if a database has the named feature" return _deferredSkip(lambda: getattr(connection.features, feature), "Database has feature %s" % feature) def skipUnlessDBFeature(feature): "Skip a test unless a database has the named feature" return _deferredSkip(lambda: not getattr(connection.features, feature), "Database doesn't support feature %s" % feature)
bsd-3-clause
fighterCui/L4ReFiascoOC
l4/pkg/python/contrib/Lib/test/test_uu.py
61
5316
""" Tests for uu module. Nick Mathewson """ import unittest from test import test_support import sys, os, uu, cStringIO import uu plaintext = "The smooth-scaled python crept over the sleeping dog\n" encodedtext = """\ M5&AE('-M;V]T:\"US8V%L960@<'ET:&]N(&-R97!T(&]V97(@=&AE('-L965P (:6YG(&1O9PH """ encodedtextwrapped = "begin %03o %s\n" + encodedtext.replace("%", "%%") + "\n \nend\n" class UUTest(unittest.TestCase): def test_encode(self): inp = cStringIO.StringIO(plaintext) out = cStringIO.StringIO() uu.encode(inp, out, "t1") self.assertEqual(out.getvalue(), encodedtextwrapped % (0666, "t1")) inp = cStringIO.StringIO(plaintext) out = cStringIO.StringIO() uu.encode(inp, out, "t1", 0644) self.assertEqual(out.getvalue(), encodedtextwrapped % (0644, "t1")) def test_decode(self): inp = cStringIO.StringIO(encodedtextwrapped % (0666, "t1")) out = cStringIO.StringIO() uu.decode(inp, out) self.assertEqual(out.getvalue(), plaintext) inp = cStringIO.StringIO( "UUencoded files may contain many lines,\n" + "even some that have 'begin' in them.\n" + encodedtextwrapped % (0666, "t1") ) out = cStringIO.StringIO() uu.decode(inp, out) self.assertEqual(out.getvalue(), plaintext) def test_truncatedinput(self): inp = cStringIO.StringIO("begin 644 t1\n" + encodedtext) out = cStringIO.StringIO() try: uu.decode(inp, out) self.fail("No exception thrown") except uu.Error, e: self.assertEqual(str(e), "Truncated input file") def test_missingbegin(self): inp = cStringIO.StringIO("") out = cStringIO.StringIO() try: uu.decode(inp, out) self.fail("No exception thrown") except uu.Error, e: self.assertEqual(str(e), "No valid begin line found in input file") class UUStdIOTest(unittest.TestCase): def setUp(self): self.stdin = sys.stdin self.stdout = sys.stdout def tearDown(self): sys.stdin = self.stdin sys.stdout = self.stdout def test_encode(self): sys.stdin = cStringIO.StringIO(plaintext) sys.stdout = cStringIO.StringIO() uu.encode("-", "-", "t1", 0666) self.assertEqual( sys.stdout.getvalue(), encodedtextwrapped % (0666, "t1") ) def test_decode(self): sys.stdin = cStringIO.StringIO(encodedtextwrapped % (0666, "t1")) sys.stdout = cStringIO.StringIO() uu.decode("-", "-") self.assertEqual(sys.stdout.getvalue(), plaintext) class UUFileTest(unittest.TestCase): def _kill(self, f): # close and remove file try: f.close() except (SystemExit, KeyboardInterrupt): raise except: pass try: os.unlink(f.name) except (SystemExit, KeyboardInterrupt): raise except: pass def setUp(self): self.tmpin = test_support.TESTFN + "i" self.tmpout = test_support.TESTFN + "o" def tearDown(self): del self.tmpin del self.tmpout def test_encode(self): fin = fout = None try: test_support.unlink(self.tmpin) fin = open(self.tmpin, 'wb') fin.write(plaintext) fin.close() fin = open(self.tmpin, 'rb') fout = open(self.tmpout, 'w') uu.encode(fin, fout, self.tmpin, mode=0644) fin.close() fout.close() fout = open(self.tmpout, 'r') s = fout.read() fout.close() self.assertEqual(s, encodedtextwrapped % (0644, self.tmpin)) # in_file and out_file as filenames uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0644) fout = open(self.tmpout, 'r') s = fout.read() fout.close() self.assertEqual(s, encodedtextwrapped % (0644, self.tmpin)) finally: self._kill(fin) self._kill(fout) def test_decode(self): f = None try: test_support.unlink(self.tmpin) f = open(self.tmpin, 'w') f.write(encodedtextwrapped % (0644, self.tmpout)) f.close() f = open(self.tmpin, 'r') uu.decode(f) f.close() f = open(self.tmpout, 'r') s = f.read() f.close() self.assertEqual(s, plaintext) # XXX is there an 
xp way to verify the mode? finally: self._kill(f) def test_decodetwice(self): # Verify that decode() will refuse to overwrite an existing file f = None try: f = cStringIO.StringIO(encodedtextwrapped % (0644, self.tmpout)) f = open(self.tmpin, 'r') uu.decode(f) f.close() f = open(self.tmpin, 'r') self.assertRaises(uu.Error, uu.decode, f) f.close() finally: self._kill(f) def test_main(): test_support.run_unittest(UUTest, UUStdIOTest, UUFileTest) if __name__=="__main__": test_main()
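# --- Hedged stand-alone sketch (not part of the test module): the same
# --- encode/decode round trip the tests above perform, on in-memory buffers
# --- (Python 2, like the module itself).
import uu, cStringIO

text = "The smooth-scaled python crept over the sleeping dog\n"
enc = cStringIO.StringIO()
uu.encode(cStringIO.StringIO(text), enc, "t1", 0644)

dec = cStringIO.StringIO()
uu.decode(cStringIO.StringIO(enc.getvalue()), dec)
assert dec.getvalue() == text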
gpl-2.0
GreenCoinX/greencoin
qa/rpc-tests/util.py
1
12392
# Copyright (c) 2014 The GreenCoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Helpful routines for regression testing # # Add python-greencoinrpc to module search path: import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-greencoinrpc")) from decimal import Decimal, ROUND_DOWN import json import random import shutil import subprocess import time import re from greencoinrpc.authproxy import AuthServiceProxy, JSONRPCException from util import * def p2p_port(n): return 11000 + n + os.getpid()%999 def rpc_port(n): return 12000 + n + os.getpid()%999 def check_json_precision(): """Make sure json library being used does not lose precision converting XGC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def sync_blocks(rpc_connections): """ Wait until everybody has the same block count """ while True: counts = [ x.getblockcount() for x in rpc_connections ] if counts == [ counts[0] ]*len(counts): break time.sleep(1) def sync_mempools(rpc_connections): """ Wait until everybody has the same transactions in their memory pools """ while True: pool = set(rpc_connections[0].getrawmempool()) num_match = 1 for i in range(1, len(rpc_connections)): if set(rpc_connections[i].getrawmempool()) == pool: num_match = num_match+1 if num_match == len(rpc_connections): break time.sleep(1) greencoind_processes = {} def initialize_datadir(dirname, n): datadir = os.path.join(dirname, "node"+str(n)) if not os.path.isdir(datadir): os.makedirs(datadir) with open(os.path.join(datadir, "greencoin.conf"), 'w') as f: f.write("regtest=1\n"); f.write("rpcuser=rt\n"); f.write("rpcpassword=rt\n"); f.write("port="+str(p2p_port(n))+"\n"); f.write("rpcport="+str(rpc_port(n))+"\n"); return datadir def initialize_chain(test_dir): """ Create (or copy from cache) a 200-block-long chain and 4 wallets. greencoind and greencoin-cli must be in search path. """ if not os.path.isdir(os.path.join("cache", "node0")): devnull = open("/dev/null", "w+") # Create cache directories, run greencoinds: for i in range(4): datadir=initialize_datadir("cache", i) args = [ os.getenv("GREENCOIND", "greencoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ] if i > 0: args.append("-connect=127.0.0.1:"+str(p2p_port(0))) greencoind_processes[i] = subprocess.Popen(args) subprocess.check_call([ os.getenv("GREENCOINCLI", "greencoin-cli"), "-datadir="+datadir, "-rpcwait", "getblockcount"], stdout=devnull) devnull.close() rpcs = [] for i in range(4): try: url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),) rpcs.append(AuthServiceProxy(url)) except: sys.stderr.write("Error connecting to "+url+"\n") sys.exit(1) # Create a 200-block-long chain; each of the 4 nodes # gets 25 mature blocks and 25 immature. 
# blocks are created with timestamps 10 minutes apart, starting # at 1 Jan 2014 block_time = 1388534400 for i in range(2): for peer in range(4): for j in range(25): set_node_times(rpcs, block_time) rpcs[peer].setgenerate(True, 1) block_time += 10*60 # Must sync before next peer starts generating blocks sync_blocks(rpcs) # Shut them down, and clean up cache directories: stop_nodes(rpcs) wait_greencoinds() for i in range(4): os.remove(log_filename("cache", i, "debug.log")) os.remove(log_filename("cache", i, "db.log")) os.remove(log_filename("cache", i, "peers.dat")) os.remove(log_filename("cache", i, "fee_estimates.dat")) for i in range(4): from_dir = os.path.join("cache", "node"+str(i)) to_dir = os.path.join(test_dir, "node"+str(i)) shutil.copytree(from_dir, to_dir) initialize_datadir(test_dir, i) # Overwrite port/rpcport in greencoin.conf def initialize_chain_clean(test_dir, num_nodes): """ Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization. """ for i in range(num_nodes): datadir=initialize_datadir(test_dir, i) def _rpchost_to_args(rpchost): '''Convert optional IP:port spec to rpcconnect/rpcport args''' if rpchost is None: return [] match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost) if not match: raise ValueError('Invalid RPC host spec ' + rpchost) rpcconnect = match.group(1) rpcport = match.group(2) if rpcconnect.startswith('['): # remove IPv6 [...] wrapping rpcconnect = rpcconnect[1:-1] rv = ['-rpcconnect=' + rpcconnect] if rpcport: rv += ['-rpcport=' + rpcport] return rv def start_node(i, dirname, extra_args=None, rpchost=None): """ Start a greencoind and return RPC connection to it """ datadir = os.path.join(dirname, "node"+str(i)) args = [ os.getenv("GREENCOIND", "greencoind"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ] if extra_args is not None: args.extend(extra_args) greencoind_processes[i] = subprocess.Popen(args) devnull = open("/dev/null", "w+") subprocess.check_call([ os.getenv("GREENCOINCLI", "greencoin-cli"), "-datadir="+datadir] + _rpchost_to_args(rpchost) + ["-rpcwait", "getblockcount"], stdout=devnull) devnull.close() url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i)) proxy = AuthServiceProxy(url) proxy.url = url # store URL on proxy for info return proxy def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None): """ Start multiple greencoinds, return RPC connections to them """ if extra_args is None: extra_args = [ None for i in range(num_nodes) ] return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ] def log_filename(dirname, n_node, logname): return os.path.join(dirname, "node"+str(n_node), "regtest", logname) def stop_node(node, i): node.stop() greencoind_processes[i].wait() del greencoind_processes[i] def stop_nodes(nodes): for node in nodes: node.stop() del nodes[:] # Emptying array closes connections as a side effect def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) def wait_greencoinds(): # Wait for all greencoinds to cleanly exit for greencoind in greencoind_processes.values(): greencoind.wait() greencoind_processes.clear() def connect_nodes(from_connection, node_num): ip_port = "127.0.0.1:"+str(p2p_port(node_num)) from_connection.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()): time.sleep(0.1) def connect_nodes_bi(nodes, a, b): 
connect_nodes(nodes[a], b) connect_nodes(nodes[b], a) def find_output(node, txid, amount): """ Return index to output of txid with value amount Raises exception if there is none. """ txdata = node.getrawtransaction(txid, 1) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount))) def gather_inputs(from_node, amount_needed, confirmations_required=1): """ Return a random set of unspent txouts that are enough to pay amount_needed """ assert(confirmations_required >=0) utxo = from_node.listunspent(confirmations_required) random.shuffle(utxo) inputs = [] total_in = Decimal("0.00000000") while total_in < amount_needed and len(utxo) > 0: t = utxo.pop() total_in += t["amount"] inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } ) if total_in < amount_needed: raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in)) return (total_in, inputs) def make_change(from_node, amount_in, amount_out, fee): """ Create change output(s), return them """ outputs = {} amount = amount_out+fee change = amount_in - amount if change > amount*2: # Create an extra change output to break up big inputs change_address = from_node.getnewaddress() # Split change in two, being careful of rounding: outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) change = amount_in - amount - outputs[change_address] if change > 0: outputs[from_node.getnewaddress()] = change return outputs def send_zeropri_transaction(from_node, to_node, amount, fee): """ Create&broadcast a zero-priority transaction. Returns (txid, hex-encoded-txdata) Ensures transaction is zero-priority by first creating a send-to-self, then using it's output """ # Create a send-to-self with confirmed inputs: self_address = from_node.getnewaddress() (total_in, inputs) = gather_inputs(from_node, amount+fee*2) outputs = make_change(from_node, total_in, amount+fee, fee) outputs[self_address] = float(amount+fee) self_rawtx = from_node.createrawtransaction(inputs, outputs) self_signresult = from_node.signrawtransaction(self_rawtx) self_txid = from_node.sendrawtransaction(self_signresult["hex"], True) vout = find_output(from_node, self_txid, amount+fee) # Now immediately spend the output to create a 1-input, 1-output # zero-priority transaction: inputs = [ { "txid" : self_txid, "vout" : vout } ] outputs = { to_node.getnewaddress() : float(amount) } rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"]) def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random zero-priority transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment*random.randint(0,fee_variants) (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee) return (txid, txhex, fee) def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random transaction. 
Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment*random.randint(0,fee_variants) (total_in, inputs) = gather_inputs(from_node, amount+fee) outputs = make_change(from_node, total_in, amount, fee) outputs[to_node.getnewaddress()] = float(amount) rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"], fee) def assert_equal(thing1, thing2): if thing1 != thing2: raise AssertionError("%s != %s"%(str(thing1),str(thing2))) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s"%(str(thing1),str(thing2))) def assert_raises(exc, fun, *args, **kwds): try: fun(*args, **kwds) except exc: pass except Exception as e: raise AssertionError("Unexpected exception raised: "+type(e).__name__) else: raise AssertionError("No exception raised")
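# --- Hedged usage sketch (not part of util.py). It assumes greencoind and
# --- greencoin-cli are on PATH and shows the typical life cycle a regression
# --- test drives with the helpers above.
import tempfile

test_dir = tempfile.mkdtemp()
initialize_chain_clean(test_dir, 2)   # empty regtest chains for 2 nodes
nodes = start_nodes(2, test_dir)
connect_nodes_bi(nodes, 0, 1)

nodes[0].setgenerate(True, 101)       # mine enough blocks to mature a coinbase
sync_blocks(nodes)
assert_equal(nodes[1].getblockcount(), 101)

stop_nodes(nodes)
wait_greencoinds()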
mit
qizenguf/MLC-STT
ext/ply/doc/makedoc.py
177
5862
#!/usr/local/bin/python

###############################################################################
# Takes a chapter as input and adds internal links and numbering to all
# of the H1, H2, H3, H4 and H5 sections.
#
# Every heading HTML tag (H1, H2 etc) is given an autogenerated name to link
# to. However, if the name is not an autogenerated name from a previous run,
# it will be kept. If it is autogenerated, it might change on subsequent runs
# of this program. Thus if you want to create links to one of the headings,
# then change the heading link name to something that does not look like an
# autogenerated link name.
###############################################################################

import sys
import re
import string

###############################################################################
# Functions
###############################################################################

# Regexs for <a name="..."></a>
alink = re.compile(r"<a *name *= *\"(.*)\"></a>", re.IGNORECASE)
heading = re.compile(r"(_nn\d)", re.IGNORECASE)

def getheadingname(m):
    autogeneratedheading = True
    if m.group(1) is not None:
        amatch = alink.match(m.group(1))
        if amatch:
            # A non-autogenerated heading - keep it
            headingname = amatch.group(1)
            # search, not match: the _nn marker sits mid-name (e.g. "ply_nn12"),
            # so an anchored match would never recognize an autogenerated name
            autogeneratedheading = heading.search(headingname)
    if autogeneratedheading:
        # The heading name was either non-existent or autogenerated,
        # so we can create a new heading / change the existing heading
        headingname = "%s_nn%d" % (filenamebase, nameindex)
    return headingname

###############################################################################
# Main program
###############################################################################

if len(sys.argv) != 2:
    print "usage: makedoc.py filename"
    sys.exit(1)

filename = sys.argv[1]
filenamebase = string.split(filename, ".")[0]

section = 0
subsection = 0
subsubsection = 0
subsubsubsection = 0
nameindex = 0

name = ""

# Regexs for <h1>,... <h5> sections
h1 = re.compile(r".*?<H1>(<a.*a>)*[\d\.\s]*(.*?)</H1>", re.IGNORECASE)
h2 = re.compile(r".*?<H2>(<a.*a>)*[\d\.\s]*(.*?)</H2>", re.IGNORECASE)
h3 = re.compile(r".*?<H3>(<a.*a>)*[\d\.\s]*(.*?)</H3>", re.IGNORECASE)
h4 = re.compile(r".*?<H4>(<a.*a>)*[\d\.\s]*(.*?)</H4>", re.IGNORECASE)
h5 = re.compile(r".*?<H5>(<a.*a>)*[\d\.\s]*(.*?)</H5>", re.IGNORECASE)

data = open(filename).read()            # Read data
open(filename + ".bak", "w").write(data)  # Make backup

lines = data.splitlines()
result = []  # This is the result of postprocessing the file
index = "<!-- INDEX -->\n<div class=\"sectiontoc\">\n"
# index contains the index for adding at the top of the file. Also printed to stdout.

skip = 0
skipspace = 0

for s in lines:
    if s == "<!-- INDEX -->":
        if not skip:
            result.append("@INDEX@")
            skip = 1
        else:
            skip = 0
        continue
    if skip:
        continue

    if not s and skipspace:
        continue

    if skipspace:
        result.append("")
        result.append("")
        skipspace = 0

    m = h2.match(s)
    if m:
        prevheadingtext = m.group(2)
        nameindex += 1
        section += 1
        headingname = getheadingname(m)
        result.append("""<H2><a name="%s"></a>%d. %s</H2>""" % (headingname, section, prevheadingtext))

        if subsubsubsection:
            index += "</ul>\n"
        if subsubsection:
            index += "</ul>\n"
        if subsection:
            index += "</ul>\n"
        if section == 1:
            index += "<ul>\n"

        index += """<li><a href="#%s">%s</a>\n""" % (headingname, prevheadingtext)

        subsection = 0
        subsubsection = 0
        subsubsubsection = 0
        skipspace = 1
        continue

    m = h3.match(s)
    if m:
        prevheadingtext = m.group(2)
        nameindex += 1
        subsection += 1
        headingname = getheadingname(m)
        result.append("""<H3><a name="%s"></a>%d.%d %s</H3>""" % (headingname, section, subsection, prevheadingtext))

        if subsubsubsection:
            index += "</ul>\n"
        if subsubsection:
            index += "</ul>\n"
        if subsection == 1:
            index += "<ul>\n"

        index += """<li><a href="#%s">%s</a>\n""" % (headingname, prevheadingtext)

        subsubsection = 0
        skipspace = 1
        continue

    m = h4.match(s)
    if m:
        prevheadingtext = m.group(2)
        nameindex += 1
        subsubsection += 1
        subsubsubsection = 0
        headingname = getheadingname(m)
        result.append("""<H4><a name="%s"></a>%d.%d.%d %s</H4>""" % (headingname, section, subsection, subsubsection, prevheadingtext))

        if subsubsubsection:
            index += "</ul>\n"
        if subsubsection == 1:
            index += "<ul>\n"

        index += """<li><a href="#%s">%s</a>\n""" % (headingname, prevheadingtext)

        skipspace = 1
        continue

    m = h5.match(s)
    if m:
        prevheadingtext = m.group(2)
        nameindex += 1
        subsubsubsection += 1
        headingname = getheadingname(m)
        result.append("""<H5><a name="%s"></a>%d.%d.%d.%d %s</H5>""" % (headingname, section, subsection, subsubsection, subsubsubsection, prevheadingtext))

        if subsubsubsection == 1:
            index += "<ul>\n"

        index += """<li><a href="#%s">%s</a>\n""" % (headingname, prevheadingtext)

        skipspace = 1
        continue

    result.append(s)

if subsubsubsection:
    index += "</ul>\n"
if subsubsection:
    index += "</ul>\n"
if subsection:
    index += "</ul>\n"
if section:
    index += "</ul>\n"

index += "</div>\n<!-- INDEX -->\n"

data = "\n".join(result)
data = data.replace("@INDEX@", index) + "\n"

# Write the file back out
open(filename, "w").write(data)
bsd-3-clause
mitnk/letsencrypt
letsencrypt/display/util.py
2
20691
"""Let's Encrypt display.""" import os import textwrap import dialog import zope.interface from letsencrypt import interfaces from letsencrypt import errors from letsencrypt.display import completer WIDTH = 72 HEIGHT = 20 DSELECT_HELP = ( "Use the arrow keys or Tab to move between window elements. Space can be " "used to complete the input path with the selected element in the " "directory window. Pressing enter will select the currently highlighted " "button.") """Help text on how to use dialog's dselect.""" # Display exit codes OK = "ok" """Display exit code indicating user acceptance.""" CANCEL = "cancel" """Display exit code for a user canceling the display.""" HELP = "help" """Display exit code when for when the user requests more help.""" def _wrap_lines(msg): """Format lines nicely to 80 chars. :param str msg: Original message :returns: Formatted message respecting newlines in message :rtype: str """ lines = msg.splitlines() fixed_l = [] for line in lines: fixed_l.append(textwrap.fill(line, 80)) return os.linesep.join(fixed_l) @zope.interface.implementer(interfaces.IDisplay) class NcursesDisplay(object): """Ncurses-based display.""" def __init__(self, width=WIDTH, height=HEIGHT): super(NcursesDisplay, self).__init__() self.dialog = dialog.Dialog() self.width = width self.height = height def notification(self, message, height=10, pause=False): # pylint: disable=unused-argument """Display a notification to the user and wait for user acceptance. .. todo:: It probably makes sense to use one of the transient message types for pause. It isn't straightforward how best to approach the matter though given the context of our messages. http://pythondialog.sourceforge.net/doc/widgets.html#displaying-transient-messages :param str message: Message to display :param int height: Height of the dialog box :param bool pause: Not applicable to NcursesDisplay """ self.dialog.msgbox(message, height, width=self.width) def menu(self, message, choices, ok_label="OK", cancel_label="Cancel", help_label="", **unused_kwargs): """Display a menu. :param str message: title of menu :param choices: menu lines, len must be > 0 :type choices: list of tuples (`tag`, `item`) tags must be unique or list of items (tags will be enumerated) :param str ok_label: label of the OK button :param str help_label: label of the help button :param dict unused_kwargs: absorbs default / cli_args :returns: tuple of the form (`code`, `index`) where `code` - int display exit code `int` - index of the selected item :rtype: tuple """ menu_options = { "choices": choices, "ok_label": ok_label, "cancel_label": cancel_label, "help_button": bool(help_label), "help_label": help_label, "width": self.width, "height": self.height, "menu_height": self.height - 6, } # Can accept either tuples or just the actual choices if choices and isinstance(choices[0], tuple): # pylint: disable=star-args code, selection = self.dialog.menu(message, **menu_options) # Return the selection index for i, choice in enumerate(choices): if choice[0] == selection: return code, i return code, -1 else: # "choices" is not formatted the way the dialog.menu expects... menu_options["choices"] = [ (str(i), choice) for i, choice in enumerate(choices, 1) ] # pylint: disable=star-args code, index = self.dialog.menu(message, **menu_options) if code == CANCEL: return code, -1 return code, int(index) - 1 def input(self, message, **unused_kwargs): """Display an input box to the user. :param str message: Message to display that asks for input. 
:param dict _kwargs: absorbs default / cli_args :returns: tuple of the form (`code`, `string`) where `code` - int display exit code `string` - input entered by the user """ sections = message.split("\n") # each section takes at least one line, plus extras if it's longer than self.width wordlines = [1 + (len(section) / self.width) for section in sections] height = 6 + sum(wordlines) + len(sections) return self.dialog.inputbox(message, width=self.width, height=height) def yesno(self, message, yes_label="Yes", no_label="No", **unused_kwargs): """Display a Yes/No dialog box. Yes and No label must begin with different letters. :param str message: message to display to user :param str yes_label: label on the "yes" button :param str no_label: label on the "no" button :param dict _kwargs: absorbs default / cli_args :returns: if yes_label was selected :rtype: bool """ return self.dialog.DIALOG_OK == self.dialog.yesno( message, self.height, self.width, yes_label=yes_label, no_label=no_label) def checklist(self, message, tags, default_status=True, **unused_kwargs): """Displays a checklist. :param message: Message to display before choices :param list tags: where each is of type :class:`str` len(tags) > 0 :param bool default_status: If True, items are in a selected state by default. :param dict _kwargs: absorbs default / cli_args :returns: tuple of the form (`code`, `list_tags`) where `code` - int display exit code `list_tags` - list of str tags selected by the user """ choices = [(tag, "", default_status) for tag in tags] return self.dialog.checklist( message, width=self.width, height=self.height, choices=choices) def directory_select(self, message, **unused_kwargs): """Display a directory selection screen. :param str message: prompt to give the user :returns: tuple of the form (`code`, `string`) where `code` - int display exit code `string` - input entered by the user """ root_directory = os.path.abspath(os.sep) return self.dialog.dselect( filepath=root_directory, width=self.width, height=self.height, help_button=True, title=message) @zope.interface.implementer(interfaces.IDisplay) class FileDisplay(object): """File-based display.""" def __init__(self, outfile): super(FileDisplay, self).__init__() self.outfile = outfile def notification(self, message, height=10, pause=True): # pylint: disable=unused-argument """Displays a notification and waits for user acceptance. :param str message: Message to display :param int height: No effect for FileDisplay :param bool pause: Whether or not the program should pause for the user's confirmation """ side_frame = "-" * 79 message = _wrap_lines(message) self.outfile.write( "{line}{frame}{line}{msg}{line}{frame}{line}".format( line=os.linesep, frame=side_frame, msg=message)) if pause: raw_input("Press Enter to Continue") def menu(self, message, choices, ok_label="", cancel_label="", help_label="", **unused_kwargs): # pylint: disable=unused-argument """Display a menu. .. todo:: This doesn't enable the help label/button (I wasn't sold on any interface I came up with for this). 
It would be a nice feature :param str message: title of menu :param choices: Menu lines, len must be > 0 :type choices: list of tuples (tag, item) or list of descriptions (tags will be enumerated) :param dict _kwargs: absorbs default / cli_args :returns: tuple of (`code`, `index`) where `code` - str display exit code `index` - int index of the user's selection :rtype: tuple """ self._print_menu(message, choices) code, selection = self._get_valid_int_ans(len(choices)) return code, selection - 1 def input(self, message, **unused_kwargs): # pylint: disable=no-self-use """Accept input from the user. :param str message: message to display to the user :param dict _kwargs: absorbs default / cli_args :returns: tuple of (`code`, `input`) where `code` - str display exit code `input` - str of the user's input :rtype: tuple """ ans = raw_input( textwrap.fill("%s (Enter 'c' to cancel): " % message, 80)) if ans == "c" or ans == "C": return CANCEL, "-1" else: return OK, ans def yesno(self, message, yes_label="Yes", no_label="No", **unused_kwargs): """Query the user with a yes/no question. Yes and No label must begin with different letters, and must contain at least one letter each. :param str message: question for the user :param str yes_label: Label of the "Yes" parameter :param str no_label: Label of the "No" parameter :param dict _kwargs: absorbs default / cli_args :returns: True for "Yes", False for "No" :rtype: bool """ side_frame = ("-" * 79) + os.linesep message = _wrap_lines(message) self.outfile.write("{0}{frame}{msg}{0}{frame}".format( os.linesep, frame=side_frame, msg=message)) while True: ans = raw_input("{yes}/{no}: ".format( yes=_parens_around_char(yes_label), no=_parens_around_char(no_label))) # Couldn't get pylint indentation right with elif # elif doesn't matter in this situation if (ans.startswith(yes_label[0].lower()) or ans.startswith(yes_label[0].upper())): return True if (ans.startswith(no_label[0].lower()) or ans.startswith(no_label[0].upper())): return False def checklist(self, message, tags, default_status=True, **unused_kwargs): # pylint: disable=unused-argument """Display a checklist. :param str message: Message to display to user :param list tags: `str` tags to select, len(tags) > 0 :param bool default_status: Not used for FileDisplay :param dict _kwargs: absorbs default / cli_args :returns: tuple of (`code`, `tags`) where `code` - str display exit code `tags` - list of selected tags :rtype: tuple """ while True: self._print_menu(message, tags) code, ans = self.input("Select the appropriate numbers separated " "by commas and/or spaces") if code == OK: indices = separate_list_input(ans) selected_tags = self._scrub_checklist_input(indices, tags) if selected_tags: return code, selected_tags else: self.outfile.write( "** Error - Invalid selection **%s" % os.linesep) else: return code, [] def directory_select(self, message, **unused_kwargs): """Display a directory selection screen. :param str message: prompt to give the user :returns: tuple of the form (`code`, `string`) where `code` - int display exit code `string` - input entered by the user """ with completer.Completer(): return self.input(message) def _scrub_checklist_input(self, indices, tags): # pylint: disable=no-self-use """Validate input and transform indices to appropriate tags. 
:param list indices: input :param list tags: Original tags of the checklist :returns: valid tags the user selected :rtype: :class:`list` of :class:`str` """ # They should all be of type int try: indices = [int(index) for index in indices] except ValueError: return [] # Remove duplicates indices = list(set(indices)) # Check all input is within range for index in indices: if index < 1 or index > len(tags): return [] # Transform indices to appropriate tags return [tags[index - 1] for index in indices] def _print_menu(self, message, choices): """Print a menu on the screen. :param str message: title of menu :param choices: Menu lines :type choices: list of tuples (tag, item) or list of descriptions (tags will be enumerated) """ # Can take either tuples or single items in choices list if choices and isinstance(choices[0], tuple): choices = ["%s - %s" % (c[0], c[1]) for c in choices] # Write out the message to the user self.outfile.write( "{new}{msg}{new}".format(new=os.linesep, msg=message)) side_frame = ("-" * 79) + os.linesep self.outfile.write(side_frame) # Write out the menu choices for i, desc in enumerate(choices, 1): self.outfile.write( textwrap.fill("{num}: {desc}".format(num=i, desc=desc), 80)) # Keep this outside of the textwrap self.outfile.write(os.linesep) self.outfile.write(side_frame) def _get_valid_int_ans(self, max_): """Get a numerical selection. :param int max: The maximum entry (len of choices), must be positive :returns: tuple of the form (`code`, `selection`) where `code` - str display exit code ('ok' or cancel') `selection` - int user's selection :rtype: tuple """ selection = -1 if max_ > 1: input_msg = ("Select the appropriate number " "[1-{max_}] then [enter] (press 'c' to " "cancel): ".format(max_=max_)) else: input_msg = ("Press 1 [enter] to confirm the selection " "(press 'c' to cancel): ") while selection < 1: ans = raw_input(input_msg) if ans.startswith("c") or ans.startswith("C"): return CANCEL, -1 try: selection = int(ans) if selection < 1 or selection > max_: selection = -1 raise ValueError except ValueError: self.outfile.write( "{0}** Invalid input **{0}".format(os.linesep)) return OK, selection @zope.interface.implementer(interfaces.IDisplay) class NoninteractiveDisplay(object): """An iDisplay implementation that never asks for interactive user input""" def __init__(self, outfile): super(NoninteractiveDisplay, self).__init__() self.outfile = outfile def _interaction_fail(self, message, cli_flag, extra=""): "Error out in case of an attempt to interact in noninteractive mode" msg = "Missing command line flag or config entry for this setting:\n" msg += message if extra: msg += "\n" + extra if cli_flag: msg += "\n\n(You can set this with the {0} flag)".format(cli_flag) raise errors.MissingCommandlineFlag(msg) def notification(self, message, height=10, pause=False): # pylint: disable=unused-argument """Displays a notification without waiting for user acceptance. :param str message: Message to display to stdout :param int height: No effect for NoninteractiveDisplay :param bool pause: The NoninteractiveDisplay waits for no keyboard """ side_frame = "-" * 79 message = _wrap_lines(message) self.outfile.write( "{line}{frame}{line}{msg}{line}{frame}{line}".format( line=os.linesep, frame=side_frame, msg=message)) def menu(self, message, choices, ok_label=None, cancel_label=None, help_label=None, default=None, cli_flag=None): # pylint: disable=unused-argument,too-many-arguments """Avoid displaying a menu. 
:param str message: title of menu :param choices: Menu lines, len must be > 0 :type choices: list of tuples (tag, item) or list of descriptions (tags will be enumerated) :param int default: the default choice :param dict kwargs: absorbs various irrelevant labelling arguments :returns: tuple of (`code`, `index`) where `code` - str display exit code `index` - int index of the user's selection :rtype: tuple :raises errors.MissingCommandlineFlag: if there was no default """ if default is None: self._interaction_fail(message, cli_flag, "Choices: " + repr(choices)) return OK, default def input(self, message, default=None, cli_flag=None): """Accept input from the user. :param str message: message to display to the user :returns: tuple of (`code`, `input`) where `code` - str display exit code `input` - str of the user's input :rtype: tuple :raises errors.MissingCommandlineFlag: if there was no default """ if default is None: self._interaction_fail(message, cli_flag) else: return OK, default def yesno(self, message, yes_label=None, no_label=None, default=None, cli_flag=None): # pylint: disable=unused-argument """Decide Yes or No, without asking anybody :param str message: question for the user :param dict kwargs: absorbs yes_label, no_label :raises errors.MissingCommandlineFlag: if there was no default :returns: True for "Yes", False for "No" :rtype: bool """ if default is None: self._interaction_fail(message, cli_flag) else: return default def checklist(self, message, tags, default=None, cli_flag=None, **kwargs): # pylint: disable=unused-argument """Display a checklist. :param str message: Message to display to user :param list tags: `str` tags to select, len(tags) > 0 :param dict kwargs: absorbs default_status arg :returns: tuple of (`code`, `tags`) where `code` - str display exit code `tags` - list of selected tags :rtype: tuple """ if default is None: self._interaction_fail(message, cli_flag, "? ".join(tags)) else: return OK, default def directory_select(self, message, default=None, cli_flag=None): """Simulate prompting the user for a directory. This function returns default if it is not ``None``, otherwise, an exception is raised explaining the problem. If cli_flag is not ``None``, the error message will include the flag that can be used to set this value with the CLI. :param str message: prompt to give the user :param default: default value to return (if one exists) :param str cli_flag: option used to set this value with the CLI :returns: tuple of the form (`code`, `string`) where `code` - int display exit code `string` - input entered by the user """ return self.input(message, default, cli_flag) def separate_list_input(input_): """Separate a comma or space separated list. :param str input_: input from the user :returns: strings :rtype: list """ no_commas = input_.replace(",", " ") # Each string is naturally unicode, this causes problems with M2Crypto SANs # TODO: check if above is still true when M2Crypto is gone ^ return [str(string) for string in no_commas.split()] def _parens_around_char(label): """Place parens around first character of label. :param str label: Must contain at least one character """ return "({first}){rest}".format(first=label[0], rest=label[1:])
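
# Illustrative sketch (not part of the upstream module): behaviour of the two
# small helpers above. separate_list_input normalizes commas to whitespace
# before splitting, and _parens_around_char builds "(Y)es"-style prompts.
#
#   separate_list_input("1, 2 3,4")  -> ['1', '2', '3', '4']
#   _parens_around_char("Yes")       -> '(Y)es'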
apache-2.0
glennw/servo
tests/wpt/harness/wptrunner/executors/executorselenium.py
58
9434
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.

import os
import socket
import sys
import threading
import time
import traceback
import urlparse
import uuid

from .base import (ExecutorException,
                   Protocol,
                   RefTestExecutor,
                   RefTestImplementation,
                   TestExecutor,
                   TestharnessExecutor,
                   testharness_result_converter,
                   reftest_result_converter,
                   strip_server)
from ..testrunner import Stop

here = os.path.join(os.path.split(__file__)[0])

webdriver = None
exceptions = None
RemoteConnection = None

extra_timeout = 5


def do_delayed_imports():
    global webdriver
    global exceptions
    global RemoteConnection
    from selenium import webdriver
    from selenium.common import exceptions
    from selenium.webdriver.remote.remote_connection import RemoteConnection


class SeleniumProtocol(Protocol):
    def __init__(self, executor, browser, capabilities, **kwargs):
        do_delayed_imports()

        Protocol.__init__(self, executor, browser)
        self.capabilities = capabilities
        self.url = browser.webdriver_url
        self.webdriver = None

    def setup(self, runner):
        """Connect to browser via Selenium's WebDriver implementation."""
        self.runner = runner
        self.logger.debug("Connecting to Selenium on URL: %s" % self.url)

        session_started = False
        try:
            self.webdriver = webdriver.Remote(
                command_executor=RemoteConnection(self.url.strip("/"), resolve_ip=False),
                desired_capabilities=self.capabilities)
        except:
            self.logger.warning(
                "Connecting to Selenium failed:\n%s" % traceback.format_exc())
        else:
            self.logger.debug("Selenium session started")
            session_started = True

        if not session_started:
            self.logger.warning("Failed to connect to Selenium")
            self.executor.runner.send_message("init_failed")
        else:
            try:
                self.after_connect()
            except:
                print >> sys.stderr, traceback.format_exc()
                self.logger.warning(
                    "Failed to connect to navigate initial page")
                self.executor.runner.send_message("init_failed")
            else:
                self.executor.runner.send_message("init_succeeded")

    def teardown(self):
        self.logger.debug("Hanging up on Selenium session")
        try:
            self.webdriver.quit()
        except:
            pass
        del self.webdriver

    def is_alive(self):
        try:
            # Get a simple property over the connection
            self.webdriver.current_window_handle
        # TODO what exception?
        except (socket.timeout, exceptions.ErrorInResponseException):
            return False
        return True

    def after_connect(self):
        self.load_runner("http")

    def load_runner(self, protocol):
        url = urlparse.urljoin(self.executor.server_url(protocol),
                               "/testharness_runner.html")
        self.logger.debug("Loading %s" % url)
        self.webdriver.get(url)
        self.webdriver.execute_script("document.title = '%s'" %
                                      threading.current_thread().name.replace("'", '"'))

    def wait(self):
        while True:
            try:
                self.webdriver.execute_async_script("")
            except exceptions.TimeoutException:
                pass
            except (socket.timeout, exceptions.NoSuchWindowException,
                    exceptions.ErrorInResponseException, IOError):
                break
            except Exception as e:
                self.logger.error(traceback.format_exc(e))
                break


class SeleniumRun(object):
    def __init__(self, func, webdriver, url, timeout):
        self.func = func
        self.result = None
        self.webdriver = webdriver
        self.url = url
        self.timeout = timeout
        self.result_flag = threading.Event()

    def run(self):
        timeout = self.timeout

        try:
            self.webdriver.set_script_timeout((timeout + extra_timeout) * 1000)
        except exceptions.ErrorInResponseException:
            self.logger.error("Lost webdriver connection")
            return Stop

        executor = threading.Thread(target=self._run)
        executor.start()

        flag = self.result_flag.wait(timeout + 2 * extra_timeout)
        if self.result is None:
            assert not flag
            self.result = False, ("EXTERNAL-TIMEOUT", None)

        return self.result

    def _run(self):
        try:
            self.result = True, self.func(self.webdriver, self.url, self.timeout)
        except exceptions.TimeoutException:
            self.result = False, ("EXTERNAL-TIMEOUT", None)
        except (socket.timeout, exceptions.ErrorInResponseException):
            self.result = False, ("CRASH", None)
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            message += traceback.format_exc(e)
            self.result = False, ("ERROR", e)
        finally:
            self.result_flag.set()


class SeleniumTestharnessExecutor(TestharnessExecutor):
    def __init__(self, browser, server_config, timeout_multiplier=1,
                 close_after_done=True, capabilities=None, debug_info=None):
        """Selenium-based executor for testharness.js tests"""
        TestharnessExecutor.__init__(self, browser, server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.protocol = SeleniumProtocol(self, browser, capabilities)
        with open(os.path.join(here, "testharness_webdriver.js")) as f:
            self.script = f.read()
        self.close_after_done = close_after_done
        self.window_id = str(uuid.uuid4())

    def is_alive(self):
        return self.protocol.is_alive()

    def on_protocol_change(self, new_protocol):
        self.protocol.load_runner(new_protocol)

    def do_test(self, test):
        url = self.test_url(test)

        success, data = SeleniumRun(self.do_testharness,
                                    self.protocol.webdriver,
                                    url,
                                    test.timeout * self.timeout_multiplier).run()

        if success:
            return self.convert_result(test, data)

        return (test.result_cls(*data), [])

    def do_testharness(self, webdriver, url, timeout):
        return webdriver.execute_async_script(
            self.script % {"abs_url": url,
                           "url": strip_server(url),
                           "window_id": self.window_id,
                           "timeout_multiplier": self.timeout_multiplier,
                           "timeout": timeout * 1000})


class SeleniumRefTestExecutor(RefTestExecutor):
    def __init__(self, browser, server_config, timeout_multiplier=1,
                 screenshot_cache=None, close_after_done=True,
                 debug_info=None, capabilities=None):
        """Selenium WebDriver-based executor for reftests"""
        RefTestExecutor.__init__(self,
                                 browser,
                                 server_config,
                                 screenshot_cache=screenshot_cache,
                                 timeout_multiplier=timeout_multiplier,
                                 debug_info=debug_info)
        self.protocol = SeleniumProtocol(self, browser,
                                         capabilities=capabilities)
        self.implementation = RefTestImplementation(self)
        self.close_after_done = close_after_done
        self.has_window = False

        with open(os.path.join(here, "reftest.js")) as f:
            self.script = f.read()
        with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
            self.wait_script = f.read()

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        self.logger.info("Test requires OS-level window focus")

        self.protocol.webdriver.set_window_size(600, 600)

        result = self.implementation.run_test(test)

        return self.convert_result(test, result)

    def screenshot(self, test, viewport_size, dpi):
        # https://github.com/w3c/wptrunner/issues/166
        assert viewport_size is None
        assert dpi is None

        return SeleniumRun(self._screenshot,
                           self.protocol.webdriver,
                           self.test_url(test),
                           test.timeout).run()

    def _screenshot(self, webdriver, url, timeout):
        webdriver.get(url)

        webdriver.execute_async_script(self.wait_script)

        screenshot = webdriver.get_screenshot_as_base64()

        # strip off the data:img/png, part of the url
        if screenshot.startswith("data:image/png;base64,"):
            screenshot = screenshot.split(",", 1)[1]

        return screenshot
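
# Illustrative sketch (not part of the upstream module): the timeout pattern
# SeleniumRun implements above, reduced to a standalone helper. A blocking
# callable runs on a worker thread; if the threading.Event is never set in
# time, a timeout marker is returned instead of the real result.
def _run_with_timeout_sketch(func, timeout):
    result = [None]
    done = threading.Event()

    def worker():
        try:
            result[0] = (True, func())
        finally:
            done.set()

    threading.Thread(target=worker).start()
    if not done.wait(timeout):
        return (False, ("EXTERNAL-TIMEOUT", None))
    return result[0]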
mpl-2.0
rversteegen/commandergenius
project/jni/python/src/Lib/test/test_scriptpackages.py
58
1329
# Copyright (C) 2003 Python Software Foundation

import unittest
from test import test_support
import aetools


class TestScriptpackages(unittest.TestCase):

    def _test_scriptpackage(self, package, testobject=1):
        # Check that we can import the package
        mod = __import__(package)
        # Test that we can get the main event class
        klass = getattr(mod, package)
        # Test that we can instantiate that class
        talker = klass()
        if testobject:
            # Test that we can get an application object
            obj = mod.application(0)

    def test__builtinSuites(self):
        self._test_scriptpackage('_builtinSuites', testobject=0)

    def test_StdSuites(self):
        self._test_scriptpackage('StdSuites')

    def test_SystemEvents(self):
        self._test_scriptpackage('SystemEvents')

    def test_Finder(self):
        self._test_scriptpackage('Finder')

    def test_Terminal(self):
        self._test_scriptpackage('Terminal')

    def test_Netscape(self):
        self._test_scriptpackage('Netscape')

    def test_Explorer(self):
        self._test_scriptpackage('Explorer')

    def test_CodeWarrior(self):
        self._test_scriptpackage('CodeWarrior')


def test_main():
    test_support.run_unittest(TestScriptpackages)


if __name__ == '__main__':
    test_main()
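
# Illustrative note (not part of the upstream test): __import__(name) returns
# the module object, so getattr(mod, package) then fetches the main event
# class each script package exposes under its own name, e.g.:
#
#   mod = __import__('Finder')      # the package itself
#   klass = getattr(mod, 'Finder')  # its main event class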
lgpl-2.1
runjmc/maraschino
lib/werkzeug/test.py
77
32616
# -*- coding: utf-8 -*- """ werkzeug.test ~~~~~~~~~~~~~ This module implements a client to WSGI applications for testing. :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import sys import urlparse import mimetypes from time import time from random import random from itertools import chain from tempfile import TemporaryFile from cStringIO import StringIO from cookielib import CookieJar from urllib2 import Request as U2Request from werkzeug._internal import _empty_stream, _get_environ from werkzeug.wrappers import BaseRequest from werkzeug.urls import url_encode, url_fix, iri_to_uri, _unquote from werkzeug.wsgi import get_host, get_current_url, ClosingIterator from werkzeug.utils import dump_cookie from werkzeug.datastructures import FileMultiDict, MultiDict, \ CombinedMultiDict, Headers, FileStorage def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500, boundary=None, charset='utf-8'): """Encode a dict of values (either strings or file descriptors or :class:`FileStorage` objects.) into a multipart encoded string stored in a file descriptor. """ if boundary is None: boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random()) _closure = [StringIO(), 0, False] if use_tempfile: def write(string): stream, total_length, on_disk = _closure if on_disk: stream.write(string) else: length = len(string) if length + _closure[1] <= threshold: stream.write(string) else: new_stream = TemporaryFile('wb+') new_stream.write(stream.getvalue()) new_stream.write(string) _closure[0] = new_stream _closure[2] = True _closure[1] = total_length + length else: write = _closure[0].write if not isinstance(values, MultiDict): values = MultiDict(values) for key, values in values.iterlists(): for value in values: write('--%s\r\nContent-Disposition: form-data; name="%s"' % (boundary, key)) reader = getattr(value, 'read', None) if reader is not None: filename = getattr(value, 'filename', getattr(value, 'name', None)) content_type = getattr(value, 'content_type', None) if content_type is None: content_type = filename and \ mimetypes.guess_type(filename)[0] or \ 'application/octet-stream' if filename is not None: write('; filename="%s"\r\n' % filename) else: write('\r\n') write('Content-Type: %s\r\n\r\n' % content_type) while 1: chunk = reader(16384) if not chunk: break write(chunk) else: if isinstance(value, unicode): value = value.encode(charset) write('\r\n\r\n' + str(value)) write('\r\n') write('--%s--\r\n' % boundary) length = int(_closure[0].tell()) _closure[0].seek(0) return _closure[0], length, boundary def encode_multipart(values, boundary=None, charset='utf-8'): """Like `stream_encode_multipart` but returns a tuple in the form (``boundary``, ``data``) where data is a bytestring. 
""" stream, length, boundary = stream_encode_multipart( values, use_tempfile=False, boundary=boundary, charset=charset) return boundary, stream.read() def File(fd, filename=None, mimetype=None): """Backwards compat.""" from warnings import warn warn(DeprecationWarning('werkzeug.test.File is deprecated, use the ' 'EnvironBuilder or FileStorage instead')) return FileStorage(fd, filename=filename, content_type=mimetype) class _TestCookieHeaders(object): """A headers adapter for cookielib """ def __init__(self, headers): self.headers = headers def getheaders(self, name): headers = [] name = name.lower() for k, v in self.headers: if k.lower() == name: headers.append(v) return headers class _TestCookieResponse(object): """Something that looks like a httplib.HTTPResponse, but is actually just an adapter for our test responses to make them available for cookielib. """ def __init__(self, headers): self.headers = _TestCookieHeaders(headers) def info(self): return self.headers class _TestCookieJar(CookieJar): """A cookielib.CookieJar modified to inject and read cookie headers from and to wsgi environments, and wsgi application responses. """ def inject_wsgi(self, environ): """Inject the cookies as client headers into the server's wsgi environment. """ cvals = [] for cookie in self: cvals.append('%s=%s' % (cookie.name, cookie.value)) if cvals: environ['HTTP_COOKIE'] = '; '.join(cvals) def extract_wsgi(self, environ, headers): """Extract the server's set-cookie headers as cookies into the cookie jar. """ self.extract_cookies( _TestCookieResponse(headers), U2Request(get_current_url(environ)), ) def _iter_data(data): """Iterates over a dict or multidict yielding all keys and values. This is used to iterate over the data passed to the :class:`EnvironBuilder`. """ if isinstance(data, MultiDict): for key, values in data.iterlists(): for value in values: yield key, value else: for key, values in data.iteritems(): if isinstance(values, list): for value in values: yield key, value else: yield key, values class EnvironBuilder(object): """This class can be used to conveniently create a WSGI environment for testing purposes. It can be used to quickly create WSGI environments or request objects from arbitrary data. The signature of this class is also used in some other places as of Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`, :meth:`Client.open`). Because of this most of the functionality is available through the constructor alone. Files and regular form data can be manipulated independently of each other with the :attr:`form` and :attr:`files` attributes, but are passed with the same argument to the constructor: `data`. `data` can be any of these values: - a `str`: If it's a string it is converted into a :attr:`input_stream`, the :attr:`content_length` is set and you have to provide a :attr:`content_type`. - a `dict`: If it's a dict the keys have to be strings and the values any of the following objects: - a :class:`file`-like object. These are converted into :class:`FileStorage` objects automatically. - a tuple. The :meth:`~FileMultiDict.add_file` method is called with the tuple items as positional arguments. .. versionadded:: 0.6 `path` and `base_url` can now be unicode strings that are encoded using the :func:`iri_to_uri` function. :param path: the path of the request. In the WSGI environment this will end up as `PATH_INFO`. If the `query_string` is not defined and there is a question mark in the `path` everything after it is used as query string. 
:param base_url: the base URL is a URL that is used to extract the WSGI URL scheme, host (server name + server port) and the script root (`SCRIPT_NAME`). :param query_string: an optional string or dict with URL parameters. :param method: the HTTP method to use, defaults to `GET`. :param input_stream: an optional input stream. Do not specify this and `data`. As soon as an input stream is set you can't modify :attr:`args` and :attr:`files` unless you set the :attr:`input_stream` to `None` again. :param content_type: The content type for the request. As of 0.5 you don't have to provide this when specifying files and form data via `data`. :param content_length: The content length for the request. You don't have to specify this when providing data via `data`. :param errors_stream: an optional error stream that is used for `wsgi.errors`. Defaults to :data:`stderr`. :param multithread: controls `wsgi.multithread`. Defaults to `False`. :param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`. :param run_once: controls `wsgi.run_once`. Defaults to `False`. :param headers: an optional list or :class:`Headers` object of headers. :param data: a string or dict of form data. See explanation above. :param environ_base: an optional dict of environment defaults. :param environ_overrides: an optional dict of environment overrides. :param charset: the charset used to encode unicode data. """ #: the server protocol to use. defaults to HTTP/1.1 server_protocol = 'HTTP/1.1' #: the wsgi version to use. defaults to (1, 0) wsgi_version = (1, 0) #: the default request class for :meth:`get_request` request_class = BaseRequest def __init__(self, path='/', base_url=None, query_string=None, method='GET', input_stream=None, content_type=None, content_length=None, errors_stream=None, multithread=False, multiprocess=False, run_once=False, headers=None, data=None, environ_base=None, environ_overrides=None, charset='utf-8'): if query_string is None and '?' 
in path: path, query_string = path.split('?', 1) self.charset = charset if isinstance(path, unicode): path = iri_to_uri(path, charset) self.path = path if base_url is not None: if isinstance(base_url, unicode): base_url = iri_to_uri(base_url, charset) else: base_url = url_fix(base_url, charset) self.base_url = base_url if isinstance(query_string, basestring): self.query_string = query_string else: if query_string is None: query_string = MultiDict() elif not isinstance(query_string, MultiDict): query_string = MultiDict(query_string) self.args = query_string self.method = method if headers is None: headers = Headers() elif not isinstance(headers, Headers): headers = Headers(headers) self.headers = headers self.content_type = content_type if errors_stream is None: errors_stream = sys.stderr self.errors_stream = errors_stream self.multithread = multithread self.multiprocess = multiprocess self.run_once = run_once self.environ_base = environ_base self.environ_overrides = environ_overrides self.input_stream = input_stream self.content_length = content_length self.closed = False if data: if input_stream is not None: raise TypeError('can\'t provide input stream and data') if isinstance(data, basestring): self.input_stream = StringIO(data) if self.content_length is None: self.content_length = len(data) else: for key, value in _iter_data(data): if isinstance(value, (tuple, dict)) or \ hasattr(value, 'read'): self._add_file_from_data(key, value) else: self.form.setlistdefault(key).append(value) def _add_file_from_data(self, key, value): """Called in the EnvironBuilder to add files from the data dict.""" if isinstance(value, tuple): self.files.add_file(key, *value) elif isinstance(value, dict): from warnings import warn warn(DeprecationWarning('it\'s no longer possible to pass dicts ' 'as `data`. Use tuples or FileStorage ' 'objects instead'), stacklevel=2) value = dict(value) mimetype = value.pop('mimetype', None) if mimetype is not None: value['content_type'] = mimetype self.files.add_file(key, **value) else: self.files.add_file(key, value) def _get_base_url(self): return urlparse.urlunsplit((self.url_scheme, self.host, self.script_root, '', '')).rstrip('/') + '/' def _set_base_url(self, value): if value is None: scheme = 'http' netloc = 'localhost' scheme = 'http' script_root = '' else: scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(value) if qs or anchor: raise ValueError('base url must not contain a query string ' 'or fragment') self.script_root = script_root.rstrip('/') self.host = netloc self.url_scheme = scheme base_url = property(_get_base_url, _set_base_url, doc=''' The base URL is a URL that is used to extract the WSGI URL scheme, host (server name + server port) and the script root (`SCRIPT_NAME`).''') del _get_base_url, _set_base_url def _get_content_type(self): ct = self.headers.get('Content-Type') if ct is None and not self._input_stream: if self.method in ('POST', 'PUT', 'PATCH'): if self._files: return 'multipart/form-data' return 'application/x-www-form-urlencoded' return None return ct def _set_content_type(self, value): if value is None: self.headers.pop('Content-Type', None) else: self.headers['Content-Type'] = value content_type = property(_get_content_type, _set_content_type, doc=''' The content type for the request. Reflected from and to the :attr:`headers`. 
Do not set if you set :attr:`files` or :attr:`form` for auto detection.''') del _get_content_type, _set_content_type def _get_content_length(self): return self.headers.get('Content-Length', type=int) def _set_content_length(self, value): if value is None: self.headers.pop('Content-Length', None) else: self.headers['Content-Length'] = str(value) content_length = property(_get_content_length, _set_content_length, doc=''' The content length as integer. Reflected from and to the :attr:`headers`. Do not set if you set :attr:`files` or :attr:`form` for auto detection.''') del _get_content_length, _set_content_length def form_property(name, storage, doc): key = '_' + name def getter(self): if self._input_stream is not None: raise AttributeError('an input stream is defined') rv = getattr(self, key) if rv is None: rv = storage() setattr(self, key, rv) return rv def setter(self, value): self._input_stream = None setattr(self, key, value) return property(getter, setter, doc) form = form_property('form', MultiDict, doc=''' A :class:`MultiDict` of form values.''') files = form_property('files', FileMultiDict, doc=''' A :class:`FileMultiDict` of uploaded files. You can use the :meth:`~FileMultiDict.add_file` method to add new files to the dict.''') del form_property def _get_input_stream(self): return self._input_stream def _set_input_stream(self, value): self._input_stream = value self._form = self._files = None input_stream = property(_get_input_stream, _set_input_stream, doc=''' An optional input stream. If you set this it will clear :attr:`form` and :attr:`files`.''') del _get_input_stream, _set_input_stream def _get_query_string(self): if self._query_string is None: if self._args is not None: return url_encode(self._args, charset=self.charset) return '' return self._query_string def _set_query_string(self, value): self._query_string = value self._args = None query_string = property(_get_query_string, _set_query_string, doc=''' The query string. If you set this to a string :attr:`args` will no longer be available.''') del _get_query_string, _set_query_string def _get_args(self): if self._query_string is not None: raise AttributeError('a query string is defined') if self._args is None: self._args = MultiDict() return self._args def _set_args(self, value): self._query_string = None self._args = value args = property(_get_args, _set_args, doc=''' The URL arguments as :class:`MultiDict`.''') del _get_args, _set_args @property def server_name(self): """The server name (read-only, use :attr:`host` to set)""" return self.host.split(':', 1)[0] @property def server_port(self): """The server port as integer (read-only, use :attr:`host` to set)""" pieces = self.host.split(':', 1) if len(pieces) == 2 and pieces[1].isdigit(): return int(pieces[1]) elif self.url_scheme == 'https': return 443 return 80 def __del__(self): self.close() def close(self): """Closes all files. If you put real :class:`file` objects into the :attr:`files` dict you can call this method to automatically close them all in one go. 
""" if self.closed: return try: files = self.files.itervalues() except AttributeError: files = () for f in files: try: f.close() except Exception, e: pass self.closed = True def get_environ(self): """Return the built environ.""" input_stream = self.input_stream content_length = self.content_length content_type = self.content_type if input_stream is not None: start_pos = input_stream.tell() input_stream.seek(0, 2) end_pos = input_stream.tell() input_stream.seek(start_pos) content_length = end_pos - start_pos elif content_type == 'multipart/form-data': values = CombinedMultiDict([self.form, self.files]) input_stream, content_length, boundary = \ stream_encode_multipart(values, charset=self.charset) content_type += '; boundary="%s"' % boundary elif content_type == 'application/x-www-form-urlencoded': values = url_encode(self.form, charset=self.charset) content_length = len(values) input_stream = StringIO(values) else: input_stream = _empty_stream result = {} if self.environ_base: result.update(self.environ_base) def _path_encode(x): if isinstance(x, unicode): x = x.encode(self.charset) return _unquote(x) result.update({ 'REQUEST_METHOD': self.method, 'SCRIPT_NAME': _path_encode(self.script_root), 'PATH_INFO': _path_encode(self.path), 'QUERY_STRING': self.query_string, 'SERVER_NAME': self.server_name, 'SERVER_PORT': str(self.server_port), 'HTTP_HOST': self.host, 'SERVER_PROTOCOL': self.server_protocol, 'CONTENT_TYPE': content_type or '', 'CONTENT_LENGTH': str(content_length or '0'), 'wsgi.version': self.wsgi_version, 'wsgi.url_scheme': self.url_scheme, 'wsgi.input': input_stream, 'wsgi.errors': self.errors_stream, 'wsgi.multithread': self.multithread, 'wsgi.multiprocess': self.multiprocess, 'wsgi.run_once': self.run_once }) for key, value in self.headers.to_list(self.charset): result['HTTP_%s' % key.upper().replace('-', '_')] = value if self.environ_overrides: result.update(self.environ_overrides) return result def get_request(self, cls=None): """Returns a request with the data. If the request class is not specified :attr:`request_class` is used. :param cls: The request wrapper to use. """ if cls is None: cls = self.request_class return cls(self.get_environ()) class ClientRedirectError(Exception): """ If a redirect loop is detected when using follow_redirects=True with the :cls:`Client`, then this exception is raised. """ class Client(object): """This class allows to send requests to a wrapped application. The response wrapper can be a class or factory function that takes three arguments: app_iter, status and headers. The default response wrapper just returns a tuple. Example:: class ClientResponse(BaseResponse): ... client = Client(MyApplication(), response_wrapper=ClientResponse) The use_cookies parameter indicates whether cookies should be stored and sent for subsequent requests. This is True by default, but passing False will disable this behaviour. If you want to request some subdomain of your application you may set `allow_subdomain_redirects` to `True` as if not no external redirects are allowed. .. versionadded:: 0.5 `use_cookies` is new in this version. Older versions did not provide builtin cookie support. 
""" def __init__(self, application, response_wrapper=None, use_cookies=True, allow_subdomain_redirects=False): self.application = application if response_wrapper is None: response_wrapper = lambda a, s, h: (a, s, h) self.response_wrapper = response_wrapper if use_cookies: self.cookie_jar = _TestCookieJar() else: self.cookie_jar = None self.redirect_client = None self.allow_subdomain_redirects = allow_subdomain_redirects def set_cookie(self, server_name, key, value='', max_age=None, expires=None, path='/', domain=None, secure=None, httponly=False, charset='utf-8'): """Sets a cookie in the client's cookie jar. The server name is required and has to match the one that is also passed to the open call. """ assert self.cookie_jar is not None, 'cookies disabled' header = dump_cookie(key, value, max_age, expires, path, domain, secure, httponly, charset) environ = create_environ(path, base_url='http://' + server_name) headers = [('Set-Cookie', header)] self.cookie_jar.extract_wsgi(environ, headers) def delete_cookie(self, server_name, key, path='/', domain=None): """Deletes a cookie in the test client.""" self.set_cookie(server_name, key, expires=0, max_age=0, path=path, domain=domain) def open(self, *args, **kwargs): """Takes the same arguments as the :class:`EnvironBuilder` class with some additions: You can provide a :class:`EnvironBuilder` or a WSGI environment as only argument instead of the :class:`EnvironBuilder` arguments and two optional keyword arguments (`as_tuple`, `buffered`) that change the type of the return value or the way the application is executed. .. versionchanged:: 0.5 If a dict is provided as file in the dict for the `data` parameter the content type has to be called `content_type` now instead of `mimetype`. This change was made for consistency with :class:`werkzeug.FileWrapper`. The `follow_redirects` parameter was added to :func:`open`. Additional parameters: :param as_tuple: Returns a tuple in the form ``(environ, result)`` :param buffered: Set this to True to buffer the application run. This will automatically close the application for you as well. :param follow_redirects: Set this to True if the `Client` should follow HTTP redirects. """ as_tuple = kwargs.pop('as_tuple', False) buffered = kwargs.pop('buffered', False) follow_redirects = kwargs.pop('follow_redirects', False) environ = None if not kwargs and len(args) == 1: if isinstance(args[0], EnvironBuilder): environ = args[0].get_environ() elif isinstance(args[0], dict): environ = args[0] if environ is None: builder = EnvironBuilder(*args, **kwargs) try: environ = builder.get_environ() finally: builder.close() if self.cookie_jar is not None: self.cookie_jar.inject_wsgi(environ) rv = run_wsgi_app(self.application, environ, buffered=buffered) if self.cookie_jar is not None: self.cookie_jar.extract_wsgi(environ, rv[2]) # handle redirects redirect_chain = [] status_code = int(rv[1].split(None, 1)[0]) while status_code in (301, 302, 303, 305, 307) and follow_redirects: if not self.redirect_client: # assume that we're not using the user defined response wrapper # so that we don't need any ugly hacks to get the status # code from the response. 
self.redirect_client = Client(self.application) self.redirect_client.cookie_jar = self.cookie_jar redirect = dict(rv[2])['Location'] scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(redirect) base_url = urlparse.urlunsplit((scheme, netloc, '', '', '')).rstrip('/') + '/' cur_server_name = netloc.split(':', 1)[0].split('.') real_server_name = get_host(environ).split(':', 1)[0].split('.') if self.allow_subdomain_redirects: allowed = cur_server_name[-len(real_server_name):] == real_server_name else: allowed = cur_server_name == real_server_name if not allowed: raise RuntimeError('%r does not support redirect to ' 'external targets' % self.__class__) redirect_chain.append((redirect, status_code)) # the redirect request should be a new request, and not be based on # the old request redirect_kwargs = { 'path': script_root, 'base_url': base_url, 'query_string': qs, 'as_tuple': True, 'buffered': buffered, 'follow_redirects': False, } environ, rv = self.redirect_client.open(**redirect_kwargs) status_code = int(rv[1].split(None, 1)[0]) # Prevent loops if redirect_chain[-1] in redirect_chain[:-1]: raise ClientRedirectError("loop detected") response = self.response_wrapper(*rv) if as_tuple: return environ, response return response def get(self, *args, **kw): """Like open but method is enforced to GET.""" kw['method'] = 'GET' return self.open(*args, **kw) def patch(self, *args, **kw): """Like open but method is enforced to PATCH.""" kw['method'] = 'PATCH' return self.open(*args, **kw) def post(self, *args, **kw): """Like open but method is enforced to POST.""" kw['method'] = 'POST' return self.open(*args, **kw) def head(self, *args, **kw): """Like open but method is enforced to HEAD.""" kw['method'] = 'HEAD' return self.open(*args, **kw) def put(self, *args, **kw): """Like open but method is enforced to PUT.""" kw['method'] = 'PUT' return self.open(*args, **kw) def delete(self, *args, **kw): """Like open but method is enforced to DELETE.""" kw['method'] = 'DELETE' return self.open(*args, **kw) def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.application ) def create_environ(*args, **kwargs): """Create a new WSGI environ dict based on the values passed. The first parameter should be the path of the request which defaults to '/'. The second one can either be an absolute path (in that case the host is localhost:80) or a full path to the request with scheme, netloc port and the path to the script. This accepts the same arguments as the :class:`EnvironBuilder` constructor. .. versionchanged:: 0.5 This function is now a thin wrapper over :class:`EnvironBuilder` which was added in 0.5. The `headers`, `environ_base`, `environ_overrides` and `charset` parameters were added. """ builder = EnvironBuilder(*args, **kwargs) try: return builder.get_environ() finally: builder.close() def run_wsgi_app(app, environ, buffered=False): """Return a tuple in the form (app_iter, status, headers) of the application output. This works best if you pass it an application that returns an iterator all the time. Sometimes applications may use the `write()` callable returned by the `start_response` function. This tries to resolve such edge cases automatically. But if you don't get the expected output you should set `buffered` to `True` which enforces buffering. If passed an invalid WSGI application the behavior of this function is undefined. Never pass non-conforming WSGI applications to this function. :param app: the application to execute. :param buffered: set to `True` to enforce buffering. 
:return: tuple in the form ``(app_iter, status, headers)`` """ environ = _get_environ(environ) response = [] buffer = [] def start_response(status, headers, exc_info=None): if exc_info is not None: raise exc_info[0], exc_info[1], exc_info[2] response[:] = [status, headers] return buffer.append app_iter = app(environ, start_response) # when buffering we emit the close call early and convert the # application iterator into a regular list if buffered: close_func = getattr(app_iter, 'close', None) try: app_iter = list(app_iter) finally: if close_func is not None: close_func() # otherwise we iterate the application iter until we have # a response, chain the already received data with the already # collected data and wrap it in a new `ClosingIterator` if # we have a close callable. else: while not response: buffer.append(app_iter.next()) if buffer: close_func = getattr(app_iter, 'close', None) app_iter = chain(buffer, app_iter) if close_func is not None: app_iter = ClosingIterator(app_iter, close_func) return app_iter, response[0], response[1]
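
# Illustrative sketch (not part of the upstream module): exercising the
# helpers above against a trivial WSGI application.
def _demo_run_wsgi_app():
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello ' + environ['PATH_INFO']]

    environ = create_environ('/foo', base_url='http://localhost/')
    app_iter, status, headers = run_wsgi_app(app, environ)
    assert status == '200 OK'
    assert ''.join(app_iter) == 'Hello /foo'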
mit
noxora/flask-base
flask/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py
2769
1967
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#   Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber


class MBCSGroupProber(CharSetGroupProber):
    def __init__(self):
        CharSetGroupProber.__init__(self)
        self._mProbers = [
            UTF8Prober(),
            SJISProber(),
            EUCJPProber(),
            GB2312Prober(),
            EUCKRProber(),
            CP949Prober(),
            Big5Prober(),
            EUCTWProber()
        ]
        self.reset()
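
# Illustrative sketch (not part of the upstream module): how a group prober
# is typically driven. feed() consumes raw bytes and the group then reports
# the best sub-prober's verdict (method names per this chardet vintage; the
# charset name may be None if no prober reached a confident state).
def _demo_mbcs_prober(raw_bytes):
    prober = MBCSGroupProber()
    prober.feed(raw_bytes)
    return prober.get_charset_name(), prober.get_confidence()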
mit
montanapr/Plugin.Video.Mercy
servers/netload.py
44
1873
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for netload
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------

import urlparse, urllib2, urllib, re
import os

from core import scrapertools
from core import logger
from core import config


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("[netload.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    return video_urls


# Finds this server's videos in the given text
def find_videos(data):
    encontrados = set()
    devuelve = []

    # http://netload.in/dateiroqHV0QNJg/Salmon.Fishing.in.the.Yemen.2012.720p.UNSOLOCLIC.INFO.mkv.htm
    patronvideos = '(netload.in/[a-zA-Z0-9]+/.*?.htm)'
    logger.info("[netload.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data + '"')

    for match in matches:
        titulo = "[netload]"
        url = "http://" + match
        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'netload'])
            encontrados.add(url)
        else:
            logger.info("  duplicate url=" + url)

    # http://netload.in/datei2OuYAjcVGq.htm
    patronvideos = '(netload.in/[a-zA-Z0-9]+.htm)'
    logger.info("[netload.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data + '"')

    for match in matches:
        titulo = "[netload]"
        url = "http://" + match
        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'netload'])
            encontrados.add(url)
        else:
            logger.info("  duplicate url=" + url)

    return devuelve
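
# Illustrative sketch (not part of the upstream connector): what find_videos
# extracts from a page. Only the long-form pattern matches this input, and
# the duplicate check keeps the result list free of repeats.
#
#   find_videos('href="http://netload.in/dateiABC123/movie.mkv.htm"')
#   -> [['[netload]', 'http://netload.in/dateiABC123/movie.mkv.htm', 'netload']]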
gpl-2.0
danmichaelo/mwclient
tests/test_client.py
5
15693
# encoding=utf-8 from __future__ import print_function from six import StringIO import unittest import pytest import mwclient import logging import requests import responses import pkg_resources # part of setuptools import mock try: import json except ImportError: import simplejson as json if __name__ == "__main__": print() print("Note: Running in stand-alone mode. Consult the README") print(" (section 'Contributing') for advice on running tests.") print() logging.basicConfig(level=logging.DEBUG) class TestCase(unittest.TestCase): def makeMetaResponse(self, **kwargs): tpl = '{"query":{"general":{"generator":"MediaWiki %(version)s"},"namespaces":{"-1":{"*":"Special","canonical":"Special","case":"first-letter","id":-1},"-2":{"*":"Media","canonical":"Media","case":"first-letter","id":-2},"0":{"*":"","case":"first-letter","content":"","id":0},"1":{"*":"Talk","canonical":"Talk","case":"first-letter","id":1,"subpages":""},"10":{"*":"Template","canonical":"Template","case":"first-letter","id":10,"subpages":""},"100":{"*":"Test namespace 1","canonical":"Test namespace 1","case":"first-letter","id":100,"subpages":""},"101":{"*":"Test namespace 1 talk","canonical":"Test namespace 1 talk","case":"first-letter","id":101,"subpages":""},"102":{"*":"Test namespace 2","canonical":"Test namespace 2","case":"first-letter","id":102,"subpages":""},"103":{"*":"Test namespace 2 talk","canonical":"Test namespace 2 talk","case":"first-letter","id":103,"subpages":""},"11":{"*":"Template talk","canonical":"Template talk","case":"first-letter","id":11,"subpages":""},"1198":{"*":"Translations","canonical":"Translations","case":"first-letter","id":1198,"subpages":""},"1199":{"*":"Translations talk","canonical":"Translations talk","case":"first-letter","id":1199,"subpages":""},"12":{"*":"Help","canonical":"Help","case":"first-letter","id":12,"subpages":""},"13":{"*":"Help talk","canonical":"Help talk","case":"first-letter","id":13,"subpages":""},"14":{"*":"Category","canonical":"Category","case":"first-letter","id":14},"15":{"*":"Category talk","canonical":"Category talk","case":"first-letter","id":15,"subpages":""},"2":{"*":"User","canonical":"User","case":"first-letter","id":2,"subpages":""},"2500":{"*":"VisualEditor","canonical":"VisualEditor","case":"first-letter","id":2500},"2501":{"*":"VisualEditor talk","canonical":"VisualEditor talk","case":"first-letter","id":2501},"2600":{"*":"Topic","canonical":"Topic","case":"first-letter","defaultcontentmodel":"flow-board","id":2600},"3":{"*":"User talk","canonical":"User talk","case":"first-letter","id":3,"subpages":""},"4":{"*":"Wikipedia","canonical":"Project","case":"first-letter","id":4,"subpages":""},"460":{"*":"Campaign","canonical":"Campaign","case":"case-sensitive","defaultcontentmodel":"Campaign","id":460},"461":{"*":"Campaign talk","canonical":"Campaign talk","case":"case-sensitive","id":461},"5":{"*":"Wikipedia talk","canonical":"Project talk","case":"first-letter","id":5,"subpages":""},"6":{"*":"File","canonical":"File","case":"first-letter","id":6},"7":{"*":"File talk","canonical":"File talk","case":"first-letter","id":7,"subpages":""},"710":{"*":"TimedText","canonical":"TimedText","case":"first-letter","id":710},"711":{"*":"TimedText talk","canonical":"TimedText talk","case":"first-letter","id":711},"8":{"*":"MediaWiki","canonical":"MediaWiki","case":"first-letter","id":8,"subpages":""},"828":{"*":"Module","canonical":"Module","case":"first-letter","id":828,"subpages":""},"829":{"*":"Module talk","canonical":"Module 
talk","case":"first-letter","id":829,"subpages":""},"866":{"*":"CNBanner","canonical":"CNBanner","case":"first-letter","id":866},"867":{"*":"CNBanner talk","canonical":"CNBanner talk","case":"first-letter","id":867,"subpages":""},"9":{"*":"MediaWiki talk","canonical":"MediaWiki talk","case":"first-letter","id":9,"subpages":""},"90":{"*":"Thread","canonical":"Thread","case":"first-letter","id":90},"91":{"*":"Thread talk","canonical":"Thread talk","case":"first-letter","id":91},"92":{"*":"Summary","canonical":"Summary","case":"first-letter","id":92},"93":{"*":"Summary talk","canonical":"Summary talk","case":"first-letter","id":93}},"userinfo":{"anon":"","groups":["*"],"id":0,"name":"127.0.0.1","rights": %(rights)s}}}' tpl = tpl % {'version': kwargs.get('version', '1.24wmf17'), 'rights': json.dumps(kwargs.get('rights', ["createaccount", "read", "edit", "createpage", "createtalk", "writeapi", "editmyusercss", "editmyuserjs", "viewmywatchlist", "editmywatchlist", "viewmyprivateinfo", "editmyprivateinfo", "editmyoptions", "centralauth-merge", "abusefilter-view", "abusefilter-log", "translate", "vipsscaler-test", "upload"])) } res = json.loads(tpl) if kwargs.get('writeapi', True): res['query']['general']['writeapi'] = '' return json.dumps(res) def httpShouldReturn(self, body=None, callback=None, scheme='http', host='test.wikipedia.org', path='/w/', script='api', headers=None, status=200): url = '{scheme}://{host}{path}{script}.php'.format(scheme=scheme, host=host, path=path, script=script) if body is None: responses.add_callback(responses.POST, url, callback=callback) else: responses.add(responses.POST, url, body=body, content_type='application/json', adding_headers=headers, status=status) def stdSetup(self): self.httpShouldReturn(self.makeMetaResponse()) site = mwclient.Site('test.wikipedia.org') responses.reset() return site def makePageResponse(self, title='Dummy.jpg', **kwargs): # Creates a dummy page response pageinfo = { "contentmodel": "wikitext", "lastrevid": 112353797, "length": 389, "ns": 6, "pageid": 738154, "pagelanguage": "en", "protection": [], "title": title, "touched": "2014-09-10T20:37:25Z" } pageinfo.update(**kwargs) res = { "query": { "pages": { "9": pageinfo } } } return json.dumps(res) class TestClient(TestCase): def setUp(self): pass def testVersion(self): # The version specified in setup.py should equal the one specified in client.py version = pkg_resources.require("mwclient")[0].version assert version == mwclient.__ver__ @responses.activate def test_http_as_default(self): # 'http' should be the default scheme (for historical reasons) self.httpShouldReturn(self.makeMetaResponse(), scheme='http') site = mwclient.Site('test.wikipedia.org') assert len(responses.calls) == 1 assert responses.calls[0].request.method == 'POST' @responses.activate def test_max_lag(self): # Client should wait and retry if lag exceeds max-lag def request_callback(request): if len(responses.calls) == 0: return (200, {'x-database-lag': '0', 'retry-after': '0'}, '') else: return (200, {}, self.makeMetaResponse()) self.httpShouldReturn(callback=request_callback, scheme='http') site = mwclient.Site('test.wikipedia.org') assert len(responses.calls) == 2 assert 'retry-after' in responses.calls[0].response.headers assert 'retry-after' not in responses.calls[1].response.headers @responses.activate def test_http_error(self): # Client should raise HTTPError self.httpShouldReturn('Uh oh', scheme='http', status=400) with pytest.raises(requests.exceptions.HTTPError): site = mwclient.Site('test.wikipedia.org') 
@responses.activate def test_headers(self): # Content-type should be 'application/x-www-form-urlencoded' self.httpShouldReturn(self.makeMetaResponse(), scheme='http') site = mwclient.Site('test.wikipedia.org') assert len(responses.calls) == 1 assert 'content-type' in responses.calls[0].request.headers assert responses.calls[0].request.headers['content-type'] == 'application/x-www-form-urlencoded' @responses.activate def test_force_https(self): # Setting https should work self.httpShouldReturn(self.makeMetaResponse(), scheme='https') site = mwclient.Site(('https', 'test.wikipedia.org')) assert len(responses.calls) == 1 @responses.activate def test_user_agent_is_sent(self): # User specified user agent should be sent sent to server self.httpShouldReturn(self.makeMetaResponse()) site = mwclient.Site('test.wikipedia.org', clients_useragent='MyFabulousClient') assert 'MyFabulousClient' in responses.calls[0].request.headers['user-agent'] @responses.activate def test_basic_request(self): self.httpShouldReturn(self.makeMetaResponse()) site = mwclient.Site('test.wikipedia.org') assert 'action=query' in responses.calls[0].request.body assert 'meta=siteinfo%7Cuserinfo' in responses.calls[0].request.body @responses.activate def test_httpauth_defaults_to_basic_auth(self): self.httpShouldReturn(self.makeMetaResponse()) site = mwclient.Site('test.wikipedia.org', httpauth=('me', 'verysecret')) assert isinstance(site.httpauth, requests.auth.HTTPBasicAuth) @responses.activate def test_httpauth_raise_error_on_invalid_type(self): self.httpShouldReturn(self.makeMetaResponse()) with pytest.raises(RuntimeError): site = mwclient.Site('test.wikipedia.org', httpauth=1) @responses.activate def test_api_disabled(self): # Should raise APIDisabledError if API is not enabled self.httpShouldReturn('MediaWiki API is not enabled for this site.') with pytest.raises(mwclient.errors.APIDisabledError): site = mwclient.Site('test.wikipedia.org') @responses.activate def test_version(self): # Should parse the MediaWiki version number correctly self.httpShouldReturn(self.makeMetaResponse(version='1.16')) site = mwclient.Site('test.wikipedia.org') assert site.initialized is True assert site.version == (1, 16) @responses.activate def test_min_version(self): # Should raise MediaWikiVersionError if API version is < 1.16 self.httpShouldReturn(self.makeMetaResponse(version='1.15')) with pytest.raises(mwclient.errors.MediaWikiVersionError): site = mwclient.Site('test.wikipedia.org') # ----- Use standard setup for rest @responses.activate def test_raw_index(self): # Initializing the client should result in one request site = self.stdSetup() self.httpShouldReturn('Some data', script='index') site.raw_index(action='purge', title='Main Page') assert len(responses.calls) == 1 class TestClientUploadArgs(TestCase): def setUp(self): self.raw_call = mock.patch('mwclient.client.Site.raw_call').start() def configure(self, rights=['read', 'upload']): self.raw_call.side_effect = [self.makeMetaResponse(rights=rights)] self.site = mwclient.Site('test.wikipedia.org') self.vars = { 'fname': u'Some "ßeta" æøå.jpg', 'comment': u'Some slightly complex comment<br> π ≈ 3, © Me.jpg', 'token': u'abc+\\' } self.raw_call.side_effect = [ # 1st response: self.makePageResponse(title='File:Test.jpg', imagerepository='local', imageinfo=[{ "comment": "", "height": 1440, "metadata": [], "sha1": "69a764a9cf8307ea4130831a0aa0b9b7f9585726", "size": 123, "timestamp": "2013-12-22T07:11:07Z", "user": "TestUser", "width": 2160 }]), # 2nd response: json.dumps({'query': 
{'tokens': {'csrftoken': self.vars['token']}}}), # 3rd response: json.dumps({ "upload": { "result": "Success", "filename": self.vars['fname'], "imageinfo": [] } }) ] def tearDown(self): mock.patch.stopall() def test_upload_args(self): # Test that methods are called, and arguments sent as expected self.configure() self.site.upload(file=StringIO('test'), filename=self.vars['fname'], comment=self.vars['comment']) args, kwargs = self.raw_call.call_args data = args[1] files = args[2] assert data.get('action') == 'upload' assert data.get('filename') == self.vars['fname'] assert data.get('comment') == self.vars['comment'] assert data.get('token') == self.vars['token'] assert 'file' in files def test_upload_missing_filename(self): self.configure() with pytest.raises(TypeError): self.site.upload(file=StringIO('test')) def test_upload_ambigitious_args(self): self.configure() with pytest.raises(TypeError): self.site.upload(filename='Test', file=StringIO('test'), filekey='abc') def test_upload_missing_upload_permission(self): self.configure(rights=['read']) with pytest.raises(mwclient.errors.InsufficientPermission): self.site.upload(filename='Test', file=StringIO('test')) class TestClientGetTokens(TestCase): def setUp(self): self.raw_call = mock.patch('mwclient.client.Site.raw_call').start() def configure(self, version='1.24'): self.raw_call.return_value = self.makeMetaResponse(version=version) self.site = mwclient.Site('test.wikipedia.org') responses.reset() def tearDown(self): mock.patch.stopall() def test_token_new_system(self): # Test get_token for MW >= 1.24 self.configure(version='1.24') self.raw_call.return_value = json.dumps({ 'query': {'tokens': {'csrftoken': 'sometoken'}} }) self.site.get_token('edit') args, kwargs = self.raw_call.call_args data = args[1] assert 'intoken' not in data assert data.get('type') == 'csrf' assert 'csrf' in self.site.tokens assert self.site.tokens['csrf'] == 'sometoken' assert 'edit' not in self.site.tokens def test_token_old_system_without_specifying_title(self): # Test get_token for MW < 1.24 self.configure(version='1.23') self.raw_call.return_value = self.makePageResponse(edittoken='sometoken', title='Test') self.site.get_token('edit') args, kwargs = self.raw_call.call_args data = args[1] assert 'type' not in data assert data.get('intoken') == 'edit' assert 'edit' in self.site.tokens assert self.site.tokens['edit'] == 'sometoken' assert 'csrf' not in self.site.tokens def test_token_old_system_with_specifying_title(self): # Test get_token for MW < 1.24 self.configure(version='1.23') self.raw_call.return_value = self.makePageResponse(edittoken='sometoken', title='Some page') self.site.get_token('edit', title='Some page') args, kwargs = self.raw_call.call_args data = args[1] assert self.site.tokens['edit'] == 'sometoken' if __name__ == '__main__': unittest.main()
mit
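The mwclient suite above drives all HTTP traffic through the responses library. A minimal, self-contained sketch of that stubbing pattern follows; the URL and JSON body here are illustrative assumptions, not values taken from the suite:

import json

import requests
import responses


@responses.activate
def test_post_is_intercepted():
    # Register a canned reply; no real network traffic happens.
    responses.add(responses.POST, 'http://test.wikipedia.org/w/api.php',
                  body=json.dumps({'query': {}}),
                  content_type='application/json', status=200)
    resp = requests.post('http://test.wikipedia.org/w/api.php',
                         data={'action': 'query'})
    assert resp.status_code == 200
    assert len(responses.calls) == 1  # exactly one intercepted call

test_post_is_intercepted()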
Zouyiran/ryu
ryu/services/protocols/bgp/utils/internable.py
1
3260
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import weakref
from six.moves import intern

dict_name = intern('_internable_dict')


#
# Internable
#
class Internable(object):
    """Class that allows instances to be 'interned'. That is, given an
    instance of this class, one can obtain a canonical (interned)
    copy.

    This saves memory when there are likely to be many identical
    instances of the class -- users hold references to a single
    interned object instead of references to different objects that
    are identical.

    The interned version of a given instance is created on demand if
    necessary, and automatically cleaned up when nobody holds a
    reference to it.

    Instances of sub-classes must be usable as dictionary keys for
    Internable to work.
    """

    class Stats(object):
        def __init__(self):
            self.d = {}

        def incr(self, name):
            self.d[name] = self.d.get(name, 0) + 1

        def __repr__(self):
            return repr(self.d)

        def __str__(self):
            return str(self.d)

    @classmethod
    def _internable_init(kls):
        # Objects to be interned are held as keys in a dictionary that
        # only holds weak references to keys. As a result, when the
        # last reference to an interned object goes away, the object
        # will be removed from the dictionary.
        kls._internable_dict = weakref.WeakKeyDictionary()
        kls._internable_stats = Internable.Stats()

    @classmethod
    def intern_stats(kls):
        return kls._internable_stats

    def intern(self):
        """Returns either itself or a canonical copy of itself."""

        # If this is an interned object, return it
        if hasattr(self, '_interned'):
            self._internable_stats.incr('self')
            return self

        #
        # Got to find or create an interned object identical to this
        # one. Auto-initialize the class if need be.
        #
        kls = self.__class__

        if not hasattr(kls, dict_name):
            kls._internable_init()

        ref = kls._internable_dict.get(self)
        if ref is not None:
            # Found an interned copy; dereference the stored weak
            # reference to get the canonical object.
            obj = ref()
            if obj is not None:
                kls._internable_stats.incr('found')
                return obj

        # Create an interned copy. Take care to only keep a weak
        # reference to the object itself.
        def object_collected(obj):
            kls._internable_stats.incr('collected')
            # print("Object %s garbage collected" % obj)

        ref = weakref.ref(self, object_collected)
        kls._internable_dict[self] = ref
        self._interned = True
        kls._internable_stats.incr('inserted')
        return self
apache-2.0
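A short usage sketch for Internable, assuming the class above is importable; the Prefix subclass is hypothetical and exists only to show the dictionary-key requirement from the docstring:

class Prefix(Internable):
    def __init__(self, value):
        self.value = value

    # Instances must be usable as dictionary keys for interning to work.
    def __hash__(self):
        return hash(self.value)

    def __eq__(self, other):
        return isinstance(other, Prefix) and self.value == other.value


p = Prefix('10.0.0.0/8')
assert p.intern() is p          # first call registers p as the canonical copy
assert p.intern() is p          # later calls hand back the same object
print(Prefix.intern_stats())    # e.g. {'inserted': 1, 'self': 1}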
kmolab/kmolab.github.io
data/Brython-3.3.4/Lib/codeop.py
187
5994
r"""Utilities to compile possibly incomplete Python source code. This module provides two interfaces, broadly similar to the builtin function compile(), which take program text, a filename and a 'mode' and: - Return code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). Approach: First, check if the source consists entirely of blank lines and comments; if so, replace it with 'pass', because the built-in parser doesn't always do the right thing for these. Compile three times: as is, with \n, and with \n\n appended. If it compiles as is, it's complete. If it compiles with one \n appended, we expect more. If it doesn't compile either way, we compare the error we get when compiling with \n or \n\n appended. If the errors are the same, the code is broken. But if the errors are different, we expect more. Not intuitive; not even guaranteed to hold in future releases; but this matches the compiler's behavior from Python 1.4 through 2.2, at least. Caveat: It is possible (but not likely) that the parser stops parsing with a successful outcome before reaching the end of the source; in this case, trailing symbols may be ignored instead of causing an error. For example, a backslash followed by two newlines may be followed by arbitrary garbage. This will be fixed once the API for the parser is better. The two interfaces are: compile_command(source, filename, symbol): Compiles a single command in the manner described above. CommandCompiler(): Instances of this class have __call__ methods identical in signature to compile_command; the difference is that if the instance compiles program text containing a __future__ statement, the instance 'remembers' and compiles all subsequent program texts with the statement in force. The module also provides another class: Compile(): Instances of this class act like the built-in function compile, but with 'memory' in the sense described above. """ import __future__ _features = [getattr(__future__, fname) for fname in __future__.all_feature_names] __all__ = ["compile_command", "Compile", "CommandCompiler"] PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h def _maybe_compile(compiler, source, filename, symbol): # Check for source consisting of only blank lines and comments for line in source.split("\n"): line = line.strip() if line and line[0] != '#': break # Leave it alone else: if symbol != "eval": source = "pass" # Replace it with a 'pass' statement err = err1 = err2 = None code = code1 = code2 = None try: code = compiler(source, filename, symbol) except SyntaxError as err: pass try: code1 = compiler(source + "\n", filename, symbol) except SyntaxError as e: err1 = e try: code2 = compiler(source + "\n\n", filename, symbol) except SyntaxError as e: err2 = e if code: return code if not code1 and repr(err1) == repr(err2): raise err1 def _compile(source, filename, symbol): return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT) def compile_command(source, filename="<input>", symbol="single"): r"""Compile a command and determine whether it is incomplete. 
Arguments: source -- the source string; may contain \n characters filename -- optional filename from which source was read; default "<input>" symbol -- optional grammar start symbol; "single" (default) or "eval" Return value / exceptions raised: - Return a code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). """ return _maybe_compile(_compile, source, filename, symbol) class Compile: """Instances of this class behave much like the built-in compile function, but if one is used to compile text containing a future statement, it "remembers" and compiles all subsequent program texts with the statement in force.""" def __init__(self): self.flags = PyCF_DONT_IMPLY_DEDENT def __call__(self, source, filename, symbol): codeob = compile(source, filename, symbol, self.flags, 1) for feature in _features: if codeob.co_flags & feature.compiler_flag: self.flags |= feature.compiler_flag return codeob class CommandCompiler: """Instances of this class have __call__ methods identical in signature to compile_command; the difference is that if the instance compiles program text containing a __future__ statement, the instance 'remembers' and compiles all subsequent program texts with the statement in force.""" def __init__(self,): self.compiler = Compile() def __call__(self, source, filename="<input>", symbol="single"): r"""Compile a command and determine whether it is incomplete. Arguments: source -- the source string; may contain \n characters filename -- optional filename from which source was read; default "<input>" symbol -- optional grammar start symbol; "single" (default) or "eval" Return value / exceptions raised: - Return a code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). """ return _maybe_compile(self.compiler, source, filename, symbol)
agpl-3.0
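The three outcomes described in the codeop docstring can be demonstrated directly against the standard library; a small runnable sketch:

from codeop import compile_command

# Complete statement: a code object comes back.
assert compile_command("x = 1") is not None

# Incomplete block: None signals that more input is expected.
assert compile_command("if x:") is None

# Broken input: the syntax error is raised.
try:
    compile_command("x = )")
except SyntaxError:
    print("syntax error reported, as documented")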
LeartS/odoo
addons/email_template/wizard/mail_compose_message.py
38
11212
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # ############################################################################## from openerp import tools, SUPERUSER_ID from openerp.osv import osv, fields def _reopen(self, res_id, model): return {'type': 'ir.actions.act_window', 'view_mode': 'form', 'view_type': 'form', 'res_id': res_id, 'res_model': self._name, 'target': 'new', # save original model in context, because selecting the list of available # templates requires a model in context 'context': { 'default_model': model, }, } class mail_compose_message(osv.TransientModel): _inherit = 'mail.compose.message' def default_get(self, cr, uid, fields, context=None): """ Override to pre-fill the data when having a template in single-email mode and not going through the view: the on_change is not called in that case. """ if context is None: context = {} res = super(mail_compose_message, self).default_get(cr, uid, fields, context=context) if res.get('composition_mode') != 'mass_mail' and context.get('default_template_id') and res.get('model') and res.get('res_id'): res.update( self.onchange_template_id( cr, uid, [], context['default_template_id'], res.get('composition_mode'), res.get('model'), res.get('res_id'), context=context )['value'] ) return res _columns = { 'template_id': fields.many2one('email.template', 'Use template', select=True), } def send_mail(self, cr, uid, ids, context=None): """ Override of send_mail to duplicate attachments linked to the email.template. Indeed, basic mail.compose.message wizard duplicates attachments in mass mailing mode. But in 'single post' mode, attachments of an email template also have to be duplicated to avoid changing their ownership. 
""" if context is None: context = {} wizard_context = dict(context) for wizard in self.browse(cr, uid, ids, context=context): if wizard.template_id: wizard_context['mail_notify_user_signature'] = False # template user_signature is added when generating body_html wizard_context['mail_auto_delete'] = wizard.template_id.auto_delete # mass mailing: use template auto_delete value -> note, for emails mass mailing only if not wizard.attachment_ids or wizard.composition_mode == 'mass_mail' or not wizard.template_id: continue new_attachment_ids = [] for attachment in wizard.attachment_ids: if attachment in wizard.template_id.attachment_ids: new_attachment_ids.append(self.pool.get('ir.attachment').copy(cr, uid, attachment.id, {'res_model': 'mail.compose.message', 'res_id': wizard.id}, context=context)) else: new_attachment_ids.append(attachment.id) self.write(cr, uid, wizard.id, {'attachment_ids': [(6, 0, new_attachment_ids)]}, context=context) return super(mail_compose_message, self).send_mail(cr, uid, ids, context=wizard_context) def onchange_template_id(self, cr, uid, ids, template_id, composition_mode, model, res_id, context=None): """ - mass_mailing: we cannot render, so return the template values - normal mode: return rendered values """ if template_id and composition_mode == 'mass_mail': fields = ['subject', 'body_html', 'email_from', 'reply_to', 'mail_server_id'] template = self.pool['email.template'].browse(cr, uid, template_id, context=context) values = dict((field, getattr(template, field)) for field in fields if getattr(template, field)) if template.attachment_ids: values['attachment_ids'] = [att.id for att in template.attachment_ids] if template.mail_server_id: values['mail_server_id'] = template.mail_server_id.id if template.user_signature and 'body_html' in values: signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature values['body_html'] = tools.append_content_to_html(values['body_html'], signature) elif template_id: values = self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context=context)[res_id] # transform attachments into attachment_ids; not attached to the document because this will # be done further in the posting process, allowing to clean database if email not send ir_attach_obj = self.pool.get('ir.attachment') for attach_fname, attach_datas in values.pop('attachments', []): data_attach = { 'name': attach_fname, 'datas': attach_datas, 'datas_fname': attach_fname, 'res_model': 'mail.compose.message', 'res_id': 0, 'type': 'binary', # override default_type from context, possibly meant for another model! } values.setdefault('attachment_ids', list()).append(ir_attach_obj.create(cr, uid, data_attach, context=context)) else: values = self.default_get(cr, uid, ['subject', 'body', 'email_from', 'email_to', 'email_cc', 'partner_to', 'reply_to', 'attachment_ids', 'mail_server_id'], context=context) if values.get('body_html'): values['body'] = values.pop('body_html') return {'value': values} def save_as_template(self, cr, uid, ids, context=None): """ hit save as template button: current form value will be a new template attached to the current document. 
""" email_template = self.pool.get('email.template') ir_model_pool = self.pool.get('ir.model') for record in self.browse(cr, uid, ids, context=context): model_ids = ir_model_pool.search(cr, uid, [('model', '=', record.model)], context=context) model_id = model_ids and model_ids[0] or False model_name = '' if model_id: model_name = ir_model_pool.browse(cr, uid, model_id, context=context).name template_name = "%s: %s" % (model_name, tools.ustr(record.subject)) values = { 'name': template_name, 'subject': record.subject or False, 'body_html': record.body or False, 'model_id': model_id or False, 'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])], } template_id = email_template.create(cr, uid, values, context=context) # generate the saved template template_values = record.onchange_template_id(template_id, record.composition_mode, record.model, record.res_id)['value'] template_values['template_id'] = template_id record.write(template_values) return _reopen(self, record.id, record.model) #------------------------------------------------------ # Wizard validation and send #------------------------------------------------------ def generate_email_for_composer_batch(self, cr, uid, template_id, res_ids, context=None, fields=None): """ Call email_template.generate_email(), get fields relevant for mail.compose.message, transform email_cc and email_to into partner_ids """ if context is None: context = {} if fields is None: fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'attachment_ids', 'mail_server_id'] returned_fields = fields + ['partner_ids', 'attachments'] values = dict.fromkeys(res_ids, False) ctx = dict(context, tpl_partners_only=True) template_values = self.pool.get('email.template').generate_email_batch(cr, uid, template_id, res_ids, fields=fields, context=ctx) for res_id in res_ids: res_id_values = dict((field, template_values[res_id][field]) for field in returned_fields if template_values[res_id].get(field)) res_id_values['body'] = res_id_values.pop('body_html', '') values[res_id] = res_id_values return values def render_message_batch(self, cr, uid, wizard, res_ids, context=None): """ Override to handle templates. 
""" # generate composer values composer_values = super(mail_compose_message, self).render_message_batch(cr, uid, wizard, res_ids, context) # generate template-based values if wizard.template_id: template_values = self.generate_email_for_composer_batch( cr, uid, wizard.template_id.id, res_ids, fields=['email_to', 'partner_to', 'email_cc', 'attachment_ids', 'mail_server_id'], context=context) else: template_values = {} for res_id in res_ids: if template_values.get(res_id): # recipients are managed by the template composer_values[res_id].pop('partner_ids') composer_values[res_id].pop('email_to') composer_values[res_id].pop('email_cc') # remove attachments from template values as they should not be rendered template_values[res_id].pop('attachment_ids', None) else: template_values[res_id] = dict() # update template values by composer values template_values[res_id].update(composer_values[res_id]) return template_values def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False): return self.pool.get('email.template').render_template_batch(cr, uid, template, model, res_ids, context=context, post_process=post_process) # Compatibility methods def generate_email_for_composer(self, cr, uid, template_id, res_id, context=None): return self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context)[res_id] # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
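A hypothetical server-side caller for the composer above, written in the same old OpenERP API style; the module name, XML id, model, and record id are placeholders, not part of the addon:

def _send_partner_mail(self, cr, uid, context=None):
    # Look up a mail template by XML id (placeholder identifiers).
    template = self.pool.get('ir.model.data').get_object(
        cr, uid, 'my_module', 'my_email_template', context=context)
    composer = self.pool.get('mail.compose.message')
    # Let the onchange pre-fill subject/body/attachments from the template.
    values = composer.onchange_template_id(
        cr, uid, [], template.id, 'comment', 'res.partner', 42,
        context=context)['value']
    values.update({'model': 'res.partner', 'res_id': 42,
                   'composition_mode': 'comment',
                   'template_id': template.id})
    wizard_id = composer.create(cr, uid, values, context=context)
    composer.send_mail(cr, uid, [wizard_id], context=context)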
zeptonaut/catapult
dashboard/dashboard/update_bug_with_results_test.py
4
37355
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import unittest import mock import webapp2 import webtest from dashboard import rietveld_service from dashboard import testing_common from dashboard import update_bug_with_results from dashboard import utils from dashboard.models import anomaly from dashboard.models import bug_data from dashboard.models import try_job # Bisect log with multiple potential culprits with different authors. _BISECT_LOG_MULTI_OWNER = """ @@@STEP_CURSOR Results@@@ @@@STEP_STARTED@@@ ===== BISECT JOB RESULTS ===== Status: Positive Test Command: python tools/perf/run_benchmark -v --browser=release sunspider Test Metric: Total/Total Relative Change: 1.23% (+/-1.26%) Estimated Confidence: 99.9% ===== SUSPECTED CL(s) ===== Subject : Subject 1 Author : sullivan@google.com Link : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798 Date : Sat, 22 Jun 2013 00:59:35 +0000 Subject : Subject 2 Author : prasadv@google.com Link : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798 Date : Sat, 22 Jun 2013 00:57:48 +0000 Subject : Subject 3 Author : qyearsley@google.com Link : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798 Date : Sat, 22 Jun 2013 00:55:52 +0000 """ # Bisect log with multiple potential culprits but same Author. _BISECT_LOG_MULTI_SAME_OWNER = """ @@@STEP_CURSOR Results@@@ @@@STEP_STARTED@@@ ===== BISECT JOB RESULTS ===== Status: Positive Test Command: python tools/perf/run_benchmark -v --browser=release sunspider Test Metric: Total/Total Relative Change: 1.23% (+/-1.26%) Estimated Confidence: 99.9% ===== SUSPECTED CL(s) ===== Subject : Subject 1 Author : sullivan@google.com Link : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798 Date : Sat, 22 Jun 2013 00:59:35 +0000 Subject : Subject 2 Author : sullivan@google.com Link : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798 Date : Sat, 22 Jun 2013 00:57:48 +0000:55:52 +0000 """ # Bisect log with single potential culprits. _BISECT_LOG_SINGLE_OWNER = """ @@@STEP_CURSOR Results@@@ @@@STEP_STARTED@@@ ===== BISECT JOB RESULTS ===== Status: Positive Test Command: python tools/perf/run_benchmark -v --browser=release sunspider Test Metric: Total/Total Relative Change: 1.23% (+/-1.26%) Estimated Confidence: 100% ===== SUSPECTED CL(s) ===== Subject : Subject 1 Author : sullivan@google.com Link : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798 Commit : d6432657771a9fd720179d8c3dd64c8daee025c7 Date : Sat, 22 Jun 2013 00:59:35 +0000 """ _EXPECTED_BISECT_LOG_SINGLE_OWNER = """ ===== BISECT JOB RESULTS ===== Status: Positive Test Command: python tools/perf/run_benchmark -v --browser=release sunspider Test Metric: Total/Total Relative Change: 1.23% (+/-1.26%) Estimated Confidence: 100% ===== SUSPECTED CL(s) ===== Subject : Subject 1 Author : sullivan@google.com Link : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798 Commit : d6432657771a9fd720179d8c3dd64c8daee025c7 Date : Sat, 22 Jun 2013 00:59:35 +0000""" _EXPECTED_BISECT_RESULTS_ON_BUG = """ ==== Auto-CCing suspected CL author sullivan@google.com ==== Hi sullivan@google.com, the bisect results pointed to your CL below as possibly causing a regression. Please have a look at this info and see whether your CL be related. 
Bisect job status: Completed Bisect job ran on: win_perf_bisect ===== BISECT JOB RESULTS ===== Status: Positive Test Command: python tools/perf/run_benchmark -v --browser=release sunspider Test Metric: Total/Total\nRelative Change: 1.23% (+/-1.26%) Estimated Confidence: 100% ===== SUSPECTED CL(s) ===== Subject : Subject 1 Author : sullivan@google.com Link : http://src.chromium.org/viewvc/chrome?view=revision&revision=20798 Commit : d6432657771a9fd720179d8c3dd64c8daee025c7 Date : Sat, 22 Jun 2013 00:59:35 +0000 Buildbot stdio: http://build.chromium.org/513 Job details: https://test-rietveld.appspot.com/200037 """ _BISECT_LOG_FAILED_REVISION = """ @@@STEP_CURSOR Results@@@ @@@STEP_STARTED@@@ ===== BISECT JOB RESULTS ===== Status: Positive Test Command: python tools/perf/run_benchmark -v --browser=release sunspider Test Metric: Total/Total Relative Change: 1.23% (+/-1.26%) Estimated Confidence: 99.9% ===== SUSPECTED CL(s) ===== Subject : Subject 1 Author : sullivan@google.com Link : http://src.chromium.org/viewvc/chrome?view=revision&revision=20799 Commit : a80773bb263a9706cc8ee4e3f336d2d3d28fadd8 Date : Sat, 22 Jun 2013 00:59:35 +0000 """ _BISECT_LOG_PARTIAL_RESULT = """ ===== PARTIAL RESULTS ===== Depot Commit SHA Mean Std. Error State chromium 282472 91730.00 +-0.00 Bad chromium 282469 92973.00 +-0.00 Good chromium 282460 93468.00 +-0.00 Good """ _EXPECTED_BISECT_LOG_PARTIAL_RESULT = u"""Bisect job status: Failure with \ partial results Bisect job ran on: win_perf_bisect Completed 1/2 builds. Run time: 724/720 minutes. Bisect timed out! Try again with a smaller revision range. Failed steps: slave_steps, Working on def ===== PARTIAL RESULTS ===== Depot Commit SHA Mean Std. Error State chromium 282472 91730.00 +-0.00 Bad chromium 282469 92973.00 +-0.00 Good chromium 282460 93468.00 +-0.00 Good Buildbot stdio: http://build.chromium.org/builders/515 Job details: https://test-rietveld.appspot.com/200039 """ _REVISION_RESPONSE = """ <html xmlns=....> <head><title>[chrome] Revision 207985</title></head><body><table>.... 
<tr align="left"> <th>Log Message:</th> <td> Message....</td> &gt; &gt; Review URL: <a href="https://codereview.chromium.org/81533002">\ https://codereview.chromium.org/81533002</a> &gt; &gt; Review URL: <a href="https://codereview.chromium.org/96073002">\ https://codereview.chromium.org/96073002</a> Review URL: <a href="https://codereview.chromium.org/17504006">\ https://codereview.chromium.org/96363002</a></pre></td></tr></table>....</body> </html> """ _PERF_TEST_CONFIG = """config = { 'command': 'tools/perf/run_benchmark -v --browser=release\ dromaeo.jslibstylejquery --profiler=trace', 'good_revision': '215806', 'bad_revision': '215828', 'repeat_count': '1', 'max_time_minutes': '120', 'truncate_percent': '0' }""" _PERF_LOG_EXPECTED_TITLE_1 = 'With Patch - Profiler Data[0]' _PERF_LOG_EXPECTED_TITLE_2 = 'Without Patch - Profiler Data[0]' _PERF_LOG_EXPECTED_PROFILER_LINK1 = ( 'https://console.developers.google.com/m/cloudstorage/b/chrome-telemetry/o/' 'profiler-file-id_0-2014-11-27_14-08-5560487.json') _PERF_LOG_EXPECTED_PROFILER_LINK2 = ( 'https://console.developers.google.com/m/cloudstorage/b/chrome-telemetry/o/' 'profiler-file-id_0-2014-11-27_14-10-1644780.json') _PERF_LOG_EXPECTED_HTML_LINK = ( 'http://storage.googleapis.com/chromium-telemetry/html-results/' 'results-2014-11-27_14-10-21') _PERF_LOG_WITH_RESULTS = """ @@@STEP_CLOSED@@@ @@@STEP_LINK@HTML Results@%s@@@ @@@STEP_LINK@%s@%s@@@ @@@STEP_LINK@%s@%s@@@ """ % (_PERF_LOG_EXPECTED_HTML_LINK, _PERF_LOG_EXPECTED_TITLE_1, _PERF_LOG_EXPECTED_PROFILER_LINK1, _PERF_LOG_EXPECTED_TITLE_2, _PERF_LOG_EXPECTED_PROFILER_LINK2) _ISSUE_RESPONSE = """ { "description": "Issue Description.", "cc": [ "chromium-reviews@chromium.org", "cc-bugs@chromium.org", "sullivan@google.com" ], "reviewers": [ "prasadv@google.com" ], "owner_email": "sullivan@google.com", "private": false, "base_url": "svn://chrome-svn/chrome/trunk/src/", "owner":"sullivan", "subject":"Issue Subject", "created":"2013-06-20 22:23:27.227150", "patchsets":[1,21001,29001], "modified":"2013-06-22 00:59:38.530190", "closed":true, "commit":false, "issue":17504006 } """ _BISECT_LOG_INFRA_FAILURE = 'Failed to produce build' # Globals that are set in mock functions and then checked in tests. 
_TEST_RECEIEVED_EMAIL_RESULTS = None _TEST_RECEIVED_EMAIL = None def _MockGetJobStatus(job): id_to_response_map = { # Complete '1234567': { 'result': 'SUCCESS', 'result_details': { 'buildername': 'Fake_Bot', }, 'url': 'http://build.chromium.org/bb1234567', 'status': 'COMPLETED', }, # In progress '11111': { 'result_details': { 'buildername': 'Fake_Bot', }, 'url': 'http://build.chromium.org/bb11111', 'status': 'STARTED', }, # Failed '66666': { 'result': 'FAILURE', 'result_details': { 'buildername': 'Fake_Bot', }, 'url': 'http://build.chromium.org/bb66666', 'status': 'COMPLETED', }, } return id_to_response_map.get(str(job.buildbucket_job_id)) def _MockFetch(url=None): url_to_response_map = { 'https://test-rietveld.appspot.com/api/200034/1': [ 200, json.dumps({'try_job_results': [{ 'result': '0', 'builder': 'win_perf_bisect', 'url': 'http://build.chromium.org/508'}]}) ], 'https://test-rietveld.appspot.com/api/302304/1': [ 200, json.dumps({'try_job_results': [{ 'result': '2', 'builder': 'win_perf_bisect', 'url': 'http://build.chromium.org/509'}]}) ], 'https://test-rietveld.appspot.com/api/100001/1': [ 200, json.dumps({'try_job_results': [{ 'result': '6', 'builder': 'win_perf_bisect', 'url': 'http://build.chromium.org/510'}]}) ], 'https://test-rietveld.appspot.com/api/200035/1': [ 200, json.dumps({'try_job_results': [{ 'result': '0', 'builder': 'win_perf_bisect', 'url': 'http://build.chromium.org/511'}]}) ], 'https://test-rietveld.appspot.com/api/200036/1': [ 200, json.dumps({'try_job_results': [{ 'result': '0', 'builder': 'win_perf_bisect', 'url': 'http://build.chromium.org/512'}]}) ], 'https://test-rietveld.appspot.com/api/200037/1': [ 200, json.dumps({'try_job_results': [{ 'result': '0', 'builder': 'win_perf_bisect', 'url': 'http://build.chromium.org/513'}]}) ], 'https://test-rietveld.appspot.com/api/200038/1': [ 200, json.dumps({'try_job_results': [{ 'result': '0', 'builder': 'win_perf_bisect', 'url': 'http://build.chromium.org/514'}]}) ], 'https://test-rietveld.appspot.com/api/200039/1': [ 200, json.dumps({'try_job_results': [{ 'result': '0', 'builder': 'win_perf_bisect', 'url': 'http://build.chromium.org/builders/515'}]}) ], 'http://build.chromium.org/json/builders/515': [ 200, json.dumps({ 'steps': [{'name': 'Working on abc', 'results': [0]}, {'name': 'Working on def', 'results': [2]}], 'times': [1411501756.293642, 1411545237.89049], 'text': ['failed', 'slave_steps', 'failed', 'Working on def']}) ], 'http://build.chromium.org/bb1234567/steps/Results/logs/stdio/text': [ 200, _BISECT_LOG_SINGLE_OWNER ], 'http://build.chromium.org/bb66666': [ 200, json.dumps({ 'steps': [{'name': 'Working on abc', 'results': [0]}, {'name': 'Working on def', 'results': [2]}], 'times': [1411501756.293642, 1411545237.89049], 'text': ['failed', 'slave_steps', 'failed', 'Working on def']}) ], ('http://build.chromium.org/builders/bb66666' '/steps/Results/logs/stdio/text'): [ 404, '' ], 'http://build.chromium.org/json/builders/516': [ 200, json.dumps({'steps': [{'name': 'gclient', 'results': [2]}]}) ], 'http://src.chromium.org/viewvc/chrome?view=revision&revision=20798': [ 200, _REVISION_RESPONSE ], 'http://src.chromium.org/viewvc/chrome?view=revision&revision=20799': [ 200, 'REVISION REQUEST FAILED!' ], 'https://codereview.chromium.org/api/17504006': [ 200, json.dumps(json.loads(_ISSUE_RESPONSE)) ], 'http://build.chromium.org/508/steps/Results/logs/stdio/text': [ 200, '===== BISECT JOB RESULTS =====' ], 'http://build.chromium.org/509/steps/Results/logs/stdio/text': [ 200, 'BISECT FAILURE! 
' ], 'http://build.chromium.org/511/steps/Results/logs/stdio/text': [ 200, _BISECT_LOG_MULTI_OWNER ], 'http://build.chromium.org/512/steps/Results/logs/stdio/text': [ 200, _BISECT_LOG_MULTI_SAME_OWNER ], 'http://build.chromium.org/513/steps/Results/logs/stdio/text': [ 200, _BISECT_LOG_SINGLE_OWNER ], 'http://build.chromium.org/514/steps/Results/logs/stdio/text': [ 200, _BISECT_LOG_FAILED_REVISION ], 'http://build.chromium.org/builders/515/steps/Results/logs/stdio/text': [ 404, '' ], 'http://build.chromium.org/builders/515/steps/Working%20on%20abc/logs/' 'stdio/text': [ 200, _BISECT_LOG_PARTIAL_RESULT ], 'http://build.chromium.org/builders/516/steps/slave_steps/logs/stdio/' 'text': [ 200, _BISECT_LOG_INFRA_FAILURE ], 'http://build.chromium.org/508/steps/Running%20Bisection/logs/stdio/' 'text': [ 200, _PERF_LOG_WITH_RESULTS ], 'http://build.chromium.org/511/steps/Running%20Bisection/logs/stdio/' 'text': [ 200, '' ], } if url not in url_to_response_map: assert False, 'Bad url %s' % url response_code = url_to_response_map[url][0] response = url_to_response_map[url][1] return testing_common.FakeResponseObject(response_code, response) def _MockSendPerfTryJobEmail(_, results): global _TEST_RECEIEVED_EMAIL_RESULTS _TEST_RECEIEVED_EMAIL_RESULTS = results def _MockSendMail(**kwargs): global _TEST_RECEIVED_EMAIL _TEST_RECEIVED_EMAIL = kwargs class UpdateBugWithResultsTest(testing_common.TestCase): def setUp(self): super(UpdateBugWithResultsTest, self).setUp() app = webapp2.WSGIApplication([( '/update_bug_with_results', update_bug_with_results.UpdateBugWithResultsHandler)]) self.testapp = webtest.TestApp(app) self._AddRietveldConfig() # Calling the real Credentials function doesn't work in the test # environment; using no credentials in the tests works because the requests # to the issue tracker are mocked out as well. rietveld_service.Credentials = mock.MagicMock(return_value=None) def _AddRietveldConfig(self): """Adds a RietveldConfig entity to the datastore. This is used in order to get the Rietveld URL when requests are made to the handler in te tests below. In the real datastore, the RietveldConfig entity would contain credentials. """ rietveld_service.RietveldConfig( id='default_rietveld_config', client_email='sullivan@google.com', service_account_key='Fake Account Key', server_url='https://test-rietveld.appspot.com', internal_server_url='https://test-rietveld.appspot.com').put() @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service, 'IssueTrackerService', mock.MagicMock()) @mock.patch.object( update_bug_with_results.buildbucket_service, 'GetJobStatus', _MockGetJobStatus) def testGet(self): # Put succeeded, failed, and not yet finished jobs in the datastore. try_job.TryJob( bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1, status='started', bot='win_perf').put() try_job.TryJob( bug_id=54321, rietveld_issue_id=302304, rietveld_patchset_id=1, status='started', bot='win_perf').put() try_job.TryJob( bug_id=99999, rietveld_issue_id=100001, rietveld_patchset_id=1, status='started', bot='win_perf').put() try_job.TryJob( bug_id=77777, buildbucket_job_id='1234567', use_buildbucket=True, status='started', bot='win_perf').put() # Create bug. 
bug_data.Bug(id=12345).put() bug_data.Bug(id=54321).put() bug_data.Bug(id=99999).put() bug_data.Bug(id=77777).put() self.testapp.get('/update_bug_with_results') pending_jobs = try_job.TryJob.query().fetch() # Expects a failed and not yet finished bisect job to be in datastore. self.assertEqual(3, len(pending_jobs)) self.assertEqual(54321, pending_jobs[0].bug_id) self.assertEqual('failed', pending_jobs[0].status) self.assertEqual(99999, pending_jobs[1].bug_id) self.assertEqual(77777, pending_jobs[2].bug_id) self.assertEqual('started', pending_jobs[1].status) self.assertEqual('started', pending_jobs[2].status) self.assertEqual('bisect', pending_jobs[0].job_type) self.assertEqual('bisect', pending_jobs[1].job_type) self.assertEqual('bisect', pending_jobs[2].job_type) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service, 'IssueTrackerService', mock.MagicMock()) def testCreateTryJob_WithoutExistingBug(self): # Put succeeded job in the datastore. try_job.TryJob( bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1, status='started', bot='win_perf').put() self.testapp.get('/update_bug_with_results') pending_jobs = try_job.TryJob.query().fetch() # Expects job to finish. self.assertEqual(0, len(pending_jobs)) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service.IssueTrackerService, 'AddBugComment', mock.MagicMock(return_value=False)) @mock.patch('logging.error') def testGet_FailsToUpdateBug_LogsErrorAndMovesOn(self, mock_logging_error): # Put a successful job and a failed job with partial results. # Note that AddBugComment is mocked to always returns false, which # simulates failing to post results to the issue tracker for all bugs. try_job.TryJob( bug_id=12345, rietveld_issue_id=200034, rietveld_patchset_id=1, status='started', bot='win_perf').put() try_job.TryJob( bug_id=54321, rietveld_issue_id=200039, rietveld_patchset_id=1, status='started', bot='win_perf').put() bug_data.Bug(id=12345).put() bug_data.Bug(id=54321).put() self.testapp.get('/update_bug_with_results') # Two errors should be logged. self.assertEqual(2, mock_logging_error.call_count) mock_logging_error.assert_called_with( 'Caught Exception %s: %s', 'BugUpdateFailure', mock.ANY) # The pending jobs should still be there. pending_jobs = try_job.TryJob.query().fetch() self.assertEqual(2, len(pending_jobs)) self.assertEqual('started', pending_jobs[0].status) self.assertEqual('started', pending_jobs[1].status) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service.IssueTrackerService, 'AddBugComment') def testGet_BisectJobWithPartialResults(self, mock_update_bug): # Put failed job in the datastore. try_job.TryJob( bug_id=54321, rietveld_issue_id=200039, rietveld_patchset_id=1, status='started', bot='win_perf').put() # Create bug. 
bug_data.Bug(id=54321).put() self.testapp.get('/update_bug_with_results') pending_jobs = try_job.TryJob.query().fetch() self.assertEqual(1, len(pending_jobs)) self.assertEqual('failed', pending_jobs[0].status) mock_update_bug.assert_called_once_with( 54321, _EXPECTED_BISECT_LOG_PARTIAL_RESULT, labels=None) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service.IssueTrackerService, 'AddBugComment') def testGet_BisectCulpritHasMultipleAuthors_NoneCCd(self, mock_update_bug): # When a bisect finds multiple culprits for a perf regression, # owners of CLs shouldn't be cc'ed on issue update. try_job.TryJob( bug_id=12345, rietveld_issue_id=200035, rietveld_patchset_id=1, status='started', bot='win_perf').put() bug_data.Bug(id=12345).put() self.testapp.get('/update_bug_with_results') mock_update_bug.assert_called_once_with( mock.ANY, mock.ANY, cc_list=[], merge_issue=None, labels=None, owner=None) pending_jobs = try_job.TryJob.query().fetch() self.assertEqual(0, len(pending_jobs)) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service.IssueTrackerService, 'AddBugComment') def testGet_MultipleCulpritsSameAuthor_AssignsAuthor(self, mock_update_bug): # When a bisect finds multiple culprits by same Author for a perf # regression, owner of CLs should be cc'ed. try_job.TryJob( bug_id=12345, rietveld_issue_id=200036, rietveld_patchset_id=1, status='started', bot='win_perf').put() bug_data.Bug(id=12345).put() self.testapp.get('/update_bug_with_results') mock_update_bug.assert_called_once_with( mock.ANY, mock.ANY, cc_list=['sullivan@google.com', 'prasadv@google.com'], merge_issue=None, labels=None, owner='sullivan@google.com') pending_jobs = try_job.TryJob.query().fetch() self.assertEqual(0, len(pending_jobs)) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service.IssueTrackerService, 'AddBugComment') def testGet_BisectCulpritHasSingleAuthor_AssignsAuthor(self, mock_update_bug): # When a bisect finds a single culprit for a perf regression, # author and reviewer of the CL should be cc'ed on issue update. try_job.TryJob( bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1, status='started', bot='win_perf').put() # Create bug. bug_data.Bug(id=12345).put() self.testapp.get('/update_bug_with_results') mock_update_bug.assert_called_once_with( mock.ANY, mock.ANY, cc_list=['sullivan@google.com', 'prasadv@google.com'], merge_issue=None, labels=None, owner='sullivan@google.com') pending_jobs = try_job.TryJob.query().fetch() self.assertEqual(0, len(pending_jobs)) def testBeautifyContent(self): # Remove buildbot annotations (@@@), leading and trailing spaces from bisect # results log. 
actual_output = update_bug_with_results._BeautifyContent( _BISECT_LOG_SINGLE_OWNER) self.assertNotIn('@@@', actual_output) for line in actual_output.split('\n'): self.assertFalse(line.startswith(' ')) self.assertFalse(line.endswith(' ')) self.assertEqual(_EXPECTED_BISECT_LOG_SINGLE_OWNER, actual_output) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service.IssueTrackerService, 'AddBugComment') def testGet_FailedRevisionResponse(self, mock_add_bug): # When a Rietveld CL link fails to respond, only update CL owner in CC list. try_job.TryJob( bug_id=12345, rietveld_issue_id=200038, rietveld_patchset_id=1, status='started', bot='win_perf').put() # Create bug. bug_data.Bug(id=12345).put() self.testapp.get('/update_bug_with_results') mock_add_bug.assert_called_once_with(mock.ANY, mock.ANY, cc_list=['sullivan@google.com'], merge_issue=None, labels=None, owner='sullivan@google.com') pending_jobs = try_job.TryJob.query().fetch() self.assertEqual(0, len(pending_jobs)) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service.IssueTrackerService, 'AddBugComment') def testGetForMergeIssue(self, mock_update_bug): # When there exists a bug with the same revision (commit hash), # mark bug as duplicate and merge current issue into that. try_job.TryJob( bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1, status='started', bot='win_perf').put() try_job.TryJob( bug_id=54321, rietveld_issue_id=200037, rietveld_patchset_id=1, status='started', bot='win_perf').put() # Create bug. bug_data.Bug(id=12345).put() bug_data.Bug(id=54321).put() self.testapp.get('/update_bug_with_results') # Owners of CLs are not cc'ed for duplicate bugs and the issue should be # marked as duplicate. mock_update_bug.assert_called_with(mock.ANY, mock.ANY, cc_list=[], merge_issue='12345', labels=None, owner=None) pending_jobs = try_job.TryJob.query().fetch() self.assertEqual(0, len(pending_jobs)) # Add anomalies. test_keys = map(utils.TestKey, [ 'ChromiumGPU/linux-release/scrolling-benchmark/first_paint', 'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time']) anomaly.Anomaly( start_revision=9990, end_revision=9997, test=test_keys[0], median_before_anomaly=100, median_after_anomaly=200, sheriff=None, bug_id=12345).put() anomaly.Anomaly( start_revision=9990, end_revision=9996, test=test_keys[0], median_before_anomaly=100, median_after_anomaly=200, sheriff=None, bug_id=54321).put() # Map anomalies to base(dest_bug_id) bug. update_bug_with_results._MapAnomaliesToMergeIntoBug( dest_bug_id=12345, source_bug_id=54321) anomalies = anomaly.Anomaly.query( anomaly.Anomaly.bug_id == int(54321)).fetch() self.assertEqual(0, len(anomalies)) def testAnomalyMappingForMergeIssue(self): # Add anomalies. test_keys = map(utils.TestKey, [ 'ChromiumGPU/linux-release/scrolling-benchmark/first_paint', 'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time']) anomaly.Anomaly( start_revision=9990, end_revision=9997, test=test_keys[0], median_before_anomaly=100, median_after_anomaly=200, sheriff=None, bug_id=12345).put() anomaly.Anomaly( start_revision=9990, end_revision=9996, test=test_keys[0], median_before_anomaly=100, median_after_anomaly=200, sheriff=None, bug_id=54321).put() # Map anomalies to base(dest_bug_id) bug. 
update_bug_with_results._MapAnomaliesToMergeIntoBug( dest_bug_id=12345, source_bug_id=54321) anomalies = anomaly.Anomaly.query( anomaly.Anomaly.bug_id == int(54321)).fetch() self.assertEqual(0, len(anomalies)) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object(update_bug_with_results, '_LogBisectInfraFailure') def testCheckBisectBotForInfraBotFailure(self, log_bisect_failure_mock): bug_id = 516 build_data = { 'steps': [{'name': 'A', 'results': [0]}, {'name': 'B', 'results': [2]}], 'times': [1411501756, 1411545237], } build_url = 'http://build.chromium.org/builders/516' update_bug_with_results._CheckBisectBotForInfraFailure( bug_id, build_data, build_url) log_bisect_failure_mock.assert_called_with( bug_id, 'Bot failure.', mock.ANY) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object(update_bug_with_results, '_LogBisectInfraFailure') def testCheckBisectBotForInfraBuildFailure(self, log_bisect_failure_mock): bug_id = 516 build_data = { 'steps': [{'name': 'A', 'results': [0]}, {'name': 'slave_steps', 'results': [2]}], 'times': [1411500000, 1411501000], } build_url = 'http://build.chromium.org/builders/516' update_bug_with_results._CheckBisectBotForInfraFailure( bug_id, build_data, build_url) log_bisect_failure_mock.assert_called_with( bug_id, 'Build failure.', mock.ANY) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results.issue_tracker_service.IssueTrackerService, 'AddBugComment') def testBotInfoInBisectResults(self, mock_update_bug): # When a bisect finds multiple culprits by same Author for a perf # regression, owner of CLs should be cc'ed. try_job.TryJob( bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1, status='started', bot='win_perf').put() # Create bug. 
bug_data.Bug(id=12345).put() self.testapp.get('/update_bug_with_results') mock_update_bug.assert_called_once_with( 12345, _EXPECTED_BISECT_RESULTS_ON_BUG, cc_list=['sullivan@google.com', 'prasadv@google.com'], merge_issue=None, labels=None, owner='sullivan@google.com') @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results, '_SendPerfTryJobEmail', mock.MagicMock(side_effect=_MockSendPerfTryJobEmail)) @mock.patch.object( update_bug_with_results.issue_tracker_service, 'IssueTrackerService', mock.MagicMock()) def testGet_PerfTryJob(self): try_job.TryJob( rietveld_issue_id=200034, rietveld_patchset_id=1, status='started', bot='win_perf', email='just@atestemail.com', job_type='perf-try', config=_PERF_TEST_CONFIG).put() global _TEST_RECEIEVED_EMAIL_RESULTS _TEST_RECEIEVED_EMAIL_RESULTS = None self.testapp.get('/update_bug_with_results') results = _TEST_RECEIEVED_EMAIL_RESULTS self.assertEqual('Completed', results['status']) self.assertEqual(2, len(results['profiler_results'])) self.assertEqual(_PERF_LOG_EXPECTED_HTML_LINK, results['html_results']) self.assertEqual(_PERF_LOG_EXPECTED_TITLE_1, results['profiler_results'][0][0]) self.assertEqual(_PERF_LOG_EXPECTED_PROFILER_LINK1, results['profiler_results'][0][1]) self.assertEqual(_PERF_LOG_EXPECTED_TITLE_2, results['profiler_results'][1][0]) self.assertEqual(_PERF_LOG_EXPECTED_PROFILER_LINK2, results['profiler_results'][1][1]) self.assertEqual('win_perf_bisect', results['bisect_bot']) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results, '_SendPerfTryJobEmail', mock.MagicMock(side_effect=_MockSendPerfTryJobEmail)) @mock.patch.object( update_bug_with_results.issue_tracker_service, 'IssueTrackerService', mock.MagicMock()) def testGet_PerfTryJobWithInvalidOutput_EmailResultsAreEmpty(self): try_job.TryJob( rietveld_issue_id=200035, rietveld_patchset_id=1, status='started', bot='win_perf', email='just@atestemail.com', job_type='perf-try', config=_PERF_TEST_CONFIG).put() global _TEST_RECEIEVED_EMAIL_RESULTS _TEST_RECEIEVED_EMAIL_RESULTS = None self.testapp.get('/update_bug_with_results') results = _TEST_RECEIEVED_EMAIL_RESULTS self.assertEqual('Completed', results['status']) self.assertEqual(0, len(results['profiler_results'])) self.assertEqual('', results['html_results']) self.assertEqual('win_perf_bisect', results['bisect_bot']) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch( 'google.appengine.api.mail.send_mail', mock.MagicMock(side_effect=_MockSendMail)) @mock.patch.object( update_bug_with_results.issue_tracker_service, 'IssueTrackerService', mock.MagicMock()) def testGet_CreatePerfSuccessEmail(self): try_job.TryJob( rietveld_issue_id=200034, rietveld_patchset_id=1, status='started', bot='win_perf', email='just@atestemail.com', job_type='perf-try', config=_PERF_TEST_CONFIG).put() global _TEST_RECEIVED_EMAIL _TEST_RECEIVED_EMAIL = {} self.testapp.get('/update_bug_with_results') self.assertIn('<a href="http://build.chromium.org/508">' 'http://build.chromium.org/508</a>.', _TEST_RECEIVED_EMAIL.get('html')) self.assertIn('With Patch', _TEST_RECEIVED_EMAIL.get('body')) self.assertIn('Without Patch', _TEST_RECEIVED_EMAIL.get('body')) self.assertIn('just@atestemail.com', _TEST_RECEIVED_EMAIL.get('to')) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch( 
'google.appengine.api.mail.send_mail', mock.MagicMock(side_effect=_MockSendMail)) @mock.patch.object( update_bug_with_results.issue_tracker_service, 'IssueTrackerService', mock.MagicMock()) def testGet_CreatePerfFailureEmail(self): try_job.TryJob( rietveld_issue_id=200034, rietveld_patchset_id=1, status='started', bot='win_perf', email='just@atestemail.com', job_type='perf-try').put() global _TEST_RECEIVED_EMAIL _TEST_RECEIVED_EMAIL = {} self.testapp.get('/update_bug_with_results') self.assertIn('Perf Try Job FAILURE\n<br>', _TEST_RECEIVED_EMAIL.get('html')) self.assertIn('Perf Try Job FAILURE\n\n', _TEST_RECEIVED_EMAIL.get('body')) self.assertIn('just@atestemail.com', _TEST_RECEIVED_EMAIL.get('to')) @mock.patch( 'google.appengine.api.urlfetch.fetch', mock.MagicMock(side_effect=_MockFetch)) @mock.patch.object( update_bug_with_results, '_RietveldIssueJSONURL', mock.MagicMock( return_value='https://test-rietveld.appspot.com/api/200037/1')) @mock.patch.object( update_bug_with_results.issue_tracker_service.IssueTrackerService, 'AddBugComment') def testGet_InternalOnlyTryJob_AddsInternalOnlyBugLabel( self, mock_update_bug): try_job.TryJob( bug_id=12345, rietveld_issue_id=200037, rietveld_patchset_id=1, status='started', bot='win_perf', internal_only=True).put() # Create bug. bug_data.Bug(id=12345).put() self.testapp.get('/update_bug_with_results') mock_update_bug.assert_called_once_with( mock.ANY, mock.ANY, cc_list=mock.ANY, merge_issue=None, labels=['Restrict-View-Google'], owner=mock.ANY) def testValidateAndConvertBuildbucketResponse_NoResults(self): buildbucket_response_scheduled = r"""{ "build": { "status": "SCHEDULED", "id": "9043191319901995952" } }""" with self.assertRaises(update_bug_with_results.UnexpectedJsonError): update_bug_with_results._ValidateAndConvertBuildbucketResponse( json.loads(buildbucket_response_scheduled)) def testValidateAndConvertBuildbucketResponse_Failed(self): buildbucket_response_failed = r"""{ "build": { "status": "COMPLETED", "url": "http://build.chromium.org/linux_perf_bisector/builds/41", "failure_reason": "BUILD_FAILURE", "result": "FAILURE", "id": "9043547105089652704" } }""" converted_response = ( update_bug_with_results._ValidateAndConvertBuildbucketResponse( json.loads(buildbucket_response_failed))) self.assertIn('http', converted_response['url']) self.assertEqual(converted_response['result'], update_bug_with_results.FAILURE) def testValidateAndConvertBuildbucketResponse_Success(self): buildbucket_response_success = r"""{ "build": { "status": "COMPLETED", "url": "http://build.chromium.org/linux_perf_bisector/builds/47", "id": "9043278384371361584", "result": "SUCCESS" } }""" converted_response = ( update_bug_with_results._ValidateAndConvertBuildbucketResponse( json.loads(buildbucket_response_success))) self.assertIn('http', converted_response['url']) self.assertEqual(converted_response['result'], update_bug_with_results.SUCCESS) if __name__ == '__main__': unittest.main()
bsd-3-clause
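The tests above all lean on one pattern: `urlfetch.fetch` is replaced by a `side_effect` callable that serves canned payloads per URL. A minimal, self-contained sketch of that pattern follows; the helper names (`_FakeResponse`, `get_build_result`) and the URL are illustrative stand-ins, not taken from the dashboard code itself.

import json
import unittest
from unittest import mock


class _FakeResponse(object):
    """Mimics the parts of a urlfetch response that callers read."""
    def __init__(self, content, status_code=200):
        self.content = content
        self.status_code = status_code


def _MockFetch(url):
    """Serves canned JSON per URL; unknown URLs fail loudly with KeyError."""
    canned = {
        'http://build.example.org/json/builders/516': {'result': 'SUCCESS'},
    }
    return _FakeResponse(json.dumps(canned[url]))


def get_build_result(url, fetch):
    """Hypothetical production helper: fetch a build page, read 'result'."""
    return json.loads(fetch(url).content)['result']


class MockFetchExample(unittest.TestCase):
    def test_success_result(self):
        # side_effect makes the mock both record calls and return real data.
        fetch = mock.MagicMock(side_effect=_MockFetch)
        result = get_build_result(
            'http://build.example.org/json/builders/516', fetch)
        self.assertEqual('SUCCESS', result)
        fetch.assert_called_once()


if __name__ == '__main__':
    unittest.main()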
ParashRahman/Database-Project
Part1/record_violation.py
1
15513
from application import Application from error_checker import ErrorChecker from errors import InvalidDateException import add_person class RecordViolation(Application): def start_application(self, c): self.cursor = c self.list_of_inputs = [ None for i in range(8) ] self.get_violation_no(0) self.fields = [ "Violator no.", # 1 "Vehicle id", # 2 "Office no.", # 3 "Violation type", # 4 "Violation date", # 5 "Place", # 6 "Description", # 7 "Insert into database", # 8 "Exit: Cancel entering violation" ] # 9 self.cursor.execute( "SELECT * FROM ticket" ) self.metadata = self.cursor.description while ( True ): self.print_field_options( ) choice = self.get_input( len(self.fields) ) if ( choice == 1 ): self.get_violator_no(choice) elif ( choice == 2 ): self.get_vehicle_id(choice) elif ( choice == 3 ): self.get_office_no(choice) elif ( choice == 4 ): self.get_violation_type(choice) elif ( choice == 5 ): self.get_violation_date(choice) elif ( choice == 6 ): self.get_violation_place(choice) elif ( choice == 7 ): self.get_violation_description(choice) # Enter data into db option elif ( choice == 8 ): inserted = self.insert_into_database() if ( inserted ): return else: continue # Exit option elif ( choice == 9 ): return # helper function for printing options def print_field_options( self, fields = None, showEmpty = True ): if ( fields == None ): fields = self.fields print( "Enter a field option to edit: " ) for i in range( len( fields ) ): print ( "[{:}] ".format( i+1 ) + fields[i] + (" EMPTY" if showEmpty and i < 7 and not self.list_of_inputs[i+1] else "") ) # returns the integer input choice def get_input( self, num_choices, prompt = "Choose a field to edit or an option: ", fields = None, showEmpty = True ): if ( fields == None ): fields = self.fields print( prompt ) try: string_input = input() choice = int(string_input) except: choice = "Invalid" while ( type( choice ) is not int or choice >= num_choices + 1 or choice <= 0 ): self.print_field_options(fields, showEmpty) print( "Enter a valid integer choice: " ) try: string_input = input() choice = int(string_input) except: choice = "Invalid" return choice ################################### # GENERATE VIOLATION NO. ################################### def get_violation_no( self, index ): # gets the list of ids and adds 1 to the max numbers = self.cursor.execute( "SELECT ticket_no FROM ticket" ).fetchall() self.list_of_inputs[index] = max([ ID[0] for ID in numbers ]) + 1 ################################### # GET VIOLATOR NO. ################################### def get_violator_no(self, index): # initial get and check user_input = input("Enter the violator's SIN " "(Enter nothing to cancel): ") # initial check if user wants to cancel if ( len( user_input ) == 0 ): return # initial check for if violator exists exists = False self.cursor.execute("SELECT SIN FROM people") rows = self.cursor.fetchall() rows = [ row[0].strip().lower() for row in rows ] if ( user_input.strip().lower() in rows ): exists = True # While the input string is too long or the violator does not exist short_enough = ErrorChecker.check_error(self.metadata[index], user_input) while ( not short_enough or not exists): if ( not short_enough ): user_input = input("Your input was too long. " "Enter the violator's SIN " "(Enter nothing to cancel): ") elif ( not exists ): char_answer = "" while ( char_answer.strip().lower() not in [ 'y', 'n' ] ): char_answer = input( "The violator is not in the database. " "Would you like to add the person? 
(y/n): " ) if ( char_answer == 'y' ): a = add_person.AddPerson() a.start_application(self.cursor) self.cursor.execute("SELECT SIN FROM people") rows = self.cursor.fetchall() rows = [ row[0].strip().lower() for row in rows ] user_input = input("Enter the violator's SIN (Enter " "nothing to cancel): ") if ( len( user_input ) == 0 ): return if ( user_input.strip().lower() in rows ): exists = True else: exists = False short_enough = ErrorChecker.check_error(self.metadata[index], user_input) self.list_of_inputs[index] = "'{:}'".format(user_input.strip().lower()) ################################### # GET VEHICLE ID ################################### def get_vehicle_id(self, index): # initial get and check user_input = input("Enter the vehicle serial number " "(Enter nothing to cancel): ") # initial check if user wants to cancel if ( len( user_input ) == 0 ): return # initial check for if violator exists exists = False self.cursor.execute("SELECT serial_no FROM vehicle") rows = self.cursor.fetchall() rows = [ row[0].strip().lower() for row in rows ] if ( user_input.strip().lower() in rows ): exists = True # While the input string is too long or the violator does not exist short_enough = ErrorChecker.check_error(self.metadata[index], user_input) while ( not short_enough or not exists): if ( not short_enough ): user_input = input("Your input was too long. " "Enter the vehicle serial number " "(Enter nothing to cancel): ") elif ( not exists ): user_input = input("The vehicle is not in the database. " "Enter the violator's SIN (Enter " "nothing to cancel): ") if ( len( user_input ) == 0 ): return if ( user_input.strip().lower() in rows ): exists = True else: exists = False short_enough = ErrorChecker.check_error(self.metadata[index], user_input) self.list_of_inputs[index] = "'{:}'".format(user_input.strip().lower()) ################################### # GET OFFICE NO. ################################### def get_office_no(self, index): # initial get and check user_input = input("Enter the office number " "(Enter nothing to cancel): ") # initial check if user wants to cancel if ( len( user_input ) == 0 ): return # initial check for if violator exists exists = False self.cursor.execute("SELECT SIN FROM people") rows = self.cursor.fetchall() rows = [ row[0].strip().lower() for row in rows ] if ( user_input.strip().lower() in rows ): exists = True # While the input string is too long or the violator does not exist short_enough = ErrorChecker.check_error(self.metadata[index], user_input) while ( not short_enough or not exists): if ( not short_enough ): user_input = input("Your input was too long. " "Enter the office number " "(Enter nothing to cancel): ") elif ( not exists ): user_input = input("The office is not in the database. 
" "Enter the office number (Enter " "nothing to cancel): ") if ( len( user_input ) == 0 ): return if ( user_input.strip().lower() in rows ): exists = True else: exists = False short_enough = ErrorChecker.check_error(self.metadata[index], user_input) self.list_of_inputs[index] = "'{:}'".format(user_input.strip().lower()) ################################### # GET VIOLATION TYPE ################################### def get_violation_type(self, index): self.cursor.execute( "SELECT * FROM ticket_type" ) list_of_types = self.cursor.fetchall() prompt_types = [ row[0] + " $" + str(row[1]) for row in list_of_types ] self.print_field_options( prompt_types, False ) user_input = self.get_input(len( prompt_types ), "Pick a violation type", prompt_types, False ) self.list_of_inputs[index] = "'{:}'".format(list_of_types[user_input-1][0]) ################################### # GET VIOLATION DATE ################################### def get_violation_date(self, index): while ( True ): date_input = input ( "Enter the date ( DD/MM/YYYY ) " "(Enter nothing to cancel): ") if ( len( date_input ) == 0 ): return date_input = date_input.split('/') try: if len(date_input) != 3: raise InvalidDateException() for component in date_input: if ( not ErrorChecker.check_str_int(component) ): raise InvalidDateException() date_input = [ int(comp) for comp in date_input ] if (not ErrorChecker.check_error(self.metadata[index], date_input)): raise InvalidDateException() break except ( InvalidDateException ): print( "Your date was invalid" ) if ( date_input != None ): d = date_input[0] m = date_input[1] y = date_input[2] self.list_of_inputs[index] = [ "'{:}/{:}/{:}'".format(d,m,y), "'DD/MM/YYYY'" ] ################################### # GET VIOLATOR PLACE ################################### def get_violation_place(self, index): while ( True ): user_input = input("Enter the place of the violation " "(Enter nothing to cancel): ") if ( len( user_input ) == 0 ): return if ( ErrorChecker.check_error( self.metadata[index], user_input ) ): break else: print( "Your input was too long" ) self.list_of_inputs[index] = "'{:}'".format(user_input) ################################### # GET VIOLATOR DESCRIPTION ################################### def get_violation_description(self, index): while ( True ): user_input = input("Enter the description of the violation " "(Enter nothing to cancel): ") if ( len( user_input ) == 0 ): return if ( ErrorChecker.check_error( self.metadata[index], user_input ) ): break else: print( "Your input was too long" ) self.list_of_inputs[index] = "'{:}'".format(user_input) ################################### # INSERT INTO DATABASE ################################### def insert_into_database(self): # check if fields are empty unfinished = False for inp in self.list_of_inputs: if ( inp == None ): unfinished = True if ( unfinished ): print( "You have left some fields blank." ) char_answer = "" while ( char_answer.strip().lower() not in [ 'y', 'n' ] ): char_answer = input( "Would you like to continue saving (y/n)? 
" ) if ( char_answer == 'n' ): return False # change all Nones in input to "NULL" for i in range( len( self.list_of_inputs ) ): if ( self.list_of_inputs[i] == None ): self.list_of_inputs[i] = "NULL" # prepare date for insertion if ( self.list_of_inputs[5] != "NULL" ): self.list_of_inputs[5] = "TO_DATE( {:}, {:} )".format( self.list_of_inputs[5][0], self.list_of_inputs[5][1] ) # attempt to charge primary owner if vehicle entered # and violator is not if ( self.list_of_inputs[2] != "NULL" and self.list_of_inputs[1] == "NULL" ): statement = "SELECT o.owner_id FROM owner o, " \ "vehicle v where v.serial_no = o.vehicle_id " \ "and o.is_primary_owner = 'y' and v.serial_no = " + \ self.list_of_inputs[2] primary_owner = self.cursor.execute( statement ).fetchall() if ( len( primary_owner ) == 0 ): # Do nothing pass else: primary_owner = "'{:}'".format( primary_owner[0][0] ) self.list_of_inputs[1] = primary_owner statement = "INSERT INTO ticket VALUES( " \ "{:}, {:}, {:}, {:}, {:}, {:}, {:}, {:} )".format( self.list_of_inputs[0], self.list_of_inputs[1], self.list_of_inputs[2], self.list_of_inputs[3], self.list_of_inputs[4], self.list_of_inputs[5], self.list_of_inputs[6], self.list_of_inputs[7] ) self.cursor.execute( statement ) return True def change_owner(self,owner_sin,vehicle_id,is_primary_owner): statement="delete from owner where vehicle_id='{}'".format(str(vehicle_id)) self.cursor.execute(statement) value_statement='('+"'"+str(owner_sin)+"'"+','+"'"+str(vehicle_id)+"'"+','+"'"+str(is_primary_owner)+"'"+')' statement2="insert into owner values"+value_statement try: self.cursor.execute(statement2) except Exception as e: print("Error! cannot add an owner record") return
apache-2.0
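`insert_into_database` above assembles its INSERT by manually quoting values and `.format()`-ing them into the statement, which breaks on embedded quotes and invites SQL injection. A hedged sketch of the same insert using bind parameters follows, with `sqlite3` standing in for the course's actual Oracle driver (which would use `:1`-style placeholders rather than `?`); the table layout is simplified and illustrative.

import sqlite3

conn = sqlite3.connect(':memory:')
cursor = conn.cursor()
cursor.execute(
    'CREATE TABLE ticket (ticket_no INTEGER, violator_no TEXT, '
    'vehicle_id TEXT, office_no TEXT, vtype TEXT, vdate TEXT, '
    'place TEXT, descr TEXT)')

row = (42, '123456789', 'SER001', 'OFF1', 'speeding',
       '01/04/2016', "O'Connor St", 'over the limit')

# Placeholders let the driver handle quoting and NULLs, so a value like
# "O'Connor St" cannot break out of the statement the way the manual
# quoting above can.
cursor.execute('INSERT INTO ticket VALUES (?, ?, ?, ?, ?, ?, ?, ?)', row)
conn.commit()

print(cursor.execute('SELECT place FROM ticket').fetchone())  # ("O'Connor St",)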
hybrideagle/django
tests/model_inheritance/tests.py
283
17419
from __future__ import unicode_literals from operator import attrgetter from django.core.exceptions import FieldError, ValidationError from django.core.management import call_command from django.db import connection from django.test import TestCase, TransactionTestCase from django.test.utils import CaptureQueriesContext from django.utils import six from .models import ( Base, Chef, CommonInfo, Copy, GrandChild, GrandParent, ItalianRestaurant, MixinModel, ParkingLot, Place, Post, Restaurant, Student, SubBase, Supplier, Title, Worker, ) class ModelInheritanceTests(TestCase): def test_abstract(self): # The Student and Worker models both have 'name' and 'age' fields on # them and inherit the __unicode__() method, just as with normal Python # subclassing. This is useful if you want to factor out common # information for programming purposes, but still completely # independent separate models at the database level. w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker") Worker.objects.create(name="Barney", age=34, job="Quarry worker") s = Student.objects.create(name="Pebbles", age=5, school_class="1B") self.assertEqual(six.text_type(w1), "Worker Fred") self.assertEqual(six.text_type(s), "Student Pebbles") # The children inherit the Meta class of their parents (if they don't # specify their own). self.assertQuerysetEqual( Worker.objects.values("name"), [ {"name": "Barney"}, {"name": "Fred"}, ], lambda o: o ) # Since Student does not subclass CommonInfo's Meta, it has the effect # of completely overriding it. So ordering by name doesn't take place # for Students. self.assertEqual(Student._meta.ordering, []) # However, the CommonInfo class cannot be used as a normal model (it # doesn't exist as a model). self.assertRaises(AttributeError, lambda: CommonInfo.objects.all()) def test_reverse_relation_for_different_hierarchy_tree(self): # Even though p.supplier for a Place 'p' (a parent of a Supplier), a # Restaurant object cannot access that reverse relation, since it's not # part of the Place-Supplier Hierarchy. self.assertQuerysetEqual(Place.objects.filter(supplier__name="foo"), []) self.assertRaises(FieldError, Restaurant.objects.filter, supplier__name="foo") def test_model_with_distinct_accessors(self): # The Post model has distinct accessors for the Comment and Link models. post = Post.objects.create(title="Lorem Ipsum") post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True) post.attached_link_set.create( content="The Web framework for perfections with deadlines.", url="http://www.djangoproject.com/" ) # The Post model doesn't have an attribute called # 'attached_%(class)s_set'. self.assertRaises( AttributeError, getattr, post, "attached_%(class)s_set" ) def test_meta_fields_and_ordering(self): # Make sure Restaurant and ItalianRestaurant have the right fields in # the right order. 
self.assertEqual( [f.name for f in Restaurant._meta.fields], ["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef"] ) self.assertEqual( [f.name for f in ItalianRestaurant._meta.fields], ["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef", "restaurant_ptr", "serves_gnocchi"], ) self.assertEqual(Restaurant._meta.ordering, ["-rating"]) def test_custompk_m2m(self): b = Base.objects.create() b.titles.add(Title.objects.create(title="foof")) s = SubBase.objects.create(sub_id=b.id) b = Base.objects.get(pk=s.id) self.assertNotEqual(b.pk, s.pk) # Low-level test for related_val self.assertEqual(s.titles.related_val, (s.id,)) # Higher level test for correct query values (title foof not # accidentally found). self.assertQuerysetEqual(s.titles.all(), []) def test_update_parent_filtering(self): """ Test that updating a field of a model subclass doesn't issue an UPDATE query constrained by an inner query. Refs #10399 """ supplier = Supplier.objects.create( name='Central market', address='610 some street', ) # Capture the expected query in a database agnostic way with CaptureQueriesContext(connection) as captured_queries: Place.objects.filter(pk=supplier.pk).update(name=supplier.name) expected_sql = captured_queries[0]['sql'] # Capture the queries executed when a subclassed model instance is saved. with CaptureQueriesContext(connection) as captured_queries: supplier.save(update_fields=('name',)) for query in captured_queries: sql = query['sql'] if 'UPDATE' in sql: self.assertEqual(expected_sql, sql) def test_eq(self): # Equality doesn't transfer in multitable inheritance. self.assertNotEqual(Place(id=1), Restaurant(id=1)) self.assertNotEqual(Restaurant(id=1), Place(id=1)) def test_mixin_init(self): m = MixinModel() self.assertEqual(m.other_attr, 1) class ModelInheritanceDataTests(TestCase): @classmethod def setUpTestData(cls): cls.restaurant = Restaurant.objects.create( name="Demon Dogs", address="944 W. Fullerton", serves_hot_dogs=True, serves_pizza=False, rating=2, ) chef = Chef.objects.create(name="Albert") cls.italian_restaurant = ItalianRestaurant.objects.create( name="Ristorante Miron", address="1234 W. Ash", serves_hot_dogs=False, serves_pizza=False, serves_gnocchi=True, rating=4, chef=chef, ) def test_filter_inherited_model(self): self.assertQuerysetEqual( ItalianRestaurant.objects.filter(address="1234 W. Ash"), [ "Ristorante Miron", ], attrgetter("name") ) def test_update_inherited_model(self): self.italian_restaurant.address = "1234 W. Elm" self.italian_restaurant.save() self.assertQuerysetEqual( ItalianRestaurant.objects.filter(address="1234 W. Elm"), [ "Ristorante Miron", ], attrgetter("name") ) def test_parent_fields_available_for_filtering_in_child_model(self): # Parent fields can be used directly in filters on the child model. self.assertQuerysetEqual( Restaurant.objects.filter(name="Demon Dogs"), [ "Demon Dogs", ], attrgetter("name") ) self.assertQuerysetEqual( ItalianRestaurant.objects.filter(address="1234 W. Ash"), [ "Ristorante Miron", ], attrgetter("name") ) def test_filter_on_parent_returns_object_of_parent_type(self): # Filters against the parent model return objects of the parent's type. p = Place.objects.get(name="Demon Dogs") self.assertIs(type(p), Place) def test_parent_child_one_to_one_link(self): # Since the parent and child are linked by an automatically created # OneToOneField, you can get from the parent to the child by using the # child's name. 
self.assertEqual( Place.objects.get(name="Demon Dogs").restaurant, Restaurant.objects.get(name="Demon Dogs") ) self.assertEqual( Place.objects.get(name="Ristorante Miron").restaurant.italianrestaurant, ItalianRestaurant.objects.get(name="Ristorante Miron") ) self.assertEqual( Restaurant.objects.get(name="Ristorante Miron").italianrestaurant, ItalianRestaurant.objects.get(name="Ristorante Miron") ) def test_parent_child_one_to_one_link_on_nonrelated_objects(self): # This won't work because the Demon Dogs restaurant is not an Italian # restaurant. self.assertRaises( ItalianRestaurant.DoesNotExist, lambda: Place.objects.get(name="Demon Dogs").restaurant.italianrestaurant ) def test_inherited_does_not_exist_exception(self): # An ItalianRestaurant which does not exist is also a Place which does # not exist. self.assertRaises( Place.DoesNotExist, ItalianRestaurant.objects.get, name="The Noodle Void" ) def test_inherited_multiple_objects_returned_exception(self): # MultipleObjectsReturned is also inherited. self.assertRaises( Place.MultipleObjectsReturned, Restaurant.objects.get, id__lt=12321 ) def test_related_objects_for_inherited_models(self): # Related objects work just as they normally do. s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St") s1.customers = [self.restaurant, self.italian_restaurant] s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St") s2.customers = [self.italian_restaurant] # This won't work because the Place we select is not a Restaurant (it's # a Supplier). p = Place.objects.get(name="Joe's Chickens") self.assertRaises( Restaurant.DoesNotExist, lambda: p.restaurant ) self.assertEqual(p.supplier, s1) self.assertQuerysetEqual( self.italian_restaurant.provider.order_by("-name"), [ "Luigi's Pasta", "Joe's Chickens" ], attrgetter("name") ) self.assertQuerysetEqual( Restaurant.objects.filter(provider__name__contains="Chickens"), [ "Ristorante Miron", "Demon Dogs", ], attrgetter("name") ) self.assertQuerysetEqual( ItalianRestaurant.objects.filter(provider__name__contains="Chickens"), [ "Ristorante Miron", ], attrgetter("name"), ) ParkingLot.objects.create( name="Main St", address="111 Main St", main_site=s1 ) ParkingLot.objects.create( name="Well Lit", address="124 Sesame St", main_site=self.italian_restaurant ) self.assertEqual( Restaurant.objects.get(lot__name="Well Lit").name, "Ristorante Miron" ) def test_update_works_on_parent_and_child_models_at_once(self): # The update() command can update fields in parent and child classes at # once (although it executed multiple SQL queries to do so). rows = Restaurant.objects.filter( serves_hot_dogs=True, name__contains="D" ).update( name="Demon Puppies", serves_hot_dogs=False ) self.assertEqual(rows, 1) r1 = Restaurant.objects.get(pk=self.restaurant.pk) self.assertFalse(r1.serves_hot_dogs) self.assertEqual(r1.name, "Demon Puppies") def test_values_works_on_parent_model_fields(self): # The values() command also works on fields from parent models. self.assertQuerysetEqual( ItalianRestaurant.objects.values("name", "rating"), [ {"rating": 4, "name": "Ristorante Miron"}, ], lambda o: o ) def test_select_related_works_on_parent_model_fields(self): # select_related works with fields from the parent object as if they # were a normal part of the model. 
self.assertNumQueries( 2, lambda: ItalianRestaurant.objects.all()[0].chef ) self.assertNumQueries( 1, lambda: ItalianRestaurant.objects.select_related("chef")[0].chef ) def test_select_related_defer(self): """ #23370 - Should be able to defer child fields when using select_related() from parent to child. """ qs = (Restaurant.objects .select_related("italianrestaurant") .defer("italianrestaurant__serves_gnocchi") .order_by("rating")) # Test that the field was actually deferred with self.assertNumQueries(2): objs = list(qs.all()) self.assertTrue(objs[1].italianrestaurant.serves_gnocchi) # Test that model fields where assigned correct values self.assertEqual(qs[0].name, 'Demon Dogs') self.assertEqual(qs[0].rating, 2) self.assertEqual(qs[1].italianrestaurant.name, 'Ristorante Miron') self.assertEqual(qs[1].italianrestaurant.rating, 4) def test_update_query_counts(self): """ Test that update queries do not generate non-necessary queries. Refs #18304. """ with self.assertNumQueries(3): self.italian_restaurant.save() def test_filter_inherited_on_null(self): # Refs #12567 Supplier.objects.create( name="Central market", address="610 some street", ) self.assertQuerysetEqual( Place.objects.filter(supplier__isnull=False), [ "Central market", ], attrgetter("name") ) self.assertQuerysetEqual( Place.objects.filter(supplier__isnull=True).order_by("name"), [ "Demon Dogs", "Ristorante Miron", ], attrgetter("name") ) def test_exclude_inherited_on_null(self): # Refs #12567 Supplier.objects.create( name="Central market", address="610 some street", ) self.assertQuerysetEqual( Place.objects.exclude(supplier__isnull=False).order_by("name"), [ "Demon Dogs", "Ristorante Miron", ], attrgetter("name") ) self.assertQuerysetEqual( Place.objects.exclude(supplier__isnull=True), [ "Central market", ], attrgetter("name") ) class InheritanceSameModelNameTests(TransactionTestCase): available_apps = ['model_inheritance'] def setUp(self): # The Title model has distinct accessors for both # model_inheritance.Copy and model_inheritance_same_model_name.Copy # models. self.title = Title.objects.create(title='Lorem Ipsum') def test_inheritance_related_name(self): self.assertEqual( self.title.attached_model_inheritance_copy_set.create( content='Save $ on V1agr@', url='http://v1agra.com/', title='V1agra is spam', ), Copy.objects.get( content='Save $ on V1agr@', )) def test_inheritance_with_same_model_name(self): with self.modify_settings( INSTALLED_APPS={'append': ['model_inheritance.same_model_name']}): call_command('migrate', verbosity=0, run_syncdb=True) from .same_model_name.models import Copy copy = self.title.attached_same_model_name_copy_set.create( content='The Web framework for perfectionists with deadlines.', url='http://www.djangoproject.com/', title='Django Rocks' ) self.assertEqual( copy, Copy.objects.get( content='The Web framework for perfectionists with deadlines.', )) # We delete the copy manually so that it doesn't block the flush # command under Oracle (which does not cascade deletions). copy.delete() def test_related_name_attribute_exists(self): # The Post model doesn't have an attribute called 'attached_%(app_label)s_%(class)s_set'. 
self.assertFalse(hasattr(self.title, 'attached_%(app_label)s_%(class)s_set')) class InheritanceUniqueTests(TestCase): @classmethod def setUpTestData(cls): cls.grand_parent = GrandParent.objects.create( email='grand_parent@example.com', first_name='grand', last_name='parent', ) def test_unique(self): grand_child = GrandChild( email=self.grand_parent.email, first_name='grand', last_name='child', ) msg = 'Grand parent with this Email already exists.' with self.assertRaisesMessage(ValidationError, msg): grand_child.validate_unique() def test_unique_together(self): grand_child = GrandChild( email='grand_child@example.com', first_name=self.grand_parent.first_name, last_name=self.grand_parent.last_name, ) msg = 'Grand parent with this First name and Last name already exists.' with self.assertRaisesMessage(ValidationError, msg): grand_child.validate_unique()
bsd-3-clause
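The tests above exercise models defined elsewhere in `tests/model_inheritance/models.py`. As a reference point, here is a minimal sketch of the two inheritance styles they rely on; it is a reduced subset (the real `Restaurant` also has `rating`, `chef`, and more), and it assumes a configured Django project.

from django.db import models


class CommonInfo(models.Model):
    """Abstract base: no table of its own; fields are copied into children."""
    name = models.CharField(max_length=50)
    age = models.PositiveIntegerField()

    class Meta:
        abstract = True
        ordering = ['name']


class Worker(CommonInfo):
    job = models.CharField(max_length=50)


class Place(models.Model):
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)


class Restaurant(Place):
    # Multi-table inheritance: Django adds an implicit place_ptr
    # OneToOneField, which is what makes `place.restaurant` resolve in
    # tests like test_parent_child_one_to_one_link above.
    serves_hot_dogs = models.BooleanField(default=False)
    serves_pizza = models.BooleanField(default=False)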
Natim/sentry
src/sentry/migrations/0053_auto__del_projectmember__del_unique_projectmember_project_user.py
36
19314
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Removing unique constraint on 'ProjectMember', fields ['project', 'user'] db.delete_unique('sentry_projectmember', ['project_id', 'user_id']) # Deleting model 'ProjectMember' db.delete_table('sentry_projectmember') def backwards(self, orm): # Adding model 'ProjectMember' db.create_table('sentry_projectmember', ( ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)), ('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(related_name='member_set', to=orm['sentry.Project'])), ('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(related_name='sentry_project_set', to=orm['sentry.User'])), ('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)), ('type', self.gf('django.db.models.fields.IntegerField')(default=0)), ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)), )) db.send_create_signal('sentry', ['ProjectMember']) # Adding unique constraint on 'ProjectMember', fields ['project', 'user'] db.create_unique('sentry_projectmember', ['project_id', 'user_id']) models = { 'sentry.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 7, 57, 35, 828536)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 7, 57, 35, 828402)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'sentry.event': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 
'null': 'True', 'db_column': "'message_id'"}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}), 'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, 'sentry.filtervalue': { 'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.group': { 'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}), 'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'}) }, 'sentry.groupbookmark': { 'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'}, 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}), 
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"}) }, 'sentry.groupmeta': { 'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'}, 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.messagecountbyminute': { 'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'}, 'date': ('django.db.models.fields.DateTimeField', [], {}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.messagefiltervalue': { 'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.messageindex': { 'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'}, 'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'sentry.option': { 'Meta': {'object_name': 'Option'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'value': ('picklefield.fields.PickledObjectField', [], {}) }, 'sentry.pendingteammember': { 'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], 
{'primary_key': 'True'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'sentry.project': { 'Meta': {'object_name': 'Project'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'db_index': 'True'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'}) }, 'sentry.projectcountbyminute': { 'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'}, 'date': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.projectdomain': { 'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'}, 'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"}) }, 'sentry.projectkey': { 'Meta': {'object_name': 'ProjectKey'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.projectoption': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'value': ('picklefield.fields.PickledObjectField', [], {}) }, 'sentry.searchdocument': { 'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 
'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}) }, 'sentry.searchtoken': { 'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'}, 'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}), 'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'sentry.team': { 'Meta': {'object_name': 'Team'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}) }, 'sentry.teammember': { 'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"}) }, 'sentry.view': { 'Meta': {'object_name': 'View'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}) } } complete_apps = ['sentry']
bsd-3-clause
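For readers unfamiliar with South (the pre-Django-1.7 migration tool used above), the essential contract is that `forwards` and `backwards` mirror each other, and that a unique constraint is dropped before its table. An illustrative sketch with a hypothetical `example_member` table, assuming South is installed:

from south.db import db
from south.v2 import SchemaMigration


class Migration(SchemaMigration):
    """Illustrative only: drop a table going forward, recreate it going
    back, mirroring the delete_unique/delete_table ordering used above."""

    def forwards(self, orm):
        # Constraint first, then the table that carries it.
        db.delete_unique('example_member', ['project_id', 'user_id'])
        db.delete_table('example_member')

    def backwards(self, orm):
        # Recreate in the opposite order: table first, then constraint.
        db.create_table('example_member', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project_id', self.gf('django.db.models.fields.IntegerField')()),
            ('user_id', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.create_unique('example_member', ['project_id', 'user_id'])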
termie/nova-migration-demo
nova/tests/test_instance_types.py
2
4177
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Ken Pepple
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
Unit Tests for instance types code
"""
import time

from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.compute import instance_types
from nova.db.sqlalchemy.session import get_session
from nova.db.sqlalchemy import models

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.compute')


class InstanceTypeTestCase(test.TestCase):
    """Test cases for instance type code"""
    def setUp(self):
        super(InstanceTypeTestCase, self).setUp()
        session = get_session()
        max_flavorid = session.query(models.InstanceTypes).\
                               order_by("flavorid desc").\
                               first()
        max_id = session.query(models.InstanceTypes).\
                         order_by("id desc").\
                         first()
        self.flavorid = max_flavorid["flavorid"] + 1
        self.id = max_id["id"] + 1
        self.name = str(int(time.time()))

    def test_instance_type_create_then_delete(self):
        """Ensure instance types can be created"""
        starting_inst_list = instance_types.get_all_types()
        instance_types.create(self.name, 256, 1, 120, self.flavorid)
        new = instance_types.get_all_types()
        self.assertNotEqual(len(starting_inst_list),
                            len(new),
                            'instance type was not created')
        instance_types.destroy(self.name)
        self.assertEqual(1,
                         instance_types.get_instance_type(self.id)["deleted"])
        self.assertEqual(starting_inst_list, instance_types.get_all_types())
        instance_types.purge(self.name)
        self.assertEqual(len(starting_inst_list),
                         len(instance_types.get_all_types()),
                         'instance type not purged')

    def test_get_all_instance_types(self):
        """Ensures that all instance types can be retrieved"""
        session = get_session()
        total_instance_types = session.query(models.InstanceTypes).count()
        inst_types = instance_types.get_all_types()
        self.assertEqual(total_instance_types, len(inst_types))

    def test_invalid_create_args_should_fail(self):
        """Ensures that instance type creation fails with invalid args"""
        self.assertRaises(exception.InvalidInput,
                          instance_types.create,
                          self.name, 0, 1, 120, self.flavorid)
        self.assertRaises(exception.InvalidInput,
                          instance_types.create,
                          self.name, 256, -1, 120, self.flavorid)
        self.assertRaises(exception.InvalidInput,
                          instance_types.create,
                          self.name, 256, 1, "aa", self.flavorid)

    def test_non_existant_inst_type_shouldnt_delete(self):
        """Ensures that destroying a non-existent instance type fails"""
        self.assertRaises(exception.ApiError,
                          instance_types.destroy, "sfsfsdfdfs")

    def test_repeated_inst_types_should_raise_api_error(self):
        """Ensures that creating a duplicate instance type raises ApiError"""
        new_name = self.name + "dup"
        instance_types.create(new_name, 256, 1, 120, self.flavorid + 1)
        instance_types.destroy(new_name)
        self.assertRaises(exception.ApiError,
                          instance_types.create,
                          new_name, 256, 1, 120, self.flavorid)
apache-2.0
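The `test_invalid_create_args_should_fail` case above is driven entirely by the `assertRaises(exc, callable, *args)` idiom. A self-contained sketch of that idiom follows; `create_flavor` and its checks are hypothetical stand-ins for `nova.compute.instance_types.create`, which performs its real validation internally.

import unittest


class InvalidInput(Exception):
    """Stand-in for nova.exception.InvalidInput."""


def create_flavor(name, memory_mb, vcpus, root_gb, flavorid):
    """Illustrative subset of the input checks instance_types.create makes."""
    if memory_mb <= 0:
        raise InvalidInput('memory_mb must be positive')
    if vcpus < 0:
        raise InvalidInput('vcpus must be non-negative')
    if not isinstance(root_gb, int):
        raise InvalidInput('root_gb must be an integer')
    return {'name': name, 'memory_mb': memory_mb, 'vcpus': vcpus,
            'root_gb': root_gb, 'flavorid': flavorid}


class CreateFlavorTest(unittest.TestCase):
    def test_invalid_args_raise(self):
        # Mirrors the three failing cases exercised in the nova test above.
        self.assertRaises(InvalidInput, create_flavor, 'm1', 0, 1, 120, 6)
        self.assertRaises(InvalidInput, create_flavor, 'm1', 256, -1, 120, 6)
        self.assertRaises(InvalidInput, create_flavor, 'm1', 256, 1, 'aa', 6)


if __name__ == '__main__':
    unittest.main()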
zmalik/mesos
src/python/cli_new/lib/cli/plugins/base.py
4
5418
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Plugin's Base Class
"""

import sys

import cli

from cli.docopt import docopt

PLUGIN_NAME = "base-plugin"
PLUGIN_CLASS = "PluginBase"

VERSION = "Mesos Plugin Base 1.0"

SHORT_HELP = "This is the base plugin from which all other plugins inherit."

USAGE = \
"""
{short_help}

Usage:
  mesos {plugin} (-h | --help)
  mesos {plugin} --version
  mesos {plugin} <command> (-h | --help)
  mesos {plugin} <command> [<args>...] [options]

Options:
  -h --help  Show this screen.
  --version  Show version info.

Commands:
{commands}
"""

SUBCOMMAND_USAGE = \
"""{short_help}

Usage:
  mesos {plugin} {command} (-h | --help)
  mesos {plugin} {command} --version
  mesos {plugin} {command} {arguments} [options]

Options:
{flags}

Description:
{long_help}
"""


class PluginBase(object):
    """
    Base class from which all CLI plugins should inherit.
    """
    # pylint: disable=R0903
    COMMANDS = {}

    def __setup__(self, command, argv):
        pass

    def __module_reference__(self):
        return sys.modules[self.__module__]

    def __init__(self, settings, config):
        # pylint: disable=C0103
        self.PLUGIN_NAME = PLUGIN_NAME
        self.PLUGIN_CLASS = PLUGIN_CLASS
        self.VERSION = VERSION
        self.SHORT_HELP = SHORT_HELP
        self.USAGE = USAGE

        module = self.__module_reference__()
        if hasattr(module, "PLUGIN_NAME"):
            self.PLUGIN_NAME = getattr(module, "PLUGIN_NAME")
        if hasattr(module, "PLUGIN_CLASS"):
            self.PLUGIN_CLASS = getattr(module, "PLUGIN_CLASS")
        if hasattr(module, "VERSION"):
            self.VERSION = getattr(module, "VERSION")
        if hasattr(module, "SHORT_HELP"):
            self.SHORT_HELP = getattr(module, "SHORT_HELP")
        if hasattr(module, "USAGE"):
            self.USAGE = getattr(module, "USAGE")

        self.settings = settings
        self.config = config

    def __autocomplete__(self, command, current_word, argv):
        # pylint: disable=W0612,W0613,R0201
        return ("default", [])

    def __autocomplete_base__(self, current_word, argv):
        option = "default"

        # <command>
        comp_words = list(self.COMMANDS.keys())
        comp_words = cli.util.completions(comp_words, current_word, argv)
        if comp_words is not None:
            return (option, comp_words)

        # <args>...
        # pylint: disable=R0204
        comp_words = self.__autocomplete__(argv[0], current_word, argv[1:])

        # In general, we expect a tuple to be returned from __autocomplete__,
        # with the first element being a valid autocomplete option, and the
        # second being a list of completion words. However, in the common
        # case we usually use the default option, so it's OK for a plugin to
        # just return a list. We will add the "default" option for them.
        if isinstance(comp_words, tuple):
            option, comp_words = comp_words

        return (option, comp_words)

    def main(self, argv):
        """
        Takes the arguments passed down from the top-level `mesos` command,
        parses them, and dispatches to the appropriate subcommand method.
        """
        command_strings = cli.util.format_commands_help(self.COMMANDS)
        usage = self.USAGE.format(
            plugin=self.PLUGIN_NAME,
            short_help=self.SHORT_HELP,
            commands=command_strings)

        arguments = docopt(
            usage,
            argv=argv,
            version=self.VERSION,
            program="mesos " + self.PLUGIN_NAME,
            options_first=True)

        cmd = arguments["<command>"]
        argv = arguments["<args>"]

        if cmd in self.COMMANDS.keys():
            if "external" not in self.COMMANDS[cmd]:
                argument_format, short_help, long_help, flag_format = \
                    cli.util.format_subcommands_help(self.COMMANDS[cmd])

                usage = SUBCOMMAND_USAGE.format(
                    plugin=self.PLUGIN_NAME,
                    command=cmd,
                    arguments=argument_format,
                    flags=flag_format,
                    short_help=short_help,
                    long_help=long_help)

                arguments = docopt(
                    usage,
                    argv=argv,
                    program="mesos " + self.PLUGIN_NAME + " " + cmd,
                    version=self.VERSION,
                    options_first=True)

            if "alias" in self.COMMANDS[cmd]:
                cmd = self.COMMANDS[cmd]["alias"]

            self.__setup__(cmd, argv)
            getattr(self, cmd.replace("-", "_"))(arguments)
        else:
            self.main(["--help"])
apache-2.0
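A concrete plugin for the base class above populates `COMMANDS` and names its methods after its commands, since `main` dispatches via `cmd.replace("-", "_")`. A toy sketch follows, assuming the Mesos CLI source tree is on `PYTHONPATH`; the plugin name, command, and the exact `COMMANDS` entry keys ("arguments", "flags", "short_help", "long_help") are inferred from the four values `format_subcommands_help` unpacks, and are illustrative.

from cli.plugins.base import PluginBase  # assumed import path

PLUGIN_NAME = "greet"
PLUGIN_CLASS = "Greet"
VERSION = "Greet plugin 0.1"
SHORT_HELP = "Toy plugin showing the COMMANDS dispatch."


class Greet(PluginBase):
    # main() looks up <command> here, re-parses with docopt, and then calls
    # the method named after the command, dashes turned to underscores.
    COMMANDS = {
        "say-hello": {
            "arguments": ["<name>"],
            "flags": {},
            "short_help": "Print a greeting.",
            "long_help": "Print a greeting for the given <name>.",
        },
    }

    def say_hello(self, argv):
        print("hello, " + argv["<name>"])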
kinsamanka/machinekit
lib/python/rs274/interpret.py
36
5499
# This is a component of AXIS, a front-end for emc
# Copyright 2004, 2005, 2006 Jeff Epler <jepler@unpythonic.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

import math, gcode

class Translated:
    g92_offset_x = g92_offset_y = g92_offset_z = 0
    g92_offset_a = g92_offset_b = g92_offset_c = 0
    g92_offset_u = g92_offset_v = g92_offset_w = 0
    g5x_offset_x = g5x_offset_y = g5x_offset_z = 0
    g5x_offset_a = g5x_offset_b = g5x_offset_c = 0
    g5x_offset_u = g5x_offset_v = g5x_offset_w = 0
    rotation_xy = 0

    def rotate_and_translate(self, x, y, z, a, b, c, u, v, w):
        x += self.g92_offset_x
        y += self.g92_offset_y
        z += self.g92_offset_z
        a += self.g92_offset_a
        b += self.g92_offset_b
        c += self.g92_offset_c
        u += self.g92_offset_u
        v += self.g92_offset_v
        w += self.g92_offset_w

        if self.rotation_xy:
            rotx = x * self.rotation_cos - y * self.rotation_sin
            y = x * self.rotation_sin + y * self.rotation_cos
            x = rotx

        x += self.g5x_offset_x
        y += self.g5x_offset_y
        z += self.g5x_offset_z
        a += self.g5x_offset_a
        b += self.g5x_offset_b
        c += self.g5x_offset_c
        u += self.g5x_offset_u
        v += self.g5x_offset_v
        w += self.g5x_offset_w

        return [x, y, z, a, b, c, u, v, w]

    def straight_traverse(self, *args):
        self.straight_traverse_translated(*self.rotate_and_translate(*args))

    def straight_feed(self, *args):
        self.straight_feed_translated(*self.rotate_and_translate(*args))

    def set_g5x_offset(self, index, x, y, z, a, b, c, u=None, v=None, w=None):
        self.g5x_index = index
        self.g5x_offset_x = x
        self.g5x_offset_y = y
        self.g5x_offset_z = z
        self.g5x_offset_a = a
        self.g5x_offset_b = b
        self.g5x_offset_c = c
        self.g5x_offset_u = u
        self.g5x_offset_v = v
        self.g5x_offset_w = w

    def set_g92_offset(self, x, y, z, a, b, c, u=None, v=None, w=None):
        self.g92_offset_x = x
        self.g92_offset_y = y
        self.g92_offset_z = z
        self.g92_offset_a = a
        self.g92_offset_b = b
        self.g92_offset_c = c
        self.g92_offset_u = u
        self.g92_offset_v = v
        self.g92_offset_w = w

    def set_xy_rotation(self, theta):
        self.rotation_xy = theta
        t = math.radians(theta)
        self.rotation_sin = math.sin(t)
        self.rotation_cos = math.cos(t)

class ArcsToSegmentsMixin:
    plane = 1
    arcdivision = 64

    def set_plane(self, plane):
        self.plane = plane

    def arc_feed(self, x1, y1, cx, cy, rot, z1, a, b, c, u, v, w):
        self.lo = tuple(self.lo)
        segs = gcode.arc_to_segments(self, x1, y1, cx, cy, rot, z1,
                                     a, b, c, u, v, w, self.arcdivision)
        self.straight_arcsegments(segs)

class PrintCanon:
    def set_g5x_offset(self, *args):
        print "set_g5x_offset", args

    def set_g92_offset(self, *args):
        print "set_g92_offset", args

    def next_line(self, state):
        print "next_line", state.sequence_number
        self.state = state

    def set_plane(self, plane):
        print "set plane", plane

    def set_feed_rate(self, arg):
        print "set feed rate", arg

    def comment(self, arg):
        print "#", arg

    def straight_traverse(self, *args):
        print "straight_traverse %.4g %.4g %.4g %.4g %.4g %.4g" % args

    def straight_feed(self, *args):
        print "straight_feed %.4g %.4g %.4g %.4g %.4g %.4g" % args

    def dwell(self, arg):
        if arg < .1:
            print "dwell %f ms" % (1000 * arg)
        else:
            print "dwell %f seconds" % arg

    def arc_feed(self, *args):
        print "arc_feed %.4g %.4g %.4g %.4g %.4g %.4g %.4g %.4g %.4g" % args

class StatMixin:
    def __init__(self, s, r):
        self.s = s
        self.tools = list(s.tool_table)
        self.random = r

    def change_tool(self, pocket):
        if self.random:
            self.tools[0], self.tools[pocket] = \
                self.tools[pocket], self.tools[0]
        elif pocket == 0:
            self.tools[0] = \
                -1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \
                0.0, 0.0, 0
        else:
            self.tools[0] = self.tools[pocket]

    def get_tool(self, pocket):
        if pocket >= 0 and pocket < len(self.tools):
            return tuple(self.tools[pocket])
        return -1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \
            0.0, 0

    def get_external_angular_units(self):
        return self.s.angular_units or 1.0

    def get_external_length_units(self):
        return self.s.linear_units or 1.0

    def get_axis_mask(self):
        return self.s.axis_mask

    def get_block_delete(self):
        return self.s.block_delete

# vim:ts=8:sts=4:et:
lgpl-2.1
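For reference, a minimal standalone sketch (not AXIS code) of the transform order that Translated.rotate_and_translate implements above: the G92 offset is applied first, then the XY rotation, then the G5x offset.

import math

def rotate_translate_xy(x, y, g92=(0.0, 0.0), theta_deg=0.0, g5x=(0.0, 0.0)):
    # G92 offset first, mirroring rotate_and_translate
    x += g92[0]
    y += g92[1]
    # then the optional XY rotation
    t = math.radians(theta_deg)
    x, y = (x * math.cos(t) - y * math.sin(t),
            x * math.sin(t) + y * math.cos(t))
    # the G5x offset is applied last
    return x + g5x[0], y + g5x[1]

# rotating (1, 0) by 90 degrees and then shifting by (10, 0) lands near (10, 1)
print(rotate_translate_xy(1.0, 0.0, theta_deg=90.0, g5x=(10.0, 0.0)))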
mgedmin/ansible
test/units/parsing/yaml/test_objects.py
14
4311
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # Copyright 2016, Adrian Likins <alikins@redhat.com> # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.tests import unittest from ansible.parsing import vault from ansible.parsing.yaml.loader import AnsibleLoader # module under test from ansible.parsing.yaml import objects from units.mock.yaml_helper import YamlTestUtils class TestAnsibleVaultUnicodeNoVault(unittest.TestCase, YamlTestUtils): def test_empty_init(self): self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode) def test_empty_string_init(self): seq = ''.encode('utf8') self.assert_values(seq) def test_empty_byte_string_init(self): seq = b'' self.assert_values(seq) def _assert_values(self, avu, seq): self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode) self.assertTrue(avu.vault is None) # AnsibleVaultEncryptedUnicode without a vault should never == any string self.assertNotEquals(avu, seq) def assert_values(self, seq): avu = objects.AnsibleVaultEncryptedUnicode(seq) self._assert_values(avu, seq) def test_single_char(self): seq = 'a'.encode('utf8') self.assert_values(seq) def test_string(self): seq = 'some letters' self.assert_values(seq) def test_byte_string(self): seq = 'some letters'.encode('utf8') self.assert_values(seq) class TestAnsibleVaultEncryptedUnicode(unittest.TestCase, YamlTestUtils): def setUp(self): self.vault_password = "hunter42" self.good_vault = vault.VaultLib(self.vault_password) self.wrong_vault_password = 'not-hunter42' self.wrong_vault = vault.VaultLib(self.wrong_vault_password) self.vault = self.good_vault def _loader(self, stream): return AnsibleLoader(stream, vault_password=self.vault_password) def test_dump_load_cycle(self): aveu = self._from_plaintext('the test string for TestAnsibleVaultEncryptedUnicode.test_dump_load_cycle') self._dump_load_cycle(aveu) def assert_values(self, avu, seq): self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode) self.assertEquals(avu, seq) self.assertTrue(avu.vault is self.vault) self.assertIsInstance(avu.vault, vault.VaultLib) def _from_plaintext(self, seq): return objects.AnsibleVaultEncryptedUnicode.from_plaintext(seq, vault=self.vault) def _from_ciphertext(self, ciphertext): avu = objects.AnsibleVaultEncryptedUnicode(ciphertext) avu.vault = self.vault return avu def test_empty_init(self): self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode) def test_empty_string_init_from_plaintext(self): seq = '' avu = self._from_plaintext(seq) self.assert_values(avu,seq) def test_empty_unicode_init_from_plaintext(self): seq = u'' avu = self._from_plaintext(seq) self.assert_values(avu,seq) def test_string_from_plaintext(self): seq = 'some letters' avu = self._from_plaintext(seq) self.assert_values(avu,seq) def test_unicode_from_plaintext(self): seq = u'some letters' avu = self._from_plaintext(seq) 
self.assert_values(avu, seq) # TODO/FIXME: make sure a bad password fails differently than 'that's not encrypted' def test_empty_string_wrong_password(self): seq = '' self.vault = self.wrong_vault avu = self._from_plaintext(seq) self.assert_values(avu, seq)
gpl-3.0
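A hedged sketch of the API these tests exercise; vault internals vary across Ansible versions, so treat this as illustrative rather than canonical. The password value is borrowed from the tests above.

from ansible.parsing import vault
from ansible.parsing.yaml import objects

v = vault.VaultLib("hunter42")
avu = objects.AnsibleVaultEncryptedUnicode.from_plaintext("some letters", vault=v)
# with a vault attached, the encrypted unicode compares equal to its plaintext
assert avu == "some letters"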
Admin-backups/AR-B-P-B
www/cgi-bin/show_info.py
7
3356
#! /usr/bin/env python # -*- coding: utf-8 -*- import json import cgi import urllib2 # Get this machine's public IP myip = urllib2.urlopen('http://members.3322.org/dyndns/getip').read() myip=myip.strip() # Load the SSR JSON file f = file("/usr/local/shadowsocksr/mudb.json") mudb = json.load(f) # Receive the data submitted by the form form = cgi.FieldStorage() # Parse the submitted data getport = form['port'].value getpasswd = form['passwd'].value # Flags: was the port found, and was the password correct portexist=0 passwdcorrect=0 # Loop over the entries looking for the port for x in mudb: # The port counts as found when the input matches a JSON entry if(str(x[u"port"]) == str(getport)): portexist=1 if(str(x[u"passwd"]) == str(getpasswd)): passwdcorrect=1 jsonmethod=str(x[u"method"]) jsonobfs=str(x[u"obfs"]) jsonprotocol=str(x[u"protocol"]) break if(portexist==0): getport = "未找到此端口,请检查是否输入错误!" myip = "" getpasswd = "" jsonmethod = "" jsonprotocol = "" jsonobfs = "" if(portexist!=0 and passwdcorrect==0): getport = "连接密码输入错误,请重试" myip = "" getpasswd = "" jsonmethod = "" jsonprotocol = "" jsonobfs = "" header = ''' <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta content="IE=edge" http-equiv="X-UA-Compatible"> <meta content="initial-scale=1.0, width=device-width" name="viewport"> <title>连接信息</title> <!-- css --> <link href="../css/base.min.css" rel="stylesheet"> <!-- favicon --> <!-- ... --> <!-- ie --> <!--[if lt IE 9]> <script src="../js/html5shiv.js" type="text/javascript"></script> <script src="../js/respond.js" type="text/javascript"></script> <![endif]--> </head> <body> <div class="content"> <div class="content-heading"> <div class="container"> <h1 class="heading">&nbsp;&nbsp;连接信息</h1> </div> </div> <div class="content-inner"> <div class="container"> ''' footer = ''' </div> </div> </div> <footer class="footer"> <div class="container"> <p>Function Club</p> </div> </footer> <script src="../js/base.min.js" type="text/javascript"></script> </body> </html> ''' # Print the response content print header formhtml = ''' <div class="card-wrap"> <div class="row"> <div class="col-lg-4 col-sm-6"> <div class="card card-green"> <a class="card-side" href="/"><span class="card-heading">连接信息</span></a> <div class="card-main"> <div class="card-inner"> <p> <strong>服务器地址:</strong> %s </br></br> <strong>连接端口:</strong> %s </br></br> <strong>连接密码:</strong> %s </br></br> <strong>加密方式: </strong> %s </br></br> <strong>协议方式: </strong> </br>%s </br></br> <strong>混淆方式:</strong> </br>%s </p> </div> <div class="card-action"> <ul class="nav nav-list pull-left"> <li> <a href="../index.html"><span class="icon icon-check"></span>&nbsp;返回</a> </li> </ul> </div> </div> </div> </div> </div> </div> ''' print formhtml % (myip,getport,getpasswd,jsonmethod,jsonprotocol,jsonobfs) print footer f.close()
gpl-3.0
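The port/password lookup above, restated as a self-contained Python 3 sketch; the field names mirror mudb.json, and the sample credentials are hypothetical.

import json

def find_entry(entries, port, passwd):
    """Return the matching entry, or None when port/password don't match."""
    for entry in entries:
        if str(entry["port"]) == str(port):
            return entry if str(entry["passwd"]) == str(passwd) else None
    return None

with open("/usr/local/shadowsocksr/mudb.json") as f:
    entries = json.load(f)

match = find_entry(entries, "8388", "example-password")  # hypothetical values
if match is not None:
    print(match["method"], match["protocol"], match["obfs"])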
MingdaZhou/gnuradio
gr-channels/python/channels/distortion_3_gen.py
60
2399
#!/usr/bin/env python ################################################## # Gnuradio Python Flow Graph # Title: Third Order Distortion # Author: mettus # Generated: Thu Aug 1 12:37:59 2013 ################################################## from gnuradio import blocks from gnuradio import gr from gnuradio.filter import firdes import math class distortion_3_gen(gr.hier_block2): def __init__(self, beta=0): gr.hier_block2.__init__( self, "Third Order Distortion", gr.io_signature(1, 1, gr.sizeof_gr_complex*1), gr.io_signature(1, 1, gr.sizeof_gr_complex*1), ) ################################################## # Parameters ################################################## self.beta = beta ################################################## # Blocks ################################################## self.blocks_null_source_0 = blocks.null_source(gr.sizeof_float*1) self.blocks_multiply_xx_0 = blocks.multiply_vcc(1) self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vcc((beta, )) self.blocks_float_to_complex_0 = blocks.float_to_complex(1) self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1) self.blocks_add_xx_0 = blocks.add_vcc(1) ################################################## # Connections ################################################## self.connect((self.blocks_float_to_complex_0, 0), (self.blocks_multiply_xx_0, 1)) self.connect((self.blocks_null_source_0, 0), (self.blocks_float_to_complex_0, 1)) self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.blocks_float_to_complex_0, 0)) self.connect((self.blocks_multiply_const_vxx_0, 0), (self.blocks_add_xx_0, 1)) self.connect((self.blocks_multiply_xx_0, 0), (self.blocks_multiply_const_vxx_0, 0)) self.connect((self, 0), (self.blocks_complex_to_mag_squared_0, 0)) self.connect((self, 0), (self.blocks_multiply_xx_0, 0)) self.connect((self, 0), (self.blocks_add_xx_0, 0)) self.connect((self.blocks_add_xx_0, 0), (self, 0)) # QT sink close method reimplementation def get_beta(self): return self.beta def set_beta(self, beta): self.beta = beta self.blocks_multiply_const_vxx_0.set_k((self.beta, ))
gpl-3.0
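A minimal flowgraph sketch showing how the hier block might be driven, assuming a standard GNU Radio install; the constant input makes the expected output easy to check by hand.

from gnuradio import gr, blocks

tb = gr.top_block()
src = blocks.vector_source_c([0.5 + 0.0j] * 100)  # constant complex samples
dist = distortion_3_gen(beta=0.1)                 # the block defined above
snk = blocks.vector_sink_c()
tb.connect(src, dist, snk)
tb.run()
# y = x + beta * |x|**2 * x  ->  0.5 + 0.1 * 0.25 * 0.5 = 0.5125
print(snk.data()[0])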
kou/arrow
docs/source/conf.py
4
13167
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import datetime import os import sys from unittest import mock import pyarrow sys.path.extend([ os.path.join(os.path.dirname(__file__), '..', '../..') ]) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'IPython.sphinxext.ipython_directive', 'IPython.sphinxext.ipython_console_highlighting', 'breathe' ] # Show members for classes in .. autosummary autodoc_default_options = { 'members': None, 'undoc-members': None, 'show-inheritance': None, 'inherited-members': None } # Breathe configuration breathe_projects = {"arrow_cpp": "../../cpp/apidoc/xml"} breathe_default_project = "arrow_cpp" # Overridden conditionally below autodoc_mock_imports = [] # ipython directive options ipython_mplbackend = '' # napoleon configuration napoleon_use_rtype = False # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffixes as a list of strings: # source_suffix = ['.rst'] autosummary_generate = True # The encoding of source files. # # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Apache Arrow' copyright = f'2016-{datetime.datetime.now().year} Apache Software Foundation' author = u'Apache Software Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = os.environ.get('ARROW_DOCS_VERSION', pyarrow.__version__) # The full version, including alpha/beta/rc tags. release = os.environ.get('ARROW_DOCS_VERSION', pyarrow.__version__) # The language for content autogenerated by Sphinx.
Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # # today = '' # # Else, today_fmt is used as the format for a strftime call. # # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. # # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'pydata_sphinx_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { "show_toc_level": 2, "google_analytics_id": "UA-107500873-1", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. # "<project> v<release> documentation" by default. # html_title = u'Apache Arrow v{}'.format(version) # A shorter title for the navigation bar. Default is the same as html_title. # # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = "_static/arrow.png" # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or # 32x32 pixels large. # html_favicon = "_static/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom fixes to the RTD theme html_css_files = ['theme_overrides.css'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # # html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. 
# # html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = { # '**': ['sidebar-logo.html', 'sidebar-search-bs.html', 'sidebar-nav-bs.html'], '**': ['docs-sidebar.html'], } # Additional templates that should be rendered to pages, maps page names to # template names. # # html_additional_pages = {} # If false, no module index is generated. # # html_domain_indices = True # If false, no index is generated. # # html_use_index = True # If true, the index is split into individual pages for each letter. # # html_split_index = False # If true, links to the reST sources are added to the pages. # # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' # # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. # # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'arrowdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'arrow.tex', u'Apache Arrow Documentation', u'Apache Arrow Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # # latex_use_parts = False # If true, show page references after internal links. # # latex_show_pagerefs = False # If true, show URL addresses after external links. # # latex_show_urls = False # Documents to append as an appendix to all manuals. # # latex_appendices = [] # It false, will not define \strong, \code, itleref, \crossref ... but only # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added # packages. # # latex_keep_old_macro_names = True # If false, no module index is generated. 
# # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'arrow', u'Apache Arrow Documentation', [author], 1) ] # If true, show URL addresses after external links. # # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'arrow', u'Apache Arrow Documentation', author, 'Apache Arrow', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # # texinfo_appendices = [] # If false, no module index is generated. # # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # # texinfo_no_detailmenu = False # -- Customization -------------------------------------------------------- # Conditional API doc generation # Sphinx has two features for conditional inclusion: # - The "only" directive # https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#including-content-based-on-tags # - The "ifconfig" extension # https://www.sphinx-doc.org/en/master/usage/extensions/ifconfig.html # # Both have issues, but "ifconfig" seems to work in this setting. try: import pyarrow.cuda cuda_enabled = True except ImportError: cuda_enabled = False # Mock pyarrow.cuda to avoid autodoc warnings. # XXX I can't get autodoc_mock_imports to work, so mock manually instead # (https://github.com/sphinx-doc/sphinx/issues/2174#issuecomment-453177550) pyarrow.cuda = sys.modules['pyarrow.cuda'] = mock.Mock() try: import pyarrow.flight flight_enabled = True except ImportError: flight_enabled = False pyarrow.flight = sys.modules['pyarrow.flight'] = mock.Mock() def setup(app): # Use a config value to indicate whether CUDA API docs can be generated. # This will also rebuild appropriately when the value changes. app.add_config_value('cuda_enabled', cuda_enabled, 'env') app.add_config_value('flight_enabled', flight_enabled, 'env')
apache-2.0
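The optional-import plus Mock substitution used above for pyarrow.cuda and pyarrow.flight, reduced to a generic sketch; "somepkg.optional" is a placeholder, not a real module, and the real conf.py also assigns the Mock to the attribute on the parent package.

import sys
from unittest import mock

try:
    import somepkg.optional  # placeholder for an optional compiled submodule
    optional_enabled = True
except ImportError:
    optional_enabled = False
    # Substituting a Mock keeps "import somepkg.optional" (and autodoc)
    # working even when the real extension is not built.
    sys.modules["somepkg.optional"] = mock.Mock()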
EricCline/CEM_inc
env/lib/python2.7/site-packages/IPython/nbconvert/filters/tests/test_highlight.py
8
2628
""" Module with tests for Highlight """ #----------------------------------------------------------------------------- # Copyright (c) 2013, the IPython Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from ...tests.base import TestsBase from ..highlight import Highlight2HTML, Highlight2Latex from IPython.config import Config import xml #----------------------------------------------------------------------------- # Class #----------------------------------------------------------------------------- highlight2html = Highlight2HTML() highlight2latex = Highlight2Latex() c = Config() c.Highlight2HTML.default_language='ruby' highlight2html_ruby = Highlight2HTML(config=c) class TestHighlight(TestsBase): """Contains test functions for highlight.py""" #Hello world test, magics test, blank string test tests = [ """ #Hello World Example def say(text): print(text) end say('Hello World!') """, """ %%pylab plot(x,y, 'r') """ ] tokens = [ ['Hello World Example', 'say', 'text', 'print', 'def'], ['pylab', 'plot']] def test_highlight2html(self): """highlight2html test""" for index, test in enumerate(self.tests): self._try_highlight(highlight2html, test, self.tokens[index]) def test_highlight2latex(self): """highlight2latex test""" for index, test in enumerate(self.tests): self._try_highlight(highlight2latex, test, self.tokens[index]) def test_parse_html_many_lang(self): ht = highlight2html(self.tests[0]) rb = highlight2html_ruby(self.tests[0]) for lang,tkns in [ ( ht, ('def','print') ), ( rb, ('def','end' ) ) ]: root = xml.etree.ElementTree.fromstring(lang) assert self._extract_tokens(root,'k') == set(tkns) def _extract_tokens(self, root, cls): return set(map(lambda x:x.text,root.findall(".//*[@class='"+cls+"']"))) def _try_highlight(self, method, test, tokens): """Try highlighting source, look for key tokens""" results = method(test) for token in tokens: assert token in results
mit
kingmotley/SickRage
lib/unidecode/x0ac.py
253
4709
data = ( 'ga', # 0x00 'gag', # 0x01 'gagg', # 0x02 'gags', # 0x03 'gan', # 0x04 'ganj', # 0x05 'ganh', # 0x06 'gad', # 0x07 'gal', # 0x08 'galg', # 0x09 'galm', # 0x0a 'galb', # 0x0b 'gals', # 0x0c 'galt', # 0x0d 'galp', # 0x0e 'galh', # 0x0f 'gam', # 0x10 'gab', # 0x11 'gabs', # 0x12 'gas', # 0x13 'gass', # 0x14 'gang', # 0x15 'gaj', # 0x16 'gac', # 0x17 'gak', # 0x18 'gat', # 0x19 'gap', # 0x1a 'gah', # 0x1b 'gae', # 0x1c 'gaeg', # 0x1d 'gaegg', # 0x1e 'gaegs', # 0x1f 'gaen', # 0x20 'gaenj', # 0x21 'gaenh', # 0x22 'gaed', # 0x23 'gael', # 0x24 'gaelg', # 0x25 'gaelm', # 0x26 'gaelb', # 0x27 'gaels', # 0x28 'gaelt', # 0x29 'gaelp', # 0x2a 'gaelh', # 0x2b 'gaem', # 0x2c 'gaeb', # 0x2d 'gaebs', # 0x2e 'gaes', # 0x2f 'gaess', # 0x30 'gaeng', # 0x31 'gaej', # 0x32 'gaec', # 0x33 'gaek', # 0x34 'gaet', # 0x35 'gaep', # 0x36 'gaeh', # 0x37 'gya', # 0x38 'gyag', # 0x39 'gyagg', # 0x3a 'gyags', # 0x3b 'gyan', # 0x3c 'gyanj', # 0x3d 'gyanh', # 0x3e 'gyad', # 0x3f 'gyal', # 0x40 'gyalg', # 0x41 'gyalm', # 0x42 'gyalb', # 0x43 'gyals', # 0x44 'gyalt', # 0x45 'gyalp', # 0x46 'gyalh', # 0x47 'gyam', # 0x48 'gyab', # 0x49 'gyabs', # 0x4a 'gyas', # 0x4b 'gyass', # 0x4c 'gyang', # 0x4d 'gyaj', # 0x4e 'gyac', # 0x4f 'gyak', # 0x50 'gyat', # 0x51 'gyap', # 0x52 'gyah', # 0x53 'gyae', # 0x54 'gyaeg', # 0x55 'gyaegg', # 0x56 'gyaegs', # 0x57 'gyaen', # 0x58 'gyaenj', # 0x59 'gyaenh', # 0x5a 'gyaed', # 0x5b 'gyael', # 0x5c 'gyaelg', # 0x5d 'gyaelm', # 0x5e 'gyaelb', # 0x5f 'gyaels', # 0x60 'gyaelt', # 0x61 'gyaelp', # 0x62 'gyaelh', # 0x63 'gyaem', # 0x64 'gyaeb', # 0x65 'gyaebs', # 0x66 'gyaes', # 0x67 'gyaess', # 0x68 'gyaeng', # 0x69 'gyaej', # 0x6a 'gyaec', # 0x6b 'gyaek', # 0x6c 'gyaet', # 0x6d 'gyaep', # 0x6e 'gyaeh', # 0x6f 'geo', # 0x70 'geog', # 0x71 'geogg', # 0x72 'geogs', # 0x73 'geon', # 0x74 'geonj', # 0x75 'geonh', # 0x76 'geod', # 0x77 'geol', # 0x78 'geolg', # 0x79 'geolm', # 0x7a 'geolb', # 0x7b 'geols', # 0x7c 'geolt', # 0x7d 'geolp', # 0x7e 'geolh', # 0x7f 'geom', # 0x80 'geob', # 0x81 'geobs', # 0x82 'geos', # 0x83 'geoss', # 0x84 'geong', # 0x85 'geoj', # 0x86 'geoc', # 0x87 'geok', # 0x88 'geot', # 0x89 'geop', # 0x8a 'geoh', # 0x8b 'ge', # 0x8c 'geg', # 0x8d 'gegg', # 0x8e 'gegs', # 0x8f 'gen', # 0x90 'genj', # 0x91 'genh', # 0x92 'ged', # 0x93 'gel', # 0x94 'gelg', # 0x95 'gelm', # 0x96 'gelb', # 0x97 'gels', # 0x98 'gelt', # 0x99 'gelp', # 0x9a 'gelh', # 0x9b 'gem', # 0x9c 'geb', # 0x9d 'gebs', # 0x9e 'ges', # 0x9f 'gess', # 0xa0 'geng', # 0xa1 'gej', # 0xa2 'gec', # 0xa3 'gek', # 0xa4 'get', # 0xa5 'gep', # 0xa6 'geh', # 0xa7 'gyeo', # 0xa8 'gyeog', # 0xa9 'gyeogg', # 0xaa 'gyeogs', # 0xab 'gyeon', # 0xac 'gyeonj', # 0xad 'gyeonh', # 0xae 'gyeod', # 0xaf 'gyeol', # 0xb0 'gyeolg', # 0xb1 'gyeolm', # 0xb2 'gyeolb', # 0xb3 'gyeols', # 0xb4 'gyeolt', # 0xb5 'gyeolp', # 0xb6 'gyeolh', # 0xb7 'gyeom', # 0xb8 'gyeob', # 0xb9 'gyeobs', # 0xba 'gyeos', # 0xbb 'gyeoss', # 0xbc 'gyeong', # 0xbd 'gyeoj', # 0xbe 'gyeoc', # 0xbf 'gyeok', # 0xc0 'gyeot', # 0xc1 'gyeop', # 0xc2 'gyeoh', # 0xc3 'gye', # 0xc4 'gyeg', # 0xc5 'gyegg', # 0xc6 'gyegs', # 0xc7 'gyen', # 0xc8 'gyenj', # 0xc9 'gyenh', # 0xca 'gyed', # 0xcb 'gyel', # 0xcc 'gyelg', # 0xcd 'gyelm', # 0xce 'gyelb', # 0xcf 'gyels', # 0xd0 'gyelt', # 0xd1 'gyelp', # 0xd2 'gyelh', # 0xd3 'gyem', # 0xd4 'gyeb', # 0xd5 'gyebs', # 0xd6 'gyes', # 0xd7 'gyess', # 0xd8 'gyeng', # 0xd9 'gyej', # 0xda 'gyec', # 0xdb 'gyek', # 0xdc 'gyet', # 0xdd 'gyep', # 0xde 'gyeh', # 0xdf 'go', # 0xe0 'gog', # 0xe1 'gogg', # 0xe2 'gogs', # 0xe3 'gon', # 0xe4 'gonj', # 
0xe5 'gonh', # 0xe6 'god', # 0xe7 'gol', # 0xe8 'golg', # 0xe9 'golm', # 0xea 'golb', # 0xeb 'gols', # 0xec 'golt', # 0xed 'golp', # 0xee 'golh', # 0xef 'gom', # 0xf0 'gob', # 0xf1 'gobs', # 0xf2 'gos', # 0xf3 'goss', # 0xf4 'gong', # 0xf5 'goj', # 0xf6 'goc', # 0xf7 'gok', # 0xf8 'got', # 0xf9 'gop', # 0xfa 'goh', # 0xfb 'gwa', # 0xfc 'gwag', # 0xfd 'gwagg', # 0xfe 'gwags', # 0xff )
gpl-3.0
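This file is one 256-entry "page" of unidecode's transliteration tables, covering the Hangul block that starts at U+AC00. A hedged sketch of how such a page is indexed, assuming it runs alongside the tuple above: the high byte of the code point picks the module and the low byte picks the entry.

ch = u'\uac10'          # HANGUL SYLLABLE GAM
page = ord(ch) >> 8     # 0xac, i.e. this module, x0ac.py
index = ord(ch) & 0xff  # 0x10
assert data[index] == 'gam'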
mateusz-blaszkowski/PerfKitBenchmarker
perfkitbenchmarker/linux_benchmarks/hbase_ycsb_benchmark.py
2
8607
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Runs YCSB against HBase. HBase is a scalable NoSQL database built on Hadoop. https://hbase.apache.org/ A running installation consists of: * An HDFS NameNode. * HDFS DataNodes. * An HBase master node. * HBase regionservers. * A zookeeper cluster (https://zookeeper.apache.org/). See: http://hbase.apache.org/book.html#_distributed. This benchmark provisions: * A single node functioning as HDFS NameNode, HBase master, and zookeeper quorum member. * '--num_vms - 1' nodes serving as both HDFS DataNodes and HBase region servers (so region servers and data are co-located). By default only the master node runs Zookeeper. Some regionservers may be added to the zookeeper quorum with the --hbase_zookeeper_nodes flag. HBase web UI on 15030. HDFS web UI on 50070. """ import functools import logging import os import posixpath from perfkitbenchmarker import configs from perfkitbenchmarker import data from perfkitbenchmarker import flags from perfkitbenchmarker import vm_util from perfkitbenchmarker.linux_packages import hadoop from perfkitbenchmarker.linux_packages import hbase from perfkitbenchmarker.linux_packages import ycsb FLAGS = flags.FLAGS flags.DEFINE_integer('hbase_zookeeper_nodes', 1, 'Number of Zookeeper nodes.') flags.DEFINE_boolean('hbase_use_snappy', True, 'Whether to use snappy compression.') BENCHMARK_NAME = 'hbase_ycsb' BENCHMARK_CONFIG = """ hbase_ycsb: description: > Run YCSB against HBase. Specify the HBase cluster size with --num_vms. Specify the number of YCSB VMs with --ycsb_client_vms. vm_groups: clients: vm_spec: *default_single_core master: vm_spec: *default_single_core disk_spec: *default_500_gb workers: vm_spec: *default_single_core disk_spec: *default_500_gb """ HBASE_SITE = 'hbase-site.xml' CREATE_TABLE_SCRIPT = 'hbase/create-ycsb-table.hbaseshell.j2' TABLE_NAME = 'usertable' COLUMN_FAMILY = 'cf' TABLE_SPLIT_COUNT = 200 def GetConfig(user_config): config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) num_vms = max(FLAGS.num_vms, 2) if FLAGS['num_vms'].present and FLAGS.num_vms < 2: raise ValueError('hbase_ycsb requires at least 2 HBase VMs.') if FLAGS['ycsb_client_vms'].present: config['vm_groups']['clients']['vm_count'] = FLAGS.ycsb_client_vms if FLAGS['num_vms'].present: config['vm_groups']['workers']['vm_count'] = num_vms - 1 return config def CheckPrerequisites(): """Verifies that the required resources are present. Raises: perfkitbenchmarker.data.ResourceNotFound: On missing resource. """ hbase.CheckPrerequisites() hadoop.CheckPrerequisites() ycsb.CheckPrerequisites() def CreateYCSBTable(vm, table_name=TABLE_NAME, family=COLUMN_FAMILY, n_splits=TABLE_SPLIT_COUNT, limit_filesize=True, use_snappy=True): """Create a table for use with YCSB. Args: vm: Virtual machine from which to create the table. table_name: Name for the table. family: Column family name. limit_filesize: boolean. Should the filesize be limited to 4GB? 
n_splits: Initial number of regions for the table. Default follows HBASE-4163. use_snappy: boolean. Use Snappy compression? """ # See: https://issues.apache.org/jira/browse/HBASE-4163 template_path = data.ResourcePath(CREATE_TABLE_SCRIPT) remote = posixpath.join(hbase.HBASE_DIR, os.path.basename(os.path.splitext(template_path)[0])) vm.RenderTemplate(template_path, remote, context={'table_name': table_name, 'family': family, 'limit_filesize': limit_filesize, 'n_splits': n_splits, 'use_snappy': use_snappy}) # TODO(connormccoy): on HBase update, add '-n' flag. command = "{0}/hbase shell {1}".format(hbase.HBASE_BIN, remote) vm.RemoteCommand(command, should_log=True) def _GetVMsByRole(vm_groups): """Partition "vms" by role in the benchmark. * The first VM is the master. * The first FLAGS.hbase_zookeeper_nodes VMs form the Zookeeper quorum. * The last FLAGS.ycsb_client_vms are loader nodes. * The nodes which are neither the master nor loaders are HBase region servers. Args: vm_groups: The benchmark_spec's vm_groups dict. Returns: A dictionary with keys 'vms', 'hbase_vms', 'master', 'zk_quorum', 'workers', and 'clients'. """ hbase_vms = vm_groups['master'] + vm_groups['workers'] vms = hbase_vms + vm_groups['clients'] return {'vms': vms, 'hbase_vms': hbase_vms, 'master': vm_groups['master'][0], 'zk_quorum': hbase_vms[:FLAGS.hbase_zookeeper_nodes], 'workers': vm_groups['workers'], 'clients': vm_groups['clients']} def Prepare(benchmark_spec): """Prepare the virtual machines to run Hadoop, HBase, and YCSB. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. """ by_role = _GetVMsByRole(benchmark_spec.vm_groups) loaders = by_role['clients'] assert loaders, 'No loader VMs: {0}'.format(by_role) # HBase cluster hbase_vms = by_role['hbase_vms'] assert hbase_vms, 'No HBase VMs: {0}'.format(by_role) master = by_role['master'] zk_quorum = by_role['zk_quorum'] assert zk_quorum, 'No zookeeper quorum: {0}'.format(by_role) workers = by_role['workers'] assert workers, 'No workers: {0}'.format(by_role) hbase_install_fns = [functools.partial(vm.Install, 'hbase') for vm in hbase_vms] ycsb_install_fns = [functools.partial(vm.Install, 'ycsb') for vm in loaders] vm_util.RunThreaded(lambda f: f(), hbase_install_fns + ycsb_install_fns) hadoop.ConfigureAndStart(master, workers, start_yarn=False) hbase.ConfigureAndStart(master, workers, zk_quorum) CreateYCSBTable(master, use_snappy=FLAGS.hbase_use_snappy) # Populate hbase-site.xml on the loaders. master.PullFile( vm_util.GetTempDir(), posixpath.join(hbase.HBASE_CONF_DIR, HBASE_SITE)) def PushHBaseSite(vm): conf_dir = posixpath.join(ycsb.YCSB_DIR, 'hbase-binding', 'conf') vm.RemoteCommand('mkdir -p {}'.format(conf_dir)) vm.PushFile( os.path.join(vm_util.GetTempDir(), HBASE_SITE), posixpath.join(conf_dir, HBASE_SITE)) vm_util.RunThreaded(PushHBaseSite, loaders) def Run(benchmark_spec): """Spawn YCSB and gather the results. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. Returns: A list of sample.Sample objects. """ by_role = _GetVMsByRole(benchmark_spec.vm_groups) loaders = by_role['clients'] logging.info('Loaders: %s', loaders) executor = ycsb.YCSBExecutor('hbase-10') metadata = {'ycsb_client_vms': len(loaders), 'hbase_cluster_size': len(by_role['hbase_vms']), 'hbase_zookeeper_nodes': FLAGS.hbase_zookeeper_nodes} # By default YCSB uses a BufferedMutator for Puts / Deletes. # This leads to incorrect update latencies, since the call returns # before the request is acked by the server.
# Disable this behavior during the benchmark run. run_kwargs = {'columnfamily': COLUMN_FAMILY, 'clientbuffering': 'false'} load_kwargs = run_kwargs.copy() load_kwargs['clientbuffering'] = 'true' samples = list(executor.LoadAndRun(loaders, load_kwargs=load_kwargs, run_kwargs=run_kwargs)) for sample in samples: sample.metadata.update(metadata) return samples def Cleanup(benchmark_spec): """Cleanup. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. """ by_role = _GetVMsByRole(benchmark_spec.vm_groups) hbase.Stop(by_role['master']) hadoop.StopHDFS(by_role['master']) vm_util.RunThreaded(hadoop.CleanDatanode, by_role['workers'])
apache-2.0
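How _GetVMsByRole carves up a vm_groups dict, illustrated with placeholder strings in place of real VM objects and the default --hbase_zookeeper_nodes=1:

vm_groups = {
    'master': ['vm-master'],
    'workers': ['vm-w1', 'vm-w2'],
    'clients': ['vm-c1'],
}
hbase_vms = vm_groups['master'] + vm_groups['workers']
by_role = {
    'vms': hbase_vms + vm_groups['clients'],
    'hbase_vms': hbase_vms,
    'master': vm_groups['master'][0],  # 'vm-master'
    'zk_quorum': hbase_vms[:1],        # ['vm-master'] with one zookeeper node
    'workers': vm_groups['workers'],   # region servers / HDFS datanodes
    'clients': vm_groups['clients'],   # YCSB loaders
}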
zoidbergwill/Diamond
src/diamond/handler/tsdb.py
53
5910
# coding=utf-8 """ Send metrics to an [OpenTSDB](http://opentsdb.net/) server. [OpenTSDB](http://opentsdb.net/) is a distributed, scalable Time Series Database (TSDB) written on top of [HBase](http://hbase.org/). OpenTSDB was written to address a common need: store, index and serve metrics collected from computer systems (network gear, operating systems, applications) at a large scale, and make this data easily accessible and graphable. Thanks to HBase's scalability, OpenTSDB allows you to collect many thousands of metrics from thousands of hosts and applications, at a high rate (every few seconds). OpenTSDB will never delete or downsample data and can easily store billions of data points. As a matter of fact, StumbleUpon uses it to keep track of hundreds of thousands of time series and collects over 1 billion data points per day in their main production datacenter. Imagine having the ability to quickly plot a graph showing the number of DELETE statements going to your MySQL database along with the number of slow queries and temporary files created, and correlate this with the 99th percentile of your service's latency. OpenTSDB makes generating such graphs on the fly a trivial operation, while manipulating millions of data points for very fine grained, real-time monitoring. ==== Notes We don't automatically make the metrics via mkmetric, so we recommend you run with the null handler and log the output and extract the key values to mkmetric yourself. - enable it in `diamond.conf` : ` handlers = diamond.handler.tsdb.TSDBHandler ` """ from Handler import Handler import socket class TSDBHandler(Handler): """ Implements the abstract Handler class, sending data to OpenTSDB """ RETRY = 3 def __init__(self, config=None): """ Create a new instance of the TSDBHandler class """ # Initialize Handler Handler.__init__(self, config) # Initialize Data self.socket = None # Initialize Options self.host = self.config['host'] self.port = int(self.config['port']) self.timeout = int(self.config['timeout']) self.metric_format = str(self.config['format']) self.tags = str(self.config['tags']) # Connect self._connect() def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(TSDBHandler, self).get_default_config_help() config.update({ 'host': '', 'port': '', 'timeout': '', 'format': '', 'tags': '', }) return config def get_default_config(self): """ Return the default config for the handler """ config = super(TSDBHandler, self).get_default_config() config.update({ 'host': '', 'port': 1234, 'timeout': 5, 'format': '{Collector}.{Metric} {timestamp} {value} hostname={host}' '{tags}', 'tags': '', }) return config def __del__(self): """ Destroy instance of the TSDBHandler class """ self._close() def process(self, metric): """ Process a metric by sending it to TSDB """ metric_str = self.metric_format.format( Collector=metric.getCollectorPath(), Path=metric.path, Metric=metric.getMetricPath(), host=metric.host, timestamp=metric.timestamp, value=metric.value, tags=self.tags ) # Just send the data as a string self._send("put " + str(metric_str) + "\n") def _send(self, data): """ Send data to TSDB. Sends are retried up to RETRY times before the data is dropped.
""" retry = self.RETRY # Attempt to send any data in the queue while retry > 0: # Check socket if not self.socket: # Log Error self.log.error("TSDBHandler: Socket unavailable.") # Attempt to restablish connection self._connect() # Decrement retry retry -= 1 # Try again continue try: # Send data to socket self.socket.sendall(data) # Done break except socket.error, e: # Log Error self.log.error("TSDBHandler: Failed sending data. %s.", e) # Attempt to restablish connection self._close() # Decrement retry retry -= 1 # try again continue def _connect(self): """ Connect to the TSDB server """ # Create socket self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if socket is None: # Log Error self.log.error("TSDBHandler: Unable to create socket.") # Close Socket self._close() return # Set socket timeout self.socket.settimeout(self.timeout) # Connect to graphite server try: self.socket.connect((self.host, self.port)) # Log self.log.debug("Established connection to TSDB server %s:%d", self.host, self.port) except Exception, ex: # Log Error self.log.error("TSDBHandler: Failed to connect to %s:%i. %s", self.host, self.port, ex) # Close Socket self._close() return def _close(self): """ Close the socket """ if self.socket is not None: self.socket.close() self.socket = None
mit
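What a rendered "put" line looks like under the default format string above; the metric values here are made up.

fmt = '{Collector}.{Metric} {timestamp} {value} hostname={host}{tags}'
line = fmt.format(Collector='cpu', Metric='total.idle',
                  timestamp=1415000000, value=98.5, host='web01', tags='')
print("put " + line)
# put cpu.total.idle 1415000000 98.5 hostname=web01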
foodiesgit/dogepartyd
pycoin-0.26/pycoin/tx/script/microcode.py
4
11968
# -*- coding: utf-8 -*- """ Implement instructions of the Bitcoin VM. The MIT License (MIT) Copyright (c) 2013 by Richard Kiss Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import binascii import hashlib from . import ScriptError from .opcodes import OPCODE_TO_INT from .tools import bytes_to_int, int_to_bytes from ...encoding import hash160, h2b, double_sha256, ripemd160 bytes_from_ints = (lambda x: ''.join(chr(c) for c in x)) if bytes == str else bytes bytes_to_ints = (lambda x: (ord(c) for c in x)) if bytes == str else lambda x: x VCH_TRUE = b'\1\1' VCH_FALSE = b'\0' do_OP_NOP = do_OP_NOP1 = do_OP_NOP2 = do_OP_NOP3 = do_OP_NOP4 = do_OP_NOP5 = lambda s: None do_OP_NOP6 = do_OP_NOP7 = do_OP_NOP8 = do_OP_NOP9 = do_OP_NOP10 = lambda s: None def do_OP_VERIFY(stack): pass def do_OP_RETURN(stack): raise ScriptError("OP_RETURN encountered") def do_OP_2DROP(stack): """ >>> s = [1, 2, 3] >>> do_OP_2DROP(s) >>> print(s) [1] """ stack.pop() stack.pop() def do_OP_2DUP(stack): #// (x1 x2 -- x1 x2 x1 x2) """ >>> s = [1, 2] >>> do_OP_2DUP(s) >>> print(s) [1, 2, 1, 2] """ stack.append(stack[-2]) stack.append(stack[-2]) def do_OP_3DUP(stack): #// (x1 x2 x3 -- x1 x2 x3 x1 x2 x3) """ >>> s = [1, 2, 3] >>> do_OP_3DUP(s) >>> print(s) [1, 2, 3, 1, 2, 3] """ stack.append(stack[-3]) stack.append(stack[-3]) stack.append(stack[-3]) def do_OP_2OVER(stack): #// (x1 x2 x3 x4 -- x1 x2 x3 x4 x1 x2) """ >>> s = [1, 2, 3, 4] >>> do_OP_2OVER(s) >>> print(s) [1, 2, 3, 4, 1, 2] """ stack.append(stack[-4]) stack.append(stack[-4]) def do_OP_2ROT(stack): """ >>> s = [1, 2, 3, 4, 5, 6] >>> do_OP_2ROT(s) >>> print(s) [3, 4, 5, 6, 1, 2] """ stack.append(stack.pop(-6)) stack.append(stack.pop(-6)) def do_OP_2SWAP(stack): """ >>> s = [1, 2, 3, 4] >>> do_OP_2SWAP(s) >>> print(s) [3, 4, 1, 2] """ stack.append(stack.pop(-4)) stack.append(stack.pop(-4)) def do_OP_IFDUP(stack): """ >>> s = [1, 2] >>> do_OP_IFDUP(s) >>> print(s) [1, 2, 2] >>> s = [1, 2, 0] >>> do_OP_IFDUP(s) >>> print(s) [1, 2, 0] """ if stack[-1]: stack.append(stack[-1]) def do_OP_DEPTH(stack): """ >>> s = [1, 2, 1, 2, 1, 2] >>> do_OP_DEPTH(s) >>> print(s) [1, 2, 1, 2, 1, 2, 6] """ stack.append(len(stack)) def do_OP_DROP(stack): """ >>> s = [1, 2] >>> do_OP_DROP(s) >>> print(s) [1] """ stack.pop() def do_OP_DUP(stack): """ >>> s = [1, 2] >>> do_OP_DUP(s) >>> print(s) [1, 2, 2] """ stack.append(stack[-1]) def do_OP_NIP(stack): """ >>> s = [1, 2] >>> do_OP_NIP(s) >>> print(s) [2] """ v = stack.pop() stack.pop() stack.append(v) def do_OP_OVER(stack): """ >>> s = [1, 2] >>> do_OP_OVER(s) >>> print(s) [1, 2, 1] """ 
stack.append(stack[-2]) def do_OP_PICK(stack): """ >>> s = ['a', 'b', 'c', 'd', b'\2'] >>> do_OP_PICK(s) >>> print(s) ['a', 'b', 'c', 'd', 'b'] """ v = bytes_to_int(stack.pop()) stack.append(stack[-v-1]) def do_OP_ROLL(stack): """ >>> s = ['a', 'b', 'c', 'd', b'\2'] >>> do_OP_ROLL(s) >>> print(s) ['a', 'c', 'd', 'b'] """ v = bytes_to_int(stack.pop()) stack.append(stack.pop(-v-1)) def do_OP_ROT(stack): """ >>> s = [1, 2, 3] >>> do_OP_ROT(s) >>> print(s) [2, 3, 1] """ stack.append(stack.pop(-3)) def do_OP_SWAP(stack): """ >>> s = [1, 2, 3] >>> do_OP_SWAP(s) >>> print(s) [1, 3, 2] """ stack.append(stack.pop(-2)) def do_OP_TUCK(stack): """ >>> s = [1, 2, 3] >>> do_OP_TUCK(s) >>> print(s) [1, 3, 2, 3] """ v1 = stack.pop() v2 = stack.pop() stack.append(v1) stack.append(v2) stack.append(v1) def do_OP_CAT(stack): """ >>> s = ["foo", "bar"] >>> do_OP_CAT(s) >>> print(s) ['foobar'] """ v1 = stack.pop() v2 = stack.pop() stack.append(v2 + v1) def do_OP_SUBSTR(stack): """ >>> s = ['abcdef', b'\3', b'\2'] >>> do_OP_SUBSTR(s) >>> print(s) ['de'] """ pos = bytes_to_int(stack.pop()) length = bytes_to_int(stack.pop()) stack.append(stack.pop()[length:length+pos]) def do_OP_LEFT(stack): """ >>> s = [b'abcdef', b'\\3'] >>> do_OP_LEFT(s) >>> print(len(s)==1 and s[0]==b'abc') True >>> s = [b'abcdef', b'\\0'] >>> do_OP_LEFT(s) >>> print(len(s) ==1 and s[0]==b'') True """ pos = bytes_to_int(stack.pop()) stack.append(stack.pop()[:pos]) def do_OP_RIGHT(stack): """ >>> s = [b'abcdef', b'\\3'] >>> do_OP_RIGHT(s) >>> print(s==[b'def']) True >>> s = [b'abcdef', b'\\0'] >>> do_OP_RIGHT(s) >>> print(s==[b'']) True """ pos = bytes_to_int(stack.pop()) if pos > 0: stack.append(stack.pop()[-pos:]) else: stack.pop() stack.append(b'') def do_OP_SIZE(stack): """ >>> s = [b'abcdef'] >>> do_OP_SIZE(s) >>> print(s == [b'abcdef', b'\x06']) True >>> s = [b'abcdef'*1000] >>> do_OP_SIZE(s) >>> print(binascii.hexlify(s[-1]) == b'1770') True """ stack.append(int_to_bytes(len(stack[-1]))) def do_OP_INVERT(stack): """ >>> s = [h2b('5dcf39822aebc166')] >>> do_OP_INVERT(s) >>> print(binascii.hexlify(s[0]) == b'a230c67dd5143e99') True """ v = stack.pop() # use bytes_from_ints and bytes_to_ints so it works with # Python 2.7 and 3.3. 
Ugh stack.append(bytes_from_ints((s^0xff) for s in bytes_to_ints(v))) def make_same_size(v1, v2): larger = max(len(v1), len(v2)) nulls = b'\0' * larger v1 = (v1 + nulls)[:larger] v2 = (v2 + nulls)[:larger] return v1, v2 def make_bitwise_bin_op(binop): """ >>> s = [h2b('5dcf39832aebc166'), h2b('ff00f086') ] >>> do_OP_AND(s) >>> print(binascii.hexlify(s[0]) == b'5d00308200000000') True >>> s = [h2b('5dcf39832aebc166'), h2b('ff00f086') ] >>> do_OP_OR(s) >>> print(binascii.hexlify(s[0]) == b'ffcff9872aebc166') True >>> s = [h2b('5dcf39832aebc166'), h2b('ff00f086') ] >>> do_OP_XOR(s) >>> print(binascii.hexlify(s[0]) == b'a2cfc9052aebc166') True >>> s = [] """ def f(stack): v1 = stack.pop() v2 = stack.pop() v1, v2 = make_same_size(v1, v2) stack.append(bytes_from_ints(binop(c1, c2) for c1, c2 in zip(bytes_to_ints(v1), bytes_to_ints(v2)))) return f do_OP_AND = make_bitwise_bin_op(lambda x,y: x & y) do_OP_OR = make_bitwise_bin_op(lambda x,y: x | y) do_OP_XOR = make_bitwise_bin_op(lambda x,y: x ^ y) def make_bool(v): if v: return VCH_TRUE return VCH_FALSE def do_OP_EQUAL(stack): """ >>> s = [b'string1', b'string1'] >>> do_OP_EQUAL(s) >>> print(s == [VCH_TRUE]) True >>> s = [b'string1', b'string2'] >>> do_OP_EQUAL(s) >>> print(s == [VCH_FALSE]) True """ v1 = stack.pop() v2 = stack.pop() stack.append(make_bool(v1 == v2)) do_OP_EQUALVERIFY = lambda s: do_OP_EQUAL(s) def make_bin_op(binop): def f(stack): v1 = bytes_to_int(stack.pop()) v2 = bytes_to_int(stack.pop()) stack.append(int_to_bytes(binop(v2, v1))) return f do_OP_ADD = make_bin_op(lambda x,y: x+y) do_OP_SUB = make_bin_op(lambda x,y: x-y) do_OP_MUL = make_bin_op(lambda x,y: x*y) do_OP_DIV = make_bin_op(lambda x,y: x//y) do_OP_MOD = make_bin_op(lambda x,y: x%y) do_OP_LSHIFT = make_bin_op(lambda x,y: x<<y) do_OP_RSHIFT = make_bin_op(lambda x,y: x>>y) do_OP_BOOLAND = make_bin_op(lambda x,y: x and y) do_OP_BOOLOR = make_bin_op(lambda x,y: x or y) do_OP_NUMEQUAL = make_bin_op(lambda x,y: x==y) do_OP_NUMEQUALVERIFY = make_bin_op(lambda x,y: x==y) do_OP_NUMNOTEQUAL = make_bin_op(lambda x,y: x!=y) do_OP_LESSTHAN = make_bin_op(lambda x,y: x<y) do_OP_GREATERTHAN = make_bin_op(lambda x,y: x>y) do_OP_LESSTHANOREQUAL = make_bin_op(lambda x,y: x<=y) do_OP_GREATERTHANOREQUAL = make_bin_op(lambda x,y: x>=y) do_OP_MIN = make_bin_op(min) do_OP_MAX = make_bin_op(max) def do_OP_WITHIN(stack): """ >>> s = [b'c', b'b', b'a'] >>> do_OP_WITHIN(s) >>> print(s == [VCH_TRUE]) True >>> s = [b'b', b'c', b'a'] >>> do_OP_WITHIN(s) >>> print(s == [VCH_FALSE]) True """ v3 = stack.pop() v2 = stack.pop() v1 = stack.pop() ok = (v3 <= v2 <= v1) stack.append(make_bool(ok)) def do_OP_RIPEMD160(stack): """ >>> s = [b'foo'] >>> do_OP_RIPEMD160(s) >>> print(s == [bytearray([66, 207, 162, 17, 1, 142, 164, 146, 253, 238, 69, 172, 99, 123, 121, 114, 160, 173, 104, 115])]) True """ stack.append(ripemd160(stack.pop()).digest()) def do_OP_SHA1(stack): """ >>> s = [b'foo'] >>> do_OP_SHA1(s) >>> print(s == [bytearray([11, 238, 199, 181, 234, 63, 15, 219, 201, 93, 13, 212, 127, 60, 91, 194, 117, 218, 138, 51])]) True """ stack.append(hashlib.sha1(stack.pop()).digest()) def do_OP_SHA256(stack): """ >>> s = [b'foo'] >>> do_OP_SHA256(s) >>> print(s == [bytearray([44, 38, 180, 107, 104, 255, 198, 143, 249, 155, 69, 60, 29, 48, 65, 52, 19, 66, 45, 112, 100, 131, 191, 160, 249, 138, 94, 136, 98, 102, 231, 174])]) True """ stack.append(hashlib.sha256(stack.pop()).digest()) def do_OP_HASH160(stack): """ >>> s = [b'foo'] >>> do_OP_HASH160(s) >>> print(s == [bytearray([225, 207, 124, 129, 3, 71, 107, 109, 
127, 233, 228, 151, 154, 161, 14, 124, 83, 31, 207, 66])]) True """ stack.append(hash160(stack.pop())) def do_OP_HASH256(stack): """ >>> s = [b'foo'] >>> do_OP_HASH256(s) >>> print(s == [bytearray([199, 173, 232, 143, 199, 162, 20, 152, 166, 165, 229, 195, 133, 225, 246, 139, 237, 130, 43, 114, 170, 99, 196, 169, 164, 138, 2, 194, 70, 110, 226, 158])]) True """ stack.append(double_sha256(stack.pop())) def make_unary_num_op(unary_f): def f(stack): stack.append(int_to_bytes(unary_f(bytes_to_int(stack.pop())))) return f do_OP_1ADD = make_unary_num_op(lambda x: x+1) do_OP_1SUB = make_unary_num_op(lambda x: x-1) do_OP_2MUL = make_unary_num_op(lambda x: x<<1) do_OP_2DIV = make_unary_num_op(lambda x: x>>1) do_OP_NEGATE = make_unary_num_op(lambda x: -x) do_OP_ABS = make_unary_num_op(lambda x: abs(x)) do_OP_NOT = make_unary_num_op(lambda x: make_bool(x == 0)) do_OP_0NOTEQUAL = make_unary_num_op(lambda x: make_bool(x != 0)) def build_ops_lookup(): d = {} the_globals = globals() for opcode_name, opcode_int in OPCODE_TO_INT.items(): do_f_name = "do_%s" % opcode_name if do_f_name in the_globals: d[opcode_int] = the_globals[do_f_name] return d MICROCODE_LOOKUP = build_ops_lookup() if __name__ == "__main__": import doctest doctest.testmod()
mit
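A small driver showing the byte-string stack convention, assuming it runs alongside the functions above (for example appended to this module):

stack = [int_to_bytes(2), int_to_bytes(3)]
do_OP_ADD(stack)   # pops 3 then 2, pushes int_to_bytes(2 + 3)
do_OP_DUP(stack)   # duplicates the top of the stack
assert [bytes_to_int(v) for v in stack] == [5, 5]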
justin-ho/passwd-mng
pycrypto-2.6.1/pycrypto-2.6.1/lib/Crypto/Hash/SHA224.py
124
2851
# -*- coding: utf-8 -*- # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """SHA-224 cryptographic hash algorithm. SHA-224 belongs to the SHA-2_ family of cryptographic hashes. It produces the 224 bit digest of a message. >>> from Crypto.Hash import SHA224 >>> >>> h = SHA224.new() >>> h.update(b'Hello') >>> print h.hexdigest() *SHA* stands for Secure Hash Algorithm. .. _SHA-2: http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf """ __revision__ = "$Id$" __all__ = ['new', 'digest_size', 'SHA224Hash' ] from Crypto.Util.py3compat import * from Crypto.Hash.hashalgo import HashAlgo try: import hashlib hashFactory = hashlib.sha224 except ImportError: from Crypto.Hash import _SHA224 hashFactory = _SHA224 class SHA224Hash(HashAlgo): """Class that implements a SHA-224 hash :undocumented: block_size """ #: ASN.1 Object identifier (OID):: #: #: id-sha224 OBJECT IDENTIFIER ::= { #: joint-iso-itu-t(2) country(16) us(840) organization(1) gov(101) csor(3) #: nistalgorithm(4) hashalgs(2) 4 #: } #: #: This value uniquely identifies the SHA-224 algorithm. oid = b('\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04') digest_size = 28 block_size = 64 def __init__(self, data=None): HashAlgo.__init__(self, hashFactory, data) def new(self, data=None): return SHA224Hash(data) def new(data=None): """Return a fresh instance of the hash object. :Parameters: data : byte string The very first chunk of the message to hash. It is equivalent to an early call to `SHA224Hash.update()`. Optional. :Return: A `SHA224Hash` object """ return SHA224Hash().new(data) #: The size of the resulting hash in bytes. digest_size = SHA224Hash.digest_size #: The internal block size of the hash algorithm in bytes. block_size = SHA224Hash.block_size
gpl-3.0
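A hedged usage sketch mirroring the module docstring, assumed to run alongside the module above; since this build delegates to hashlib.sha224 when it is available, hashlib provides a handy cross-check.

import hashlib

h = new()  # or SHA224Hash(), as defined above
h.update(b'Hello')
assert h.hexdigest() == hashlib.sha224(b'Hello').hexdigest()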
fifengine/fifengine-demos
pychan_demo/colortester.py
1
7852
# -*- coding: utf-8 -*-

# ####################################################################
#  Copyright (C) 2005-2013 by the FIFE team
#  http://www.fifengine.net
#  This file is part of FIFE.
#
#  FIFE is free software; you can redistribute it and/or
#  modify it under the terms of the GNU Lesser General Public
#  License as published by the Free Software Foundation; either
#  version 2.1 of the License, or (at your option) any later version.
#
#  This library is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#  Lesser General Public License for more details.
#
#  You should have received a copy of the GNU Lesser General Public
#  License along with this library; if not, write to the
#  Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
# ####################################################################

""" pychan demo app for testing rgba colors on widgets """

from builtins import str

from pychan_demo import PyChanExample

from fife.extensions import pychan


class ColorExample(PyChanExample):
    """ a small app (^^) to show how fifechan uses colors on various widgets """

    # the color kinds the sliders control, and the example widgets they color
    COLOR_KINDS = ('base', 'background', 'foreground', 'selection')
    EXAMPLE_NAMES = ['example%d' % i for i in range(1, 10)]

    def __init__(self):
        super(ColorExample, self).__init__('gui/colortester.xml')

    def start(self):
        """ load XML file and setup callbacks """
        self.widget = pychan.loadXML(self.xmlFile)

        callbacks = {
            'base': self.update_basecolor,
            'background': self.update_background_color,
            'foreground': self.update_foreground_color,
            'selection': self.update_selection_color,
        }
        events = {'closeButton': self.stop}
        for kind, callback in callbacks.items():
            for channel in 'rgba':
                events['%s_%sslider' % (kind, channel)] = callback
        self.widget.mapEvents(events)

        # alpha value needs to be set, otherwise you don't see colors ;-)
        for kind in self.COLOR_KINDS:
            self.widget.findChild(name=kind + '_aslider').value = float(255)

        # init stuff
        self.update_basecolor()
        self.update_background_color()
        self.update_foreground_color()
        self.update_selection_color()
        self.widget.show()

    def __update_color(self, kind, attribute):
        """ Read the rgba sliders for the given color kind, update the
            slider labels and apply the resulting color to all example widgets
        """
        rgba = []
        for channel in 'rgba':
            value = int(self.widget.findChild(name='%s_%sslider' % (kind, channel)).value)
            # update slider label
            self.widget.findChild(name='%s_%svalue' % (kind, channel)).text = str(value)
            rgba.append(value)
        rgba = tuple(rgba)

        for name in self.EXAMPLE_NAMES:
            setattr(self.widget.findChild(name=name), attribute, rgba)

    def update_basecolor(self):
        """ Update rgba base colors of all examples and show the values """
        self.__update_color('base', 'base_color')

    def update_background_color(self):
        """ Update rgba background colors of all examples and show the values """
        self.__update_color('background', 'background_color')

    def update_selection_color(self):
        """ Update rgba selection colors of all examples and show the values """
        self.__update_color('selection', 'selection_color')

    def update_foreground_color(self):
        """ Update rgba foreground colors of all examples and show the values """
        self.__update_color('foreground', 'foreground_color')
lgpl-2.1
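A minimal usage sketch of the pattern the demo above exercises; the XML path and widget name come from the demo itself, everything else (in particular an already-initialized FIFE engine with pychan.init called) is an assumption:

# Sketch, not part of the demo: the core pychan color pattern used above.
# Assumes the demo's gui/colortester.xml and an initialized FIFE engine
# (pychan.init(engine) has already been called).
from fife.extensions import pychan

widget = pychan.loadXML('gui/colortester.xml')   # root widget of the GUI tree
label = widget.findChild(name="example1")        # widget name from the demo XML
label.base_color = (200, 0, 0, 255)              # (r, g, b, a), each 0-255
widget.show()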
ceos-seo/data_cube_utilities
data_cube_utilities/trend.py
1
2098
import numpy as np
import xarray as xr


def __where_not_nan(arr: np.ndarray):
    """Finds the positions of the non-nan values in an nd-array.

    Args:
        arr (numpy.ndarray): nd-array with nan values

    Returns:
        tuple of numpy.ndarray: per-dimension indices of the finite
        (non-nan) values, as returned by numpy.where
    """
    return np.where(np.isfinite(arr))


def __flatten_shallow(arr):
    """Flattens the first two axes of an nd-array.

    Args:
        arr (numpy.ndarray): nd-array with dimensions (n, m)

    Returns:
        arr (numpy.ndarray): nd-array with dimensions (n*m,)
    """
    # TODO: Done in a hurry. Find a numpy-native way of resizing.
    return arr.reshape(arr.shape[0] * arr.shape[1])


def __linear_fit(da: xr.DataArray):
    """Applies linear regression to a 1-D xr.DataArray.

    Args:
        da (xr.DataArray): 1-D Data-Array being manipulated.

    Returns:
        data (xr.DataArray): DataArray with a single element (the slope of
            the regression).
    """
    xs = np.array(list(range(len(da.time))))
    ys = __flatten_shallow(da.values)

    not_nan = __where_not_nan(ys)[0].astype(int)

    xs = xs[not_nan]
    ys = ys[not_nan]

    pf = np.polyfit(xs, ys, 1)
    return xr.DataArray(pf[0])


def linear(da: xr.DataArray):
    """Reduces an xarray along its time component. The reduction yields a
    slope for each spatial coordinate in the xarray.

    Args:
        da (xr.DataArray): 3-D Data-Array being manipulated. `latitude` and
            `longitude` are required dimensions.

    Returns:
        linear_trend_product (xr.DataArray): 2-D Data-Array of per-coordinate
            regression slopes.
    """
    # TODO: Decouple from the coordinate system, and allow regression along
    # multiple components.
    stacked = da.stack(allpoints=['latitude', 'longitude'])

    trend = stacked.groupby('allpoints').apply(__linear_fit)

    unstacked = trend.unstack('allpoints')

    return unstacked.rename(dict(allpoints_level_0="latitude",
                                 allpoints_level_1="longitude"))
apache-2.0
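For reference, the per-pixel slope that linear() above computes can be sketched directly with NumPy. This is a self-contained illustration of the technique, not the module's API; the array sizes are made up:

import numpy as np

# Synthetic (time, lat, lon) cube: 10 time steps over a 4x5 grid.
data = np.random.rand(10, 4, 5)
t = np.arange(data.shape[0])

# Fit y = a*t + b independently for every pixel. np.polyfit accepts a 2-D
# y with one fit per column, so flatten the spatial grid into columns first.
flat = data.reshape(data.shape[0], -1)        # (time, lat*lon)
slopes = np.polyfit(t, flat, 1)[0]            # degree-1 fit; row 0 holds slopes
slope_map = slopes.reshape(data.shape[1:])    # back to (lat, lon)
print(slope_map.shape)                        # (4, 5)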
overtherain/scriptfile
software/googleAppEngine/lib/django_1_2/docs/_ext/djangodocs.py
39
9475
""" Sphinx plugins for Django documentation. """ import os import re from docutils import nodes, transforms try: import json except ImportError: try: import simplejson as json except ImportError: try: from django.utils import simplejson as json except ImportError: json = None from sphinx import addnodes, roles from sphinx.builders.html import StandaloneHTMLBuilder from sphinx.writers.html import SmartyPantsHTMLTranslator from sphinx.util.console import bold from sphinx.util.compat import Directive # RE for option descriptions without a '--' prefix simple_option_desc_re = re.compile( r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)') def setup(app): app.add_crossref_type( directivename = "setting", rolename = "setting", indextemplate = "pair: %s; setting", ) app.add_crossref_type( directivename = "templatetag", rolename = "ttag", indextemplate = "pair: %s; template tag" ) app.add_crossref_type( directivename = "templatefilter", rolename = "tfilter", indextemplate = "pair: %s; template filter" ) app.add_crossref_type( directivename = "fieldlookup", rolename = "lookup", indextemplate = "pair: %s; field lookup type", ) app.add_description_unit( directivename = "django-admin", rolename = "djadmin", indextemplate = "pair: %s; django-admin command", parse_node = parse_django_admin_node, ) app.add_description_unit( directivename = "django-admin-option", rolename = "djadminopt", indextemplate = "pair: %s; django-admin command-line option", parse_node = parse_django_adminopt_node, ) app.add_config_value('django_next_version', '0.0', True) app.add_directive('versionadded', VersionDirective) app.add_directive('versionchanged', VersionDirective) app.add_transform(SuppressBlockquotes) app.add_builder(DjangoStandaloneHTMLBuilder) class VersionDirective(Directive): has_content = True required_arguments = 1 optional_arguments = 1 final_argument_whitespace = True option_spec = {} def run(self): env = self.state.document.settings.env arg0 = self.arguments[0] is_nextversion = env.config.django_next_version == arg0 ret = [] node = addnodes.versionmodified() ret.append(node) if not is_nextversion: if len(self.arguments) == 1: linktext = 'Please, see the release notes </releases/%s>' % (arg0) try: xrefs = roles.XRefRole()('doc', linktext, linktext, self.lineno, self.state) # Sphinx >= 1.0 except AttributeError: xrefs = roles.xfileref_role('doc', linktext, linktext, self.lineno, self.state) # Sphinx < 1.0 node.extend(xrefs[0]) node['version'] = arg0 else: node['version'] = "Development version" node['type'] = self.name if len(self.arguments) == 2: inodes, messages = self.state.inline_text(self.arguments[1], self.lineno+1) node.extend(inodes) if self.content: self.state.nested_parse(self.content, self.content_offset, node) ret = ret + messages env.note_versionchange(node['type'], node['version'], node, self.lineno) return ret class SuppressBlockquotes(transforms.Transform): """ Remove the default blockquotes that encase indented list, tables, etc. """ default_priority = 300 suppress_blockquote_child_nodes = ( nodes.bullet_list, nodes.enumerated_list, nodes.definition_list, nodes.literal_block, nodes.doctest_block, nodes.line_block, nodes.table ) def apply(self): for node in self.document.traverse(nodes.block_quote): if len(node.children) == 1 and isinstance(node.children[0], self.suppress_blockquote_child_nodes): node.replace_self(node.children[0]) class DjangoHTMLTranslator(SmartyPantsHTMLTranslator): """ Django-specific reST to HTML tweaks. """ # Don't use border=1, which docutils does by default. 
def visit_table(self, node): self.body.append(self.starttag(node, 'table', CLASS='docutils')) # <big>? Really? def visit_desc_parameterlist(self, node): self.body.append('(') self.first_param = 1 def depart_desc_parameterlist(self, node): self.body.append(')') # # Don't apply smartypants to literal blocks # def visit_literal_block(self, node): self.no_smarty += 1 SmartyPantsHTMLTranslator.visit_literal_block(self, node) def depart_literal_block(self, node): SmartyPantsHTMLTranslator.depart_literal_block(self, node) self.no_smarty -= 1 # # Turn the "new in version" stuff (versionadded/versionchanged) into a # better callout -- the Sphinx default is just a little span, # which is a bit less obvious that I'd like. # # FIXME: these messages are all hardcoded in English. We need to change # that to accomodate other language docs, but I can't work out how to make # that work. # version_text = { 'deprecated': 'Deprecated in Django %s', 'versionchanged': 'Changed in Django %s', 'versionadded': 'New in Django %s', } def visit_versionmodified(self, node): self.body.append( self.starttag(node, 'div', CLASS=node['type']) ) title = "%s%s" % ( self.version_text[node['type']] % node['version'], len(node) and ":" or "." ) self.body.append('<span class="title">%s</span> ' % title) def depart_versionmodified(self, node): self.body.append("</div>\n") # Give each section a unique ID -- nice for custom CSS hooks def visit_section(self, node): old_ids = node.get('ids', []) node['ids'] = ['s-' + i for i in old_ids] node['ids'].extend(old_ids) SmartyPantsHTMLTranslator.visit_section(self, node) node['ids'] = old_ids def parse_django_admin_node(env, sig, signode): command = sig.split(' ')[0] env._django_curr_admin_command = command title = "django-admin.py %s" % sig signode += addnodes.desc_name(title, title) return sig def parse_django_adminopt_node(env, sig, signode): """A copy of sphinx.directives.CmdoptionDesc.parse_signature()""" try: from sphinx.domains.std import option_desc_re # Sphinx >= 1.0 except ImportError: from sphinx.directives.desc import option_desc_re # Sphinx < 1.0 count = 0 firstname = '' for m in option_desc_re.finditer(sig): optname, args = m.groups() if count: signode += addnodes.desc_addname(', ', ', ') signode += addnodes.desc_name(optname, optname) signode += addnodes.desc_addname(args, args) if not count: firstname = optname count += 1 if not count: for m in simple_option_desc_re.finditer(sig): optname, args = m.groups() if count: signode += addnodes.desc_addname(', ', ', ') signode += addnodes.desc_name(optname, optname) signode += addnodes.desc_addname(args, args) if not count: firstname = optname count += 1 if not firstname: raise ValueError return firstname class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder): """ Subclass to add some extra things we need. 
""" name = 'djangohtml' def finish(self): super(DjangoStandaloneHTMLBuilder, self).finish() if json is None: self.warn("cannot create templatebuiltins.js due to missing simplejson dependency") return self.info(bold("writing templatebuiltins.js...")) try: # Sphinx < 1.0 xrefs = self.env.reftargets.items() templatebuiltins = dict([('ttags', [n for ((t,n),(l,a)) in xrefs if t == 'ttag' and l == 'ref/templates/builtins']), ('tfilters', [n for ((t,n),(l,a)) in xrefs if t == 'tfilter' and l == 'ref/templates/builtins'])]) except AttributeError: # Sphinx >= 1.0 xrefs = self.env.domaindata["std"]["objects"] templatebuiltins = dict([('ttags', [n for ((t,n), (l,a)) in xrefs.items() if t == 'templatetag' and l == 'ref/templates/builtins' ]), ('tfilters', [n for ((t,n), (l,a)) in xrefs.items() if t == 'templatefilter' and t == 'ref/templates/builtins'])]) outfilename = os.path.join(self.outdir, "templatebuiltins.js") f = open(outfilename, 'wb') f.write('var django_template_builtins = ') json.dump(templatebuiltins, f) f.write(';\n') f.close();
mit
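As a sketch, this is how such an extension is typically wired into a Sphinx project's conf.py; the _ext path mirrors Django's own docs layout, and the rest should be treated as assumptions:

# conf.py sketch -- assumes djangodocs.py lives in an _ext/ dir next to conf.py.
import os
import sys

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))

extensions = ["djangodocs"]     # Sphinx imports the module and calls setup(app)
django_next_version = "1.3"     # config value registered by setup() above

# The registered crossref types are then usable in the docs, e.g.:
#   .. setting:: INSTALLED_APPS       (definition site)
#   :setting:`INSTALLED_APPS`         (link to it)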
duncanhawthorne/robot-robot
libs/future/backports/email/header.py
82
24448
# Copyright (C) 2002-2007 Python Software Foundation # Author: Ben Gertzfield, Barry Warsaw # Contact: email-sig@python.org """Header encoding and decoding functionality.""" from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from future.builtins import bytes, range, str, super, zip __all__ = [ 'Header', 'decode_header', 'make_header', ] import re import binascii from future.backports import email from future.backports.email import base64mime from future.backports.email.errors import HeaderParseError import future.backports.email.charset as _charset # Helpers from future.backports.email.quoprimime import _max_append, header_decode Charset = _charset.Charset NL = '\n' SPACE = ' ' BSPACE = b' ' SPACE8 = ' ' * 8 EMPTYSTRING = '' MAXLINELEN = 78 FWS = ' \t' USASCII = Charset('us-ascii') UTF8 = Charset('utf-8') # Match encoded-word strings in the form =?charset?q?Hello_World?= ecre = re.compile(r''' =\? # literal =? (?P<charset>[^?]*?) # non-greedy up to the next ? is the charset \? # literal ? (?P<encoding>[qb]) # either a "q" or a "b", case insensitive \? # literal ? (?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string \?= # literal ?= ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE) # Field name regexp, including trailing colon, but not separating whitespace, # according to RFC 2822. Character range is from tilde to exclamation mark. # For use with .match() fcre = re.compile(r'[\041-\176]+:$') # Find a header embedded in a putative header value. Used to check for # header injection attack. _embeded_header = re.compile(r'\n[^ \t]+:') def decode_header(header): """Decode a message header value without converting charset. Returns a list of (string, charset) pairs containing each of the decoded parts of the header. Charset is None for non-encoded parts of the header, otherwise a lower-case string containing the name of the character set specified in the encoded string. header may be a string that may or may not contain RFC2047 encoded words, or it may be a Header object. An email.errors.HeaderParseError may be raised when certain decoding error occurs (e.g. a base64 decoding exception). """ # If it is a Header object, we can just return the encoded chunks. if hasattr(header, '_chunks'): return [(_charset._encode(string, str(charset)), str(charset)) for string, charset in header._chunks] # If no encoding, just return the header with no charset. if not ecre.search(header): return [(header, None)] # First step is to parse all the encoded parts into triplets of the form # (encoded_string, encoding, charset). For unencoded strings, the last # two parts will be None. words = [] for line in header.splitlines(): parts = ecre.split(line) first = True while parts: unencoded = parts.pop(0) if first: unencoded = unencoded.lstrip() first = False if unencoded: words.append((unencoded, None, None)) if parts: charset = parts.pop(0).lower() encoding = parts.pop(0).lower() encoded = parts.pop(0) words.append((encoded, encoding, charset)) # Now loop over words and remove words that consist of whitespace # between two encoded strings. import sys droplist = [] for n, w in enumerate(words): if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace(): droplist.append(n-1) for d in reversed(droplist): del words[d] # The next step is to decode each encoded word by applying the reverse # base64 or quopri transformation. decoded_words is now a list of the # form (decoded_word, charset). 
decoded_words = [] for encoded_string, encoding, charset in words: if encoding is None: # This is an unencoded word. decoded_words.append((encoded_string, charset)) elif encoding == 'q': word = header_decode(encoded_string) decoded_words.append((word, charset)) elif encoding == 'b': paderr = len(encoded_string) % 4 # Postel's law: add missing padding if paderr: encoded_string += '==='[:4 - paderr] try: word = base64mime.decode(encoded_string) except binascii.Error: raise HeaderParseError('Base64 decoding error') else: decoded_words.append((word, charset)) else: raise AssertionError('Unexpected encoding: ' + encoding) # Now convert all words to bytes and collapse consecutive runs of # similarly encoded words. collapsed = [] last_word = last_charset = None for word, charset in decoded_words: if isinstance(word, str): word = bytes(word, 'raw-unicode-escape') if last_word is None: last_word = word last_charset = charset elif charset != last_charset: collapsed.append((last_word, last_charset)) last_word = word last_charset = charset elif last_charset is None: last_word += BSPACE + word else: last_word += word collapsed.append((last_word, last_charset)) return collapsed def make_header(decoded_seq, maxlinelen=None, header_name=None, continuation_ws=' '): """Create a Header from a sequence of pairs as returned by decode_header() decode_header() takes a header value string and returns a sequence of pairs of the format (decoded_string, charset) where charset is the string name of the character set. This function takes one of those sequence of pairs and returns a Header instance. Optional maxlinelen, header_name, and continuation_ws are as in the Header constructor. """ h = Header(maxlinelen=maxlinelen, header_name=header_name, continuation_ws=continuation_ws) for s, charset in decoded_seq: # None means us-ascii but we can simply pass it on to h.append() if charset is not None and not isinstance(charset, Charset): charset = Charset(charset) h.append(s, charset) return h class Header(object): def __init__(self, s=None, charset=None, maxlinelen=None, header_name=None, continuation_ws=' ', errors='strict'): """Create a MIME-compliant header that can contain many character sets. Optional s is the initial header value. If None, the initial header value is not set. You can later append to the header with .append() method calls. s may be a byte string or a Unicode string, but see the .append() documentation for semantics. Optional charset serves two purposes: it has the same meaning as the charset argument to the .append() method. It also sets the default character set for all subsequent .append() calls that omit the charset argument. If charset is not provided in the constructor, the us-ascii charset is used both as s's initial charset and as the default for subsequent .append() calls. The maximum line length can be specified explicitly via maxlinelen. For splitting the first line to a shorter value (to account for the field header which isn't included in s, e.g. `Subject') pass in the name of the field in header_name. The default maxlinelen is 78 as recommended by RFC 2822. continuation_ws must be RFC 2822 compliant folding whitespace (usually either a space or a hard tab) which will be prepended to continuation lines. errors is passed through to the .append() call. 
""" if charset is None: charset = USASCII elif not isinstance(charset, Charset): charset = Charset(charset) self._charset = charset self._continuation_ws = continuation_ws self._chunks = [] if s is not None: self.append(s, charset, errors) if maxlinelen is None: maxlinelen = MAXLINELEN self._maxlinelen = maxlinelen if header_name is None: self._headerlen = 0 else: # Take the separating colon and space into account. self._headerlen = len(header_name) + 2 def __str__(self): """Return the string value of the header.""" self._normalize() uchunks = [] lastcs = None lastspace = None for string, charset in self._chunks: # We must preserve spaces between encoded and non-encoded word # boundaries, which means for us we need to add a space when we go # from a charset to None/us-ascii, or from None/us-ascii to a # charset. Only do this for the second and subsequent chunks. # Don't add a space if the None/us-ascii string already has # a space (trailing or leading depending on transition) nextcs = charset if nextcs == _charset.UNKNOWN8BIT: original_bytes = string.encode('ascii', 'surrogateescape') string = original_bytes.decode('ascii', 'replace') if uchunks: hasspace = string and self._nonctext(string[0]) if lastcs not in (None, 'us-ascii'): if nextcs in (None, 'us-ascii') and not hasspace: uchunks.append(SPACE) nextcs = None elif nextcs not in (None, 'us-ascii') and not lastspace: uchunks.append(SPACE) lastspace = string and self._nonctext(string[-1]) lastcs = nextcs uchunks.append(string) return EMPTYSTRING.join(uchunks) # Rich comparison operators for equality only. BAW: does it make sense to # have or explicitly disable <, <=, >, >= operators? def __eq__(self, other): # other may be a Header or a string. Both are fine so coerce # ourselves to a unicode (of the unencoded header value), swap the # args and do another comparison. return other == str(self) def __ne__(self, other): return not self == other def append(self, s, charset=None, errors='strict'): """Append a string to the MIME header. Optional charset, if given, should be a Charset instance or the name of a character set (which will be converted to a Charset instance). A value of None (the default) means that the charset given in the constructor is used. s may be a byte string or a Unicode string. If it is a byte string (i.e. isinstance(s, str) is false), then charset is the encoding of that byte string, and a UnicodeError will be raised if the string cannot be decoded with that charset. If s is a Unicode string, then charset is a hint specifying the character set of the characters in the string. In either case, when producing an RFC 2822 compliant header using RFC 2047 rules, the string will be encoded using the output codec of the charset. If the string cannot be encoded to the output codec, a UnicodeError will be raised. Optional `errors' is passed as the errors argument to the decode call if s is a byte string. """ if charset is None: charset = self._charset elif not isinstance(charset, Charset): charset = Charset(charset) if not isinstance(s, str): input_charset = charset.input_codec or 'us-ascii' if input_charset == _charset.UNKNOWN8BIT: s = s.decode('us-ascii', 'surrogateescape') else: s = s.decode(input_charset, errors) # Ensure that the bytes we're storing can be decoded to the output # character set, otherwise an early error is raised. 
output_charset = charset.output_codec or 'us-ascii' if output_charset != _charset.UNKNOWN8BIT: try: s.encode(output_charset, errors) except UnicodeEncodeError: if output_charset!='us-ascii': raise charset = UTF8 self._chunks.append((s, charset)) def _nonctext(self, s): """True if string s is not a ctext character of RFC822. """ return s.isspace() or s in ('(', ')', '\\') def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'): r"""Encode a message header into an RFC-compliant format. There are many issues involved in converting a given string for use in an email header. Only certain character sets are readable in most email clients, and as header strings can only contain a subset of 7-bit ASCII, care must be taken to properly convert and encode (with Base64 or quoted-printable) header strings. In addition, there is a 75-character length limit on any given encoded header field, so line-wrapping must be performed, even with double-byte character sets. Optional maxlinelen specifies the maximum length of each generated line, exclusive of the linesep string. Individual lines may be longer than maxlinelen if a folding point cannot be found. The first line will be shorter by the length of the header name plus ": " if a header name was specified at Header construction time. The default value for maxlinelen is determined at header construction time. Optional splitchars is a string containing characters which should be given extra weight by the splitting algorithm during normal header wrapping. This is in very rough support of RFC 2822's `higher level syntactic breaks': split points preceded by a splitchar are preferred during line splitting, with the characters preferred in the order in which they appear in the string. Space and tab may be included in the string to indicate whether preference should be given to one over the other as a split point when other split chars do not appear in the line being split. Splitchars does not affect RFC 2047 encoded lines. Optional linesep is a string to be used to separate the lines of the value. The default value is the most useful for typical Python applications, but it can be set to \r\n to produce RFC-compliant line separators when needed. """ self._normalize() if maxlinelen is None: maxlinelen = self._maxlinelen # A maxlinelen of 0 means don't wrap. For all practical purposes, # choosing a huge number here accomplishes that and makes the # _ValueFormatter algorithm much simpler. 
if maxlinelen == 0: maxlinelen = 1000000 formatter = _ValueFormatter(self._headerlen, maxlinelen, self._continuation_ws, splitchars) lastcs = None hasspace = lastspace = None for string, charset in self._chunks: if hasspace is not None: hasspace = string and self._nonctext(string[0]) import sys if lastcs not in (None, 'us-ascii'): if not hasspace or charset not in (None, 'us-ascii'): formatter.add_transition() elif charset not in (None, 'us-ascii') and not lastspace: formatter.add_transition() lastspace = string and self._nonctext(string[-1]) lastcs = charset hasspace = False lines = string.splitlines() if lines: formatter.feed('', lines[0], charset) else: formatter.feed('', '', charset) for line in lines[1:]: formatter.newline() if charset.header_encoding is not None: formatter.feed(self._continuation_ws, ' ' + line.lstrip(), charset) else: sline = line.lstrip() fws = line[:len(line)-len(sline)] formatter.feed(fws, sline, charset) if len(lines) > 1: formatter.newline() if self._chunks: formatter.add_transition() value = formatter._str(linesep) if _embeded_header.search(value): raise HeaderParseError("header value appears to contain " "an embedded header: {!r}".format(value)) return value def _normalize(self): # Step 1: Normalize the chunks so that all runs of identical charsets # get collapsed into a single unicode string. chunks = [] last_charset = None last_chunk = [] for string, charset in self._chunks: if charset == last_charset: last_chunk.append(string) else: if last_charset is not None: chunks.append((SPACE.join(last_chunk), last_charset)) last_chunk = [string] last_charset = charset if last_chunk: chunks.append((SPACE.join(last_chunk), last_charset)) self._chunks = chunks class _ValueFormatter(object): def __init__(self, headerlen, maxlen, continuation_ws, splitchars): self._maxlen = maxlen self._continuation_ws = continuation_ws self._continuation_ws_len = len(continuation_ws) self._splitchars = splitchars self._lines = [] self._current_line = _Accumulator(headerlen) def _str(self, linesep): self.newline() return linesep.join(self._lines) def __str__(self): return self._str(NL) def newline(self): end_of_line = self._current_line.pop() if end_of_line != (' ', ''): self._current_line.push(*end_of_line) if len(self._current_line) > 0: if self._current_line.is_onlyws(): self._lines[-1] += str(self._current_line) else: self._lines.append(str(self._current_line)) self._current_line.reset() def add_transition(self): self._current_line.push(' ', '') def feed(self, fws, string, charset): # If the charset has no header encoding (i.e. it is an ASCII encoding) # then we must split the header at the "highest level syntactic break" # possible. Note that we don't have a lot of smarts about field # syntax; we just try to break on semi-colons, then commas, then # whitespace. Eventually, this should be pluggable. if charset.header_encoding is None: self._ascii_split(fws, string, self._splitchars) return # Otherwise, we're doing either a Base64 or a quoted-printable # encoding which means we don't need to split the line on syntactic # breaks. We can basically just find enough characters to fit on the # current line, minus the RFC 2047 chrome. What makes this trickier # though is that we have to split at octet boundaries, not character # boundaries but it's only safe to split at character boundaries so at # best we can only get close. 
encoded_lines = charset.header_encode_lines(string, self._maxlengths()) # The first element extends the current line, but if it's None then # nothing more fit on the current line so start a new line. try: first_line = encoded_lines.pop(0) except IndexError: # There are no encoded lines, so we're done. return if first_line is not None: self._append_chunk(fws, first_line) try: last_line = encoded_lines.pop() except IndexError: # There was only one line. return self.newline() self._current_line.push(self._continuation_ws, last_line) # Everything else are full lines in themselves. for line in encoded_lines: self._lines.append(self._continuation_ws + line) def _maxlengths(self): # The first line's length. yield self._maxlen - len(self._current_line) while True: yield self._maxlen - self._continuation_ws_len def _ascii_split(self, fws, string, splitchars): # The RFC 2822 header folding algorithm is simple in principle but # complex in practice. Lines may be folded any place where "folding # white space" appears by inserting a linesep character in front of the # FWS. The complication is that not all spaces or tabs qualify as FWS, # and we are also supposed to prefer to break at "higher level # syntactic breaks". We can't do either of these without intimate # knowledge of the structure of structured headers, which we don't have # here. So the best we can do here is prefer to break at the specified # splitchars, and hope that we don't choose any spaces or tabs that # aren't legal FWS. (This is at least better than the old algorithm, # where we would sometimes *introduce* FWS after a splitchar, or the # algorithm before that, where we would turn all white space runs into # single spaces or tabs.) parts = re.split("(["+FWS+"]+)", fws+string) if parts[0]: parts[:0] = [''] else: parts.pop(0) for fws, part in zip(*[iter(parts)]*2): self._append_chunk(fws, part) def _append_chunk(self, fws, string): self._current_line.push(fws, string) if len(self._current_line) > self._maxlen: # Find the best split point, working backward from the end. # There might be none, on a long first line. for ch in self._splitchars: for i in range(self._current_line.part_count()-1, 0, -1): if ch.isspace(): fws = self._current_line[i][0] if fws and fws[0]==ch: break prevpart = self._current_line[i-1][1] if prevpart and prevpart[-1]==ch: break else: continue break else: fws, part = self._current_line.pop() if self._current_line._initial_size > 0: # There will be a header, so leave it on a line by itself. self.newline() if not fws: # We don't use continuation_ws here because the whitespace # after a header should always be a space. 
fws = ' ' self._current_line.push(fws, part) return remainder = self._current_line.pop_from(i) self._lines.append(str(self._current_line)) self._current_line.reset(remainder) class _Accumulator(list): def __init__(self, initial_size=0): self._initial_size = initial_size super().__init__() def push(self, fws, string): self.append((fws, string)) def pop_from(self, i=0): popped = self[i:] self[i:] = [] return popped def pop(self): if self.part_count()==0: return ('', '') return super().pop() def __len__(self): return sum((len(fws)+len(part) for fws, part in self), self._initial_size) def __str__(self): return EMPTYSTRING.join((EMPTYSTRING.join((fws, part)) for fws, part in self)) def reset(self, startval=None): if startval is None: startval = [] self[:] = startval self._initial_size = 0 def is_onlyws(self): return self._initial_size==0 and (not self or str(self).isspace()) def part_count(self): return super().__len__()
mit
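A small usage sketch for the module above; the same API ships in the standard library as email.header, so the calls themselves are standard, though the exact encoded output can vary between the 'q' and 'b' forms:

# Sketch using the stdlib equivalent of the vendored module above.
from email.header import Header, decode_header, make_header

raw = "=?utf-8?q?Caf=C3=A9?= menu"
parts = decode_header(raw)        # list of (decoded_part, charset-or-None) pairs
print(str(make_header(parts)))    # "Café menu"

h = Header("Café menu", charset="utf-8", header_name="Subject")
print(h.encode())                 # RFC 2047 form, e.g. "=?utf-8?q?Caf=C3=A9_menu?="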
ville-k/tensorflow
tensorflow/python/framework/function_test.py
16
43635
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import time import numpy as np from tensorflow.core.framework import function_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gen_logging_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import init_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging def _OptimizerOptions(): for cse in [False, True]: for inline in [False, True]: for cfold in [False, True]: yield config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions( optimizer_options=config_pb2.OptimizerOptions( opt_level=config_pb2.OptimizerOptions.L0, do_common_subexpression_elimination=cse, do_function_inlining=inline, do_constant_folding=cfold))) class FunctionTest(test.TestCase): def testDefineFunction2Args(self): @function.Defun(dtypes.float32, dtypes.float32, func_name="APlus2B") def APlus2B(a, b): return a + b * 2 with ops.Graph().as_default(): call = APlus2B([1.0], [2.0]) self.assertEqual("APlus2B", call.op.name) with session.Session() as sess: self.assertAllEqual([5.0], sess.run(call)) def testDefineFunctionDuplicateOutputs(self): @function.Defun(dtypes.float32, func_name="Duplicate") def Duplicate(a): b = a + 1.0 return b, b g = ops.Graph() with g.as_default(): Duplicate([3.0]) func_sig = g.as_graph_def().library.function[0].signature # The names given to both outputs should be different # even though the same tensor is emitted to both. 
out_names = [a.name for a in func_sig.output_arg] self.assertEqual(2, len(out_names)) self.assertNotEqual(out_names[0], out_names[1]) def testGradientFunc(self): @function.Defun(dtypes.float32, func_name="XSquarePlusOneFn") def XSquarePlusOne(x): return x * x + 1.0 @function.Defun(dtypes.float32, dtypes.float32) def XSquarePlusOneGrad(x, dy): dx = functional_ops._symbolic_gradient( input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx") return dx g = ops.Graph() with g.as_default(): call_f = XSquarePlusOne([2.0]) call_g = XSquarePlusOneGrad([2.0], [0.1]) with session.Session() as sess: self.assertAllClose([5.0], sess.run(call_f)) self.assertAllClose([0.4], sess.run(call_g)) def testTanhSymGrad(self): @function.Defun(dtypes.float32) def Forward(x): return math_ops.reduce_sum(math_ops.tanh(x)) g = ops.Graph() with g.as_default(): x = array_ops.placeholder(dtypes.float32) y = Forward(x) dx = gradients_impl.gradients([y], [x]) inp = np.array([-1, 1, 2, -2], dtype=np.float32) feed = {x: inp} cfg = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions( optimizer_options=config_pb2.OptimizerOptions( opt_level=config_pb2.OptimizerOptions.L1, do_function_inlining=True))) with session.Session(graph=g, config=cfg) as sess: out, = sess.run(dx, feed) self.assertAllClose(1 - np.square(np.tanh(inp)), out) def testCustomGradient(self): dtype = dtypes.float32 @function.Defun(dtype, dtype, dtype) def XentLossGrad(logits, labels, dloss): dlogits = array_ops.reshape(dloss, [-1, 1]) * ( nn_ops.softmax(logits) - labels) dlabels = array_ops.zeros_like(labels) # Takes exp(dlogits) to differentiate it from the "correct" gradient. return math_ops.exp(dlogits), dlabels @function.Defun(dtype, dtype, grad_func=XentLossGrad) def XentLoss(logits, labels): return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)), 1) g = ops.Graph() with g.as_default(): logits = array_ops.placeholder(dtype) labels = array_ops.placeholder(dtype) loss = XentLoss(logits, labels) dlogits = gradients_impl.gradients([loss], [logits]) x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32) prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1) y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32) for cfg in _OptimizerOptions(): tf_logging.info("cfg = %s", cfg) with session.Session(graph=g, config=cfg) as sess: out, = sess.run(dlogits, {logits: x, labels: y}) self.assertAllClose(out, np.exp(prob - y)) def testCustomGradientError(self): dtype = dtypes.float32 @function.Defun(dtype, dtype, dtype) def Grad(x, dy, dz): # Should have returned 1 result. return x, dy + dz @function.Defun(dtype, grad_func=Grad) def Forward(x): return x, x g = ops.Graph() with g.as_default(): inp = array_ops.placeholder(dtype) out = math_ops.add_n(Forward(inp)) dinp = gradients_impl.gradients(out, [inp]) x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32) with session.Session(graph=g) as sess: with self.assertRaisesRegexp( errors_impl.InvalidArgumentError, "SymGrad expects to return 1.*but get 2.*instead"): _ = sess.run(dinp, {inp: x}) def testSymGradShape(self): g = ops.Graph() with g.as_default(): x = array_ops.placeholder(dtypes.float32, [25, 4]) y = array_ops.placeholder(dtypes.float32, [200, 100]) dz = array_ops.placeholder(dtypes.float32, [1]) # We assume Foo is a function of (x, y) -> (z) Then, Foo's # gradient function is (x, y, dz) -> (dx, dy). dx's shape # should be the same as x's; and dy's shape should be the same # as y's. 
dx, dy = functional_ops._symbolic_gradient( input=[x, y, dz], Tout=[dtypes.float32] * 2, f="Foo") self.assertEqual(x.get_shape(), dx.get_shape()) self.assertEqual(y.get_shape(), dy.get_shape()) def testSymGradAttr(self): @function.Defun(noinline=True) def Foo(x): return x * 2 self.assertTrue( Foo.instantiate([dtypes.float32]).definition.attr["_noinline"].b) g = ops.Graph() with g.as_default(): x = constant_op.constant(3.0) y = Foo(x) dx, = gradients_impl.gradients(y, [x]) cfg = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions( optimizer_options=config_pb2.OptimizerOptions( opt_level=config_pb2.OptimizerOptions.L0, do_common_subexpression_elimination=True, do_function_inlining=True, do_constant_folding=True))) with self.test_session(graph=g, config=cfg): self.assertAllClose(y.eval(), 6.) self.assertAllClose(dx.eval(), 2.) def testZNoDepOnY(self): @function.Defun(dtypes.float32, dtypes.float32) def Foo(x, y): # pylint: disable=unused-argument return x * 2 with ops.Graph().as_default(): # z = Foo(x, y). z doe x = constant_op.constant(1.0) y = constant_op.constant(2.0) z = Foo(x, y) dx, dy = gradients_impl.gradients([z], [x, y]) with session.Session() as sess: dx_val, dy_val = sess.run([dx, dy]) self.assertEqual([2.0], dx_val) self.assertEqual([0.0], dy_val) def testDefineFunctionNoArgs(self): @function.Defun(func_name="AConstant") def AConstant(): return constant_op.constant([42]) with ops.Graph().as_default(): call = AConstant() self.assertEqual("AConstant", call.op.name) with session.Session() as sess: self.assertAllEqual([42], sess.run(call)) def testDefineFunctionNames(self): @function.Defun(dtypes.float32, func_name="Foo") def Foo(a): return a + 1 with ops.Graph().as_default(): call1 = Foo([1.0]) self.assertEqual("Foo", call1.op.name) call2 = Foo([1.0]) self.assertEqual("Foo_1", call2.op.name) # pylint: disable=unexpected-keyword-arg call3 = Foo([1.0], name="mine") self.assertEqual("mine", call3.op.name) with ops.name_scope("my"): call4 = Foo([1.0], name="precious") self.assertEqual("my/precious", call4.op.name) def testNoOp(self): @function.Defun(dtypes.float32) def Foo(x): y = logging_ops.Print(x, [x], "Hello") with ops.control_dependencies([y]): z = control_flow_ops.no_op() with ops.control_dependencies([z]): return x * 2 with ops.Graph().as_default(), self.test_session(): z = Foo(constant_op.constant(3.0)) self.assertAllEqual(z.eval(), 6.0) def testAssertOp(self): @function.Defun(dtypes.float32) def Foo(x): check = gen_logging_ops._assert(math_ops.greater(x, 0), [x]) with ops.control_dependencies([check]): return x * 2 g = ops.Graph() with g.as_default(), self.test_session(): self.assertAllEqual(Foo(constant_op.constant(3.0)).eval(), 6.0) with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "assertion failed.*-3"): self.assertAllEqual(Foo(constant_op.constant(-3.0)).eval(), 6.0) def testAssertWrapper(self): @function.Defun(dtypes.float32) def MyFn(x): with ops.control_dependencies( [control_flow_ops.Assert(math_ops.less_equal(x, 10.0), [x])]): return array_ops.identity(x) with self.test_session(): self.assertEqual(1.0, MyFn(1.0).eval()) with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "assertion"): _ = MyFn(100.0).eval() def testControlFlowStrictness(self): """Inlined functions must not execute in a untaken control flow branch.""" @function.Defun(dtypes.int32) def AssertFail(x): # Assertion that always fails and does not have a data dependency on `x`. 
assert_false = control_flow_ops.Assert(False, [42]) with ops.control_dependencies([assert_false]): return array_ops.identity(x) with ops.device("CPU"): pred = array_ops.placeholder(dtypes.bool) x = array_ops.placeholder(dtypes.int32) cond = control_flow_ops.cond(pred, lambda: x + 1, lambda: AssertFail(x)) # pylint: disable=unnecessary-lambda loop = control_flow_ops.while_loop(lambda y: pred, lambda y: AssertFail(y), [x]) # pylint: enable=unnecessary-lambda # Enables inlining. config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions( optimizer_options=config_pb2.OptimizerOptions( opt_level=config_pb2.OptimizerOptions.L0, do_common_subexpression_elimination=True, do_function_inlining=True, do_constant_folding=True))) with session.Session(config=config) as sess: # Since the 'False' branch is not taken, the assertion should not fire. self.assertEqual(4, sess.run(cond, {pred: True, x: 3})) # The assertion should still fire if the False branch is taken. with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "assertion"): sess.run(cond, {pred: False, x: 3}) # Similarly for loops. self.assertEqual(3, sess.run(loop, {pred: False, x: 3})) with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "assertion"): sess.run(loop, {pred: True, x: 3}) def testVar(self): @function.Defun(dtypes.float32) def Foo(x): return x * x + 1 g = ops.Graph() with g.as_default(): v = variables.Variable(constant_op.constant(10.0)) z = Foo(v) with self.test_session(graph=g): variables.global_variables_initializer().run() self.assertAllEqual(z.eval(), 101.) def testResourceVarAsImplicitInput(self): g = ops.Graph() with g.as_default(), ops.device("cpu:0"): v = variable_scope.get_variable( "var", (4, 4), dtypes.float32, use_resource=True) @function.Defun() def Foo(): return array_ops.identity(v) y = v.value() z = Foo() with self.test_session(graph=g): v.initializer.run() self.assertAllEqual(y.eval(), z.eval()) def testDefineErrors(self): with ops.Graph().as_default(): with self.assertRaisesRegexp(ValueError, "can not return None"): @function.Defun() def NoResult(): pass _ = NoResult.definition with self.assertRaisesRegexp(ValueError, "can not return None"): @function.Defun() def TwoNone(): return None, None _ = TwoNone.definition with self.assertRaisesRegexp(ValueError, "are not supported"): @function.Defun() def DefaultArg(unused_a=12): return constant_op.constant([1]) _ = DefaultArg.definition with self.assertRaisesRegexp(ValueError, "are not supported"): @function.Defun() def KwArgs(**unused_kwargs): return constant_op.constant([1]) _ = KwArgs.definition with self.assertRaisesRegexp(ValueError, "specified input types"): @function.Defun(dtypes.float32) def PlusMinusV2(a, b): return a + b, b - a _ = PlusMinusV2.definition with self.assertRaisesRegexp(ValueError, "specified input types"): @function.Defun(dtypes.float32, dtypes.float32, dtypes.float32) def PlusMinusV3(a, b): return a + b, b - a _ = PlusMinusV3.definition def testCallErrors(self): @function.Defun() def Const(): return constant_op.constant(1) @function.Defun(dtypes.int32) def PlusOne(a): return a + 1 @function.Defun(dtypes.int32, dtypes.int32) def PlusMinus(a, b): return a + b, b - a with ops.Graph().as_default(): _ = Const() # pylint: disable=too-many-function-args # pylint: disable=unexpected-keyword-arg # pylint: disable=no-value-for-parameter with self.assertRaisesRegexp(ValueError, "arguments: 0"): _ = Const(1) with self.assertRaisesRegexp(ValueError, "arguments: 0"): _ = Const(1, 2) with self.assertRaisesRegexp(ValueError, 
"arguments: 1"): _ = PlusOne() _ = PlusOne(1) with self.assertRaisesRegexp(ValueError, "arguments: 1"): _ = PlusOne(1, 2) with self.assertRaisesRegexp(ValueError, "arguments: 2"): _ = PlusMinus() with self.assertRaisesRegexp(ValueError, "arguments: 2"): _ = PlusMinus(1) _ = PlusMinus(1, 2) _ = PlusOne(1, name="p1") with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"): _ = PlusOne(1, device="/gpu:0") def testFunctionDecorator(self): @function.Defun(dtypes.float32, func_name="Minus1") def Minus1(b): return b - 1.0 with ops.Graph().as_default(): call1 = Minus1([2.]) self.assertTrue(isinstance(Minus1, function._DefinedFunction)) self.assertEqual(Minus1.name, "Minus1") # pylint: disable=unexpected-keyword-arg call2 = Minus1(call1, name="next") # pylint: enable=unexpected-keyword-arg self.assertEqual("next", call2.op.name) with session.Session() as sess: self.assertAllEqual([1], sess.run(call1)) self.assertAllEqual([0], sess.run(call2)) def testNestedFunction(self): @function.Defun(dtypes.float32) def Cube(x): return x * x * x @function.Defun(dtypes.float32, dtypes.float32) def CubeXPlusY(x, y): return Cube(x) + y with ops.Graph().as_default(): z = CubeXPlusY(3.0, -2.0) with self.test_session(): self.assertAllEqual(z.eval(), 25.0) def testNestedDefinedFunction(self): @function.Defun(dtypes.float32, dtypes.float32) def CubeXPlusY(x, y): @function.Defun(dtypes.float32) def Cube(x): return x * x * x return Cube(x) + y with ops.Graph().as_default(): z = CubeXPlusY(3.0, -2.0) with self.test_session(): self.assertAllEqual(z.eval(), 25.0) def testUnusedFunction(self): invoked = False # pylint: disable=unused-variable @function.Defun() def Unused(): invoked = True return constant_op.constant(42.) self.assertFalse(invoked) g = ops.Graph() with g.as_default(): @function.Defun() def Unused2(): invoked = True return constant_op.constant(7.) constant_op.constant(3.) # pylint: enable=unused-variable self.assertFalse(invoked) gdef = g.as_graph_def() self.assertEqual(0, len(gdef.library.function)) def testReduction(self): g = ops.Graph() # BN0 is computing batch normed matrix along rows. def BN0(x): mean = math_ops.reduce_mean(x, [0]) var = math_ops.reduce_mean(math_ops.square(x - mean)) # biased var rstd = math_ops.rsqrt(var + 1e-8) return (x - mean) * rstd # Wraps BatchNorm in a tf function. @function.Defun(dtypes.float32) def BN1(x): return BN0(x) with g.as_default(): x = array_ops.placeholder(dtypes.float32) y0 = BN0(x) # A plain graph y1 = BN1(x) # A tf function dx0, = gradients_impl.gradients([y0], [x]) dx1, = gradients_impl.gradients([y1], [x]) # Both should produce the same result and gradient. 
with self.test_session(graph=g) as sess: vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))}) self.assertAllClose(vals[0], vals[1]) self.assertAllClose(vals[2], vals[3]) def testDeclare(self): foo = function.Declare("Foo", [("x", dtypes.float32)], [("y", dtypes.float32)]) @function.Defun(dtypes.float32, func_name="Foo", out_names=["y"]) def FooImpl(x): return x * x + 1 x = array_ops.placeholder(dtypes.float32) y = foo(x) g = ops.get_default_graph() FooImpl.add_to_graph(g) with self.test_session(): rand = np.random.uniform(size=(3, 3)) expected = rand * rand + 1.0 self.assertAllClose(expected, y.eval(feed_dict={x: rand})) def testDeclareUsedInDefun(self): foo = function.Declare("Foo", [("x", dtypes.float32)], [("y", dtypes.float32)]) @function.Defun() def Bar(x): return foo(x) @function.Defun(dtypes.float32, func_name="Foo", out_names=["y"]) def FooImpl(x): return x * x + 1 x = array_ops.placeholder(dtypes.float32) y = Bar(x) g = ops.get_default_graph() FooImpl.add_to_graph(g) with self.test_session(): rand = np.random.uniform(size=(3, 3)) expected = rand * rand + 1.0 self.assertAllClose(expected, y.eval(feed_dict={x: rand})) def testDeclareTypeMistake(self): foo = function.Declare("Foo", [("x", dtypes.float32)], [("y", dtypes.float32)]) @function.Defun(dtypes.float32, func_name="Foo", out_names=["y"]) def Foo(x): return x * x + 1 g = ops.Graph() with g.as_default(): y = foo(2.0) with self.test_session(graph=g): with self.assertRaisesRegexp(errors_impl.NotFoundError, "not registered"): _ = y.eval() g = ops.Graph() with g.as_default(): Foo.add_to_graph(g) y = foo(2) with self.test_session(graph=g): with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "int32.*float"): _ = y.eval() g = ops.Graph() with g.as_default(): Foo.add_to_graph(g) with self.assertRaisesRegexp( ValueError, "Expected number of arguments: 1, received: 2"): _ = foo(2.0, 2.0) g = ops.Graph() with g.as_default(): Foo.add_to_graph(g) y = foo(2.0) with self.test_session(graph=g): self.assertAllEqual(y.eval(), 5.0) def testCapture(self): g = ops.Graph() with g.as_default(): w = variables.Variable(constant_op.constant([[1.0]])) b = variables.Variable(constant_op.constant([2.0])) # Foo() captures w and b. @function.Defun(dtypes.float32) def Foo(x): # Plus() captures b. @function.Defun(dtypes.float32) def Plus(y): return y + b return Plus(math_ops.matmul(w, x)) y = Foo(constant_op.constant([[10.]])) with self.test_session(graph=g): variables.global_variables_initializer().run() self.assertAllEqual(y.eval(), [[12.0]]) def testCaptureControls(self): g = ops.Graph() with g.as_default(): x = constant_op.constant([10.0]) x = logging_ops.Print(x, [x], "outer") @function.Defun(dtypes.float32) def Foo(y): with ops.control_dependencies([x]): y = logging_ops.Print(y, [y], "inner") return y with self.assertRaisesRegexp(ValueError, "not an element of this graph."): # NOTE: We still do not support capturing control deps. _ = Foo(x) def testStableName(self): @function.Defun() def Foo(x, y, z): return math_ops.tanh(math_ops.matmul(x, y) + z) self.assertEqual("Foo_d643acf7", Foo.instantiate([dtypes.float32] * 3).name) def testSignatureHash(self): # Foo.Inner and Bar.Inner have identical function body but have # different signatures. They should be treated as two different functions. @function.Defun() def Foo(x): @function.Defun() def Inner(x): return x + 10. return Inner(x) @function.Defun() def Bar(x): @function.Defun() def Inner(x, unused_y, unused_z): return x + 10. return Inner(x, 2., 3.) 
g = ops.Graph() with g.as_default(): x = constant_op.constant(10.0) y = Foo(x) z = Bar(x) with self.test_session(graph=g) as sess: v0, v1 = sess.run([y, z]) self.assertAllEqual(v0, 20.) self.assertAllEqual(v1, 20.) def testShapeFunction(self): @function.Defun(dtypes.float32, shape_func=lambda op: [op.inputs[0].get_shape()]) def Foo(x): return x + 1.0 @function.Defun( shape_func=lambda op: [[1] + op.inputs[0].get_shape().as_list()]) def Bar(x): return array_ops.stack([x]) g = ops.Graph() with g.as_default(): x = Foo([1.0, 2.0]) self.assertEqual(x.get_shape().as_list(), [2]) y = Bar(array_ops.zeros([1, 2, 3])) self.assertAllEqual(y.get_shape().as_list(), [1, 1, 2, 3]) def testVariableReuse(self): def LinearWithReuse(input_tensor, reuse=None): size = input_tensor.shape.dims[1] with variable_scope.variable_scope("linear", reuse=reuse): w = variable_scope.get_variable("w", shape=[size, size], dtype=input_tensor.dtype) return math_ops.matmul(input_tensor, w) @function.Defun(dtypes.float32) def Foo(inputs): inputs = array_ops.reshape(inputs, [32, 100]) hidden = LinearWithReuse(inputs) return LinearWithReuse(hidden, reuse=True) input_op = array_ops.placeholder(shape=[32, 100], dtype=dtypes.float32) output_op = Foo(input_op) global_vars = variables.global_variables() self.assertEqual(len(global_vars), 1) self.assertEqual(global_vars[0].name, "linear/w:0") with session.Session() as sess: sess.run(variables.global_variables_initializer()) output_val = sess.run(output_op, feed_dict={input_op: np.random.rand(32, 100)}) self.assertEqual(output_val.shape, (32, 100)) def testFunctionCallInDifferentVariableScopes(self): @function.Defun(dtypes.float32) def Foo(inputs): var = variable_scope.get_variable("var", shape=[10], dtype=dtypes.float32, initializer=init_ops.ones_initializer()) return inputs + var input_op = array_ops.placeholder(shape=[10], dtype=dtypes.float32) with variable_scope.variable_scope("vs1"): out1_op = Foo(input_op) with variable_scope.variable_scope("vs2"): out2_op = Foo(input_op) global_vars = variables.global_variables() self.assertEqual(len(global_vars), 1) self.assertEqual(global_vars[0].name, "vs1/var:0") with session.Session() as sess: sess.run(variables.global_variables_initializer()) out1, out2 = sess.run([out1_op, out2_op], feed_dict={input_op: np.linspace(1, 10, 10)}) self.assertAllEqual(out1, np.linspace(2, 11, 10)) self.assertAllEqual(out2, np.linspace(2, 11, 10)) def testTwoInputsSameOp(self): g = ops.Graph() with g.as_default(): m = array_ops.placeholder(dtypes.float32) s, u, v = linalg_ops.svd(m) ss = math_ops.reduce_sum(s) uu = math_ops.reduce_sum(u) vv = math_ops.reduce_sum(v) result = ss + uu + vv f = function._graph_to_function_def( g, g.get_operations()[1:], # skip the placeholder [s, u, v], [result]) self.assertEqual(len(f.signature.input_arg), 3) class FunctionsFromProtos(test.TestCase): def expectFunctionsEqual(self, func, grad_func=None, new_func=None): if new_func is None: # Make a copy of func.definition to avoid any bugs masked by using the # same object serialized_fdef = func.definition.SerializeToString() # Serialize and then deserialize `func` to create `new_func` fdef = function_pb2.FunctionDef.FromString(serialized_fdef) new_func = function._from_definition(fdef, grad_func=grad_func) self.assertEqual(func.name, new_func.name) self.assertEqual(func.definition, new_func.definition) self.assertEqual(func.grad_func_name, new_func.grad_func_name) self.assertEqual(func.declared_input_types, new_func.declared_input_types) self.assertEqual(func.captured_inputs, 
new_func.captured_inputs) def testBasic(self): @function.Defun(dtypes.float32, dtypes.float32) def Foo(x, y): return x + y self.expectFunctionsEqual(Foo) def testGradFunc(self): @function.Defun(dtypes.float32, dtypes.float32) def G(x, dy): return x * dy @function.Defun(dtypes.float32, grad_func=G) def F(x): return math_ops.exp(x) - math_ops.exp(-x) self.expectFunctionsEqual(F, grad_func=G) def testCapturedInputs(self): c = constant_op.constant(10, dtypes.int64) @function.Defun(dtypes.int64) def Foo(x): return x + c new_func = function._from_definition(Foo.definition) self.assertEqual(Foo.name, new_func.name) self.assertEqual(Foo.definition, new_func.definition) self.assertEqual(Foo.grad_func_name, new_func.grad_func_name) # Captured inputs are added as regular inputs to the function definition self.assertEqual(new_func.declared_input_types, Foo.declared_input_types + (dtypes.int64,)) self.assertEqual(len(new_func.captured_inputs), 0) def testNestedFunctions(self): @function.Defun(dtypes.float32) def Outer(x): @function.Defun(dtypes.float32) def Inner(y): return y + 1 return Inner(Inner(x)) self.expectFunctionsEqual(Outer) def testFromLibrary(self): # Define some functions with different gradient functions. Note that many of # the below functions are identical since function bodies don't matter for # this test. @function.Defun(dtypes.float32, dtypes.float32) def G1(x, dy): return x * dy @function.Defun(dtypes.float32, dtypes.float32) def G2(x, dy): return x * dy # F1 and F2 have the same gradient function @function.Defun(dtypes.float32, grad_func=G1) def F1(x): return math_ops.exp(x) - math_ops.exp(-x) @function.Defun(dtypes.float32, grad_func=G1) def F2(x): return math_ops.exp(x) - math_ops.exp(-x) # F3 has a different gradient function @function.Defun(dtypes.float32, grad_func=G2) def F3(x): return math_ops.exp(x) - math_ops.exp(-x) # F4 has no gradient function @function.Defun(dtypes.float32) def F4(x): return math_ops.exp(x) - math_ops.exp(-x) # Instantiate all functions g = ops.Graph() with g.as_default(): c = constant_op.constant(1.0, dtypes.float32) f1 = F1(c) f2 = F2(c) f3 = F3(c) f4 = F4(c) gradients_impl.gradients([f1, f2, f3, f4], c) library = g.as_graph_def().library new_funcs = function._from_library(library) def CheckNewFunc(func): new_func = [f for f in new_funcs if f.name == func.name] self.assertEqual(len(new_func), 1) self.expectFunctionsEqual(func, new_func=new_func[0]) CheckNewFunc(G1) CheckNewFunc(G2) CheckNewFunc(F1) CheckNewFunc(F2) CheckNewFunc(F3) CheckNewFunc(F4) def testFromLibraryEmptyLib(self): library = function_pb2.FunctionDefLibrary() self.assertEqual(len(function._from_library(library)), 0) def testFromLibraryMissingFuncDef(self): @function.Defun(dtypes.float32, dtypes.float32) def G1(x, dy): return x * dy @function.Defun(dtypes.float32) def F1(x): return math_ops.exp(x) - math_ops.exp(-x) gradient = function_pb2.GradientDef() gradient.function_name = F1.name gradient.gradient_func = G1.name # Create invalid function def that is missing G1 function def library = function_pb2.FunctionDefLibrary() library.gradient.extend([gradient]) library.function.extend([F1.definition]) with self.assertRaisesRegexp( ValueError, "FunctionDefLibrary missing 'G1_........' 
FunctionDef"): function._from_library(library) # Create invalid function def that is missing F1 function def library = function_pb2.FunctionDefLibrary() library.gradient.extend([gradient]) library.function.extend([G1.definition]) with self.assertRaisesRegexp( ValueError, "FunctionDefLibrary missing 'F1_........' FunctionDef"): function._from_library(library) def testFromLibraryCyclicGradFuncs(self): @function.Defun(dtypes.float32) def F1(x): return math_ops.exp(x) - math_ops.exp(-x) @function.Defun(dtypes.float32) def F2(x): return math_ops.exp(x) - math_ops.exp(-x) # Create invalid function def library where F1 has gradient function F2 and # F2 has gradient function F1 library = function_pb2.FunctionDefLibrary() library.function.extend([F1.definition, F2.definition]) gradient1 = function_pb2.GradientDef() gradient1.function_name = F1.name gradient1.gradient_func = F2.name gradient2 = function_pb2.GradientDef() gradient2.function_name = F2.name gradient2.gradient_func = F1.name library.gradient.extend([gradient1, gradient2]) with self.assertRaisesRegexp( ValueError, "FunctionDefLibrary contains cyclic gradient functions!"): function._from_library(library) class FunctionOverloadTest(test.TestCase): def testBasic(self): @function.Defun() def Sinh(x): return 1 / 2. * (math_ops.exp(x) - math_ops.exp(-x)) g = ops.Graph() with g.as_default(): x = Sinh(constant_op.constant(0.25, dtypes.float32)) y = Sinh(constant_op.constant(0.25, dtypes.float64)) with self.test_session(graph=g): self.assertAllClose(x.eval(), np.sinh(0.25)) self.assertAllClose(y.eval(), np.sinh(0.25)) def testGradient(self): @function.Defun(func_name="Spec") def G(x, dy): return x * dy @function.Defun(grad_func=G) def F(x): return math_ops.exp(x) - math_ops.exp(-x) for dtype in [dtypes.float32, dtypes.float64]: g = ops.Graph() with g.as_default(): x = constant_op.constant(0.25, dtype) y = F(x) dx, = gradients_impl.gradients(y, x) with self.test_session(graph=g): self.assertAllClose(dx.eval(), 0.25) def testDocString(self): @function.Defun() def Foo(x): """Successor of x.""" return x + 1 g = ops.Graph() with g.as_default(): _ = Foo(1) self.assertEqual(g.as_graph_def().library.function[0].signature.description, "Successor of x.") class UnrollLSTMTest(test.TestCase): BATCH_SIZE = 16 LSTM_DIMS = 32 NUM_UNROLL = 20 def _Weights(self): dims = self.LSTM_DIMS return random_ops.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456) def _Input(self): return random_ops.random_uniform( [self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=654321) # Helper to construct a LSTM cell graph. @classmethod def LSTMCell(cls, x, mprev, cprev, weights): xm = array_ops.concat([x, mprev], 1) i_i, i_g, f_g, o_g = array_ops.split( value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1) new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid( i_g) * math_ops.tanh(i_i) new_c = clip_ops.clip_by_value(new_c, -50.0, 50.0) new_m = math_ops.sigmoid(o_g) * math_ops.tanh(new_c) return new_m, new_c def _BuildForward(self, weights, inp, mode="cell"): def Loop(cell, w, i): x = array_ops.unstack(i, self.NUM_UNROLL) m = array_ops.zeros_like(x[0]) c = array_ops.zeros_like(x[0]) for i in range(self.NUM_UNROLL): m, c = cell(x[i], m, c, w) return m cell = UnrollLSTMTest.LSTMCell if mode == "complete": # Constructs the complete graph in python. return Loop(cell, weights, inp) cell = function.Defun(dtypes.float32, dtypes.float32, dtypes.float32, dtypes.float32)(cell) if mode == "cell": # Just represent the LSTM as a function. 
return Loop(cell, weights, inp) if mode == "loop": # Wraps the whole loop as a function. @function.Defun(dtypes.float32, dtypes.float32) def LSTMLoop(w, i): return Loop(cell, w, i) return LSTMLoop(weights, inp) if mode == "loop10": # Wraps 10 lstm steps into one function, and the whole loop # into another calling the formers. # Groups 10 steps at a time. @function.Defun(dtypes.float32, dtypes.float32, dtypes.float32, *([dtypes.float32] * 10)) def Loop10(w, m, c, *args): for x in args: m, c = cell(x, m, c, w) return m, c @function.Defun(dtypes.float32, dtypes.float32) def LSTMLoop10(weights, inp): x = array_ops.unstack(inp, self.NUM_UNROLL) m = array_ops.zeros_like(x[0]) c = array_ops.zeros_like(x[0]) assert self.NUM_UNROLL % 10 == 0 for i in range(0, self.NUM_UNROLL, 10): m, c = Loop10(weights, m, c, *x[i:i + 10]) return m return LSTMLoop10(weights, inp) def testUnrollLSTM(self): # Run one step of the unrolled lstm graph. def RunForward(mode, cfg=None): tf_logging.info("mode = %s", mode) g = ops.Graph() start = time.time() with g.as_default(): weights = self._Weights() inp = self._Input() m = self._BuildForward(weights, inp, mode) gdef = g.as_graph_def() finish = time.time() tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start, len(str(gdef)), len(gdef.SerializeToString())) with g.as_default(), session.Session(config=cfg) as sess: return sess.run(m) mv0 = RunForward("complete") for cfg in _OptimizerOptions(): tf_logging.info("cfg = %s", cfg) mv1 = RunForward("cell", cfg) mv2 = RunForward("loop", cfg) mv3 = RunForward("loop10", cfg) self.assertAllClose(mv0, mv1, rtol=1e-4) self.assertAllClose(mv0, mv2, rtol=1e-4) self.assertAllClose(mv0, mv3, rtol=1e-4) def testUnrollLSTMGrad(self): # Run one step of the unrolled lstm graph. def RunForwardBackward(mode, cfg=None): tf_logging.info("mode = %s", mode) g = ops.Graph() start = time.time() with g.as_default(): weights = self._Weights() inp = self._Input() m = self._BuildForward(weights, inp, mode) loss = math_ops.reduce_sum(math_ops.square(m)) dw = gradients_impl.gradients([loss], [weights]) gdef = g.as_graph_def() finish = time.time() tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start, len(str(gdef)), len(gdef.SerializeToString())) with g.as_default(), session.Session(config=cfg) as sess: return sess.run(dw) d0 = RunForwardBackward("complete") for cfg in _OptimizerOptions(): tf_logging.info("cfg = %s", cfg) d1 = RunForwardBackward("cell", cfg) d2 = RunForwardBackward("loop", cfg) d3 = RunForwardBackward("loop10", cfg) self.assertAllClose(d0, d1, rtol=1e-4, atol=1e-4) self.assertAllClose(d0, d2, rtol=1e-4, atol=1e-4) self.assertAllClose(d0, d3, rtol=1e-4, atol=1e-4) class FunctionInlineControlTest(test.TestCase): def testFoo(self): dtype = dtypes.float32 cfg = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions( optimizer_options=config_pb2.OptimizerOptions( opt_level=config_pb2.OptimizerOptions.L0, do_common_subexpression_elimination=True, do_function_inlining=True, do_constant_folding=True))) cell_func_call_pattern = re.compile(r"Cell[^/]*\(") for noinline in [False, True]: @function.Defun(dtype, noinline=noinline) def Cell(v): # If v is a vector [n, 1], x is a big square matrix. 
x = math_ops.tanh(v + array_ops.transpose(v, [1, 0])) return math_ops.reduce_sum(x, 1, keep_dims=True) @function.Defun(dtype) def Forward(x): for _ in range(10): # pylint: disable=cell-var-from-loop x = Cell(x) return math_ops.reduce_sum(x, [0, 1]) self.assertEqual(noinline, Cell.definition.attr["_noinline"].b) g = ops.Graph() with g.as_default(): x = array_ops.placeholder(dtype) y = Forward(x) dx, = gradients_impl.gradients([y], [x]) np.random.seed(321) inp = np.random.uniform(-1, 1, [16, 1]).astype(np.float32) run_metadata = config_pb2.RunMetadata() with session.Session(graph=g, config=cfg) as sess: ans = sess.run([y, dx], {x: inp}, run_metadata=run_metadata, options=config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE)) print(ans[0], np.sum(ans[1])) self.assertAllClose(ans[0], 255.971, rtol=1e-3) self.assertAllClose(np.sum(ans[1]), 13.0408, rtol=1e-3) def MetadataHasCell(run_metadata): for dev_stats in run_metadata.step_stats.dev_stats: for node_stats in dev_stats.node_stats: if cell_func_call_pattern.search(node_stats.timeline_label): return True return False self.assertEqual(MetadataHasCell(run_metadata), noinline) @function.Defun(*[dtypes.float32] * 3) def Linear(w, b, x): return nn_ops.relu(math_ops.matmul(x, w) + b) @function.Defun(*[dtypes.float32] * 5) def Linear2(w1, b1, w2, b2, x): return Linear(w2, b2, Linear(w1, b1, x)) class ModuleFunctionTest(test.TestCase): def testBasic(self): with ops.Graph().as_default(): a, b, c, d, e = [ constant_op.constant( [[_]], dtype=dtypes.float32) for _ in range(5) ] y = Linear(a, b, c) z = Linear2(a, b, c, d, e) with session.Session() as sess: self.assertAllEqual([[1]], sess.run(y)) self.assertAllEqual([[5]], sess.run(z)) class VariableHoistingTest(test.TestCase): def _testSimpleModel(self, use_forward_func, use_resource=False): def _Model(x): w = variable_scope.get_variable( "w", (64, 64), initializer=init_ops.random_uniform_initializer(seed=312), use_resource=use_resource) b = variable_scope.get_variable( "b", (64), initializer=init_ops.zeros_initializer(), use_resource=use_resource), return math_ops.sigmoid(math_ops.matmul(x, w) + b) @function.Defun() def Model(x): return _Model(x) cvars = [] @function.Defun() def Grad(x, y0): if use_forward_func: y = Model(x) else: y = _Model(x) loss = math_ops.reduce_mean( math_ops.reduce_sum(y0 * math_ops.log(y), 1), 0) arg_w, arg_b = function.get_extra_args() self.assertEqual(arg_w.get_shape(), tensor_shape.TensorShape([64, 64])) self.assertEqual(arg_b.get_shape(), tensor_shape.TensorShape([64])) dw, db = gradients_impl.gradients(loss, [arg_w, arg_b]) cvars.extend(function.get_extra_vars()) return loss, dw, db g = ops.Graph() with g.as_default(): x = random_ops.random_normal([64, 64], seed=100) y0 = random_ops.random_normal([64, 64], seed=200) with variable_scope.variable_scope("Foo"): loss, dw, db = Grad(x, y0) self.assertEqual(2, len(cvars)) w, b = cvars[:2] self.assertEqual("Foo/w", w.op.name) self.assertEqual("Foo/b", b.op.name) with self.test_session(graph=g) as sess: sess.run(variables.global_variables_initializer()) w, b, x, y0, loss, dw, db = sess.run([w, b, x, y0, loss, dw, db]) self.assertAllEqual(w.shape, (64, 64)) self.assertAllClose(np.sum(w), 2050.44) self.assertAllEqual(b.shape, (64,)) self.assertAllClose(np.sum(b), 0.0) self.assertAllClose(loss, -2.27, rtol=1e-2) self.assertAllEqual(dw.shape, (64, 64)) self.assertAllClose(np.sum(dw), -1.04, rtol=1e-2) self.assertAllEqual(db.shape, (64,)) self.assertAllClose(np.sum(db), 0.509, rtol=1e-2) def testBasic(self): 
self._testSimpleModel(True) self._testSimpleModel(False) def testBasicResource(self): self._testSimpleModel(True, use_resource=True) self._testSimpleModel(False, use_resource=True) if __name__ == "__main__": test.main()
apache-2.0
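As an aside on the record above: a minimal, hypothetical sketch of the function.Defun API the test file exercises, assuming TensorFlow 1.x and the same internal import paths the tests themselves use.

# Minimal @function.Defun usage sketch (assumes TF 1.x; not part of the file above).
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op, dtypes, function, ops

@function.Defun(dtypes.float32, dtypes.float32)
def MyAdd(x, y):
    # The decorated body is traced once into a FunctionDef in the graph library.
    return x + y

g = ops.Graph()
with g.as_default():
    out = MyAdd(constant_op.constant(1.0), constant_op.constant(2.0))
with session.Session(graph=g) as sess:
    print(sess.run(out))  # 3.0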
qqzwc/XX-Net
code/default/python27/1.0/lib/encodings/iso2022_kr.py
816
1053
#
# iso2022_kr.py: Python Unicode Codec for ISO2022_KR
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#

import _codecs_iso2022, codecs
import _multibytecodec as mbc

codec = _codecs_iso2022.getcodec('iso2022_kr')

class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode

class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec

class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec

class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec

class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec

def getregentry():
    return codecs.CodecInfo(
        name='iso2022_kr',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
bsd-2-clause
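A quick illustration, not part of the original file: once the encodings package registers the codec above, it is reached through the normal codec machinery (Python 2 syntax, matching the module's vintage).

# Hypothetical usage of the codec registered above.
text = u'\uc548\ub155'            # Korean "annyeong"
data = text.encode('iso2022_kr')  # bytes with ISO-2022-KR escape sequences
assert data.decode('iso2022_kr') == text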
FireWRT/OpenWrt-Firefly-Libraries
staging_dir/host/lib/python3.4/test/test_largefile.py
96
6554
"""Test largefile support on system where this makes sense. """ import os import stat import sys import unittest from test.support import TESTFN, requires, unlink import io # C implementation of io import _pyio as pyio # Python implementation of io # size of file to create (>2GB; 2GB == 2147483648 bytes) size = 2500000000 class LargeFileTest: """Test that each file function works as expected for large (i.e. > 2GB) files. """ def setUp(self): if os.path.exists(TESTFN): mode = 'r+b' else: mode = 'w+b' with self.open(TESTFN, mode) as f: current_size = os.fstat(f.fileno())[stat.ST_SIZE] if current_size == size+1: return if current_size == 0: f.write(b'z') f.seek(0) f.seek(size) f.write(b'a') f.flush() self.assertEqual(os.fstat(f.fileno())[stat.ST_SIZE], size+1) @classmethod def tearDownClass(cls): with cls.open(TESTFN, 'wb'): pass if not os.stat(TESTFN)[stat.ST_SIZE] == 0: raise cls.failureException('File was not truncated by opening ' 'with mode "wb"') def test_osstat(self): self.assertEqual(os.stat(TESTFN)[stat.ST_SIZE], size+1) def test_seek_read(self): with self.open(TESTFN, 'rb') as f: self.assertEqual(f.tell(), 0) self.assertEqual(f.read(1), b'z') self.assertEqual(f.tell(), 1) f.seek(0) self.assertEqual(f.tell(), 0) f.seek(0, 0) self.assertEqual(f.tell(), 0) f.seek(42) self.assertEqual(f.tell(), 42) f.seek(42, 0) self.assertEqual(f.tell(), 42) f.seek(42, 1) self.assertEqual(f.tell(), 84) f.seek(0, 1) self.assertEqual(f.tell(), 84) f.seek(0, 2) # seek from the end self.assertEqual(f.tell(), size + 1 + 0) f.seek(-10, 2) self.assertEqual(f.tell(), size + 1 - 10) f.seek(-size-1, 2) self.assertEqual(f.tell(), 0) f.seek(size) self.assertEqual(f.tell(), size) # the 'a' that was written at the end of file above self.assertEqual(f.read(1), b'a') f.seek(-size-1, 1) self.assertEqual(f.read(1), b'z') self.assertEqual(f.tell(), 1) def test_lseek(self): with self.open(TESTFN, 'rb') as f: self.assertEqual(os.lseek(f.fileno(), 0, 0), 0) self.assertEqual(os.lseek(f.fileno(), 42, 0), 42) self.assertEqual(os.lseek(f.fileno(), 42, 1), 84) self.assertEqual(os.lseek(f.fileno(), 0, 1), 84) self.assertEqual(os.lseek(f.fileno(), 0, 2), size+1+0) self.assertEqual(os.lseek(f.fileno(), -10, 2), size+1-10) self.assertEqual(os.lseek(f.fileno(), -size-1, 2), 0) self.assertEqual(os.lseek(f.fileno(), size, 0), size) # the 'a' that was written at the end of file above self.assertEqual(f.read(1), b'a') def test_truncate(self): with self.open(TESTFN, 'r+b') as f: if not hasattr(f, 'truncate'): raise unittest.SkipTest("open().truncate() not available " "on this system") f.seek(0, 2) # else we've lost track of the true size self.assertEqual(f.tell(), size+1) # Cut it back via seek + truncate with no argument. newsize = size - 10 f.seek(newsize) f.truncate() self.assertEqual(f.tell(), newsize) # else pointer moved f.seek(0, 2) self.assertEqual(f.tell(), newsize) # else wasn't truncated # Ensure that truncate(smaller than true size) shrinks # the file. newsize -= 1 f.seek(42) f.truncate(newsize) self.assertEqual(f.tell(), 42) f.seek(0, 2) self.assertEqual(f.tell(), newsize) # XXX truncate(larger than true size) is ill-defined # across platform; cut it waaaaay back f.seek(0) f.truncate(1) self.assertEqual(f.tell(), 0) # else pointer moved f.seek(0) self.assertEqual(len(f.read()), 1) # else wasn't truncated def test_seekable(self): # Issue #5016; seekable() can return False when the current position # is negative when truncated to an int. 
for pos in (2**31-1, 2**31, 2**31+1): with self.open(TESTFN, 'rb') as f: f.seek(pos) self.assertTrue(f.seekable()) def setUpModule(): try: import signal # The default handler for SIGXFSZ is to abort the process. # By ignoring it, system calls exceeding the file size resource # limit will raise OSError instead of crashing the interpreter. signal.signal(signal.SIGXFSZ, signal.SIG_IGN) except (ImportError, AttributeError): pass # On Windows and Mac OSX this test comsumes large resources; It # takes a long time to build the >2GB file and takes >2GB of disk # space therefore the resource must be enabled to run this test. # If not, nothing after this line stanza will be executed. if sys.platform[:3] == 'win' or sys.platform == 'darwin': requires('largefile', 'test requires %s bytes and a long time to run' % str(size)) else: # Only run if the current filesystem supports large files. # (Skip this test on Windows, since we now always support # large files.) f = open(TESTFN, 'wb', buffering=0) try: # 2**31 == 2147483648 f.seek(2147483649) # Seeking is not enough of a test: you must write and flush, too! f.write(b'x') f.flush() except (OSError, OverflowError): raise unittest.SkipTest("filesystem does not have " "largefile support") finally: f.close() unlink(TESTFN) class CLargeFileTest(LargeFileTest, unittest.TestCase): open = staticmethod(io.open) class PyLargeFileTest(LargeFileTest, unittest.TestCase): open = staticmethod(pyio.open) def tearDownModule(): unlink(TESTFN) if __name__ == '__main__': unittest.main()
gpl-2.0
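A compact reminder of the seek semantics the tests above rely on; a sketch only, with 'some_file' as a hypothetical placeholder for any seekable binary file.

# Sketch of the whence values exercised by test_seek_read above.
with open('some_file', 'rb') as f:
    f.seek(42, 0)   # 0 (default): absolute offset from the start of the file
    f.seek(42, 1)   # 1: relative to the current position -> now at byte 84
    f.seek(-10, 2)  # 2: relative to the end of the file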
GoSteven/Diary
djangoappengine/db/creation.py
17
1691
from .db_settings import get_indexes

from djangotoolbox.db.creation import NonrelDatabaseCreation


class StringType(object):
    def __init__(self, internal_type):
        self.internal_type = internal_type

    def __mod__(self, field):
        indexes = get_indexes().get(field['model'], {})
        if field['name'] in indexes.get('indexed', ()):
            return 'text'
        elif field['name'] in indexes.get('unindexed', ()):
            return 'longtext'
        return self.internal_type


def get_data_types():
    # TODO: Add GAEKeyField and a corresponding db_type
    string_types = ('text', 'longtext')
    data_types = NonrelDatabaseCreation.data_types.copy()
    for name, field_type in data_types.items():
        if field_type in string_types:
            data_types[name] = StringType(field_type)
    return data_types


class DatabaseCreation(NonrelDatabaseCreation):
    # This dictionary maps Field objects to their associated GAE column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = get_data_types()

    def create_test_db(self, *args, **kw):
        """Destroys the test datastore. A new store will be recreated on demand"""
        self.destroy_test_db()
        self.connection.use_test_datastore = True
        self.connection.flush()

    def destroy_test_db(self, *args, **kw):
        """Destroys the test datastore files."""
        from .base import destroy_datastore, get_test_datastore_paths
        destroy_datastore(*get_test_datastore_paths())
bsd-3-clause
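To make the StringType trick above concrete, a hedged sketch of how the `%` interpolation picks a column type; the index configuration shown is invented.

# Hypothetical illustration of StringType.__mod__ (index data invented).
# Suppose get_indexes() returned:
#     {'myapp.Article': {'indexed': ('title',), 'unindexed': ('body',)}}
st = StringType('text')
st % {'model': 'myapp.Article', 'name': 'title'}  # -> 'text' (indexed)
st % {'model': 'myapp.Article', 'name': 'body'}   # -> 'longtext' (unindexed)
st % {'model': 'myapp.Article', 'name': 'other'}  # -> 'text' (default fallback)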
hilaskis/UAV_MissionPlanner
Lib/site-packages/scipy/ndimage/__init__.py
55
1880
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
#    products derived from this software without specific prior
#    written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import numpy

from filters import *
from fourier import *
from interpolation import *
from measurements import *
from morphology import *
from io import *

# doccer is moved to scipy.misc in scipy 0.8
from scipy.misc import doccer
doccer = numpy.deprecate(doccer, old_name='doccer',
                         new_name='scipy.misc.doccer')

from info import __doc__

__version__ = '2.0'

from numpy.testing import Tester
test = Tester().test
gpl-2.0
aldian/tensorflow
tensorflow/python/layers/maxout_test.py
32
2225
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.layers import maxout
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
import numpy as np

""" Contains the maxout layer tests """


class MaxOutTest(test.TestCase):

  def test_simple(self):
    inputs = random_ops.random_uniform((64, 10, 36), seed=1)
    graph = maxout.maxout(inputs, num_units=3)
    self.assertEqual(graph.get_shape().as_list(), [64, 10, 3])

  def test_fully_connected(self):
    inputs = random_ops.random_uniform((64, 50), seed=1)
    graph = core_layers.dense(inputs, 50)
    graph = maxout.maxout(graph, num_units=10)
    self.assertEqual(graph.get_shape().as_list(), [64, 10])

  def test_nchw(self):
    inputs = random_ops.random_uniform((10, 100, 100, 3), seed=1)
    graph = conv_layers.conv2d(inputs, 10, 3, padding="SAME")
    graph = maxout.maxout(graph, num_units=1)
    self.assertEqual(graph.get_shape().as_list(), [10, 100, 100, 1])

  def test_invalid_shape(self):
    inputs = random_ops.random_uniform((10, 100, 100, 3), seed=1)
    graph = conv_layers.conv2d(inputs, 3, 10, strides=(1, 1))
    with self.assertRaisesRegexp(ValueError, 'number of features'):
      graph = maxout.maxout(graph, num_units=2)


if __name__ == '__main__':
  test.main()
apache-2.0
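For context on what the layer under test computes, here is a minimal numpy sketch of the maxout operation (group the last axis into num_units groups and take the max of each group); an assumption-level reference, not the library's own implementation.

import numpy as np

def maxout_ref(x, num_units):
    # Reference maxout: split the last axis into num_units groups
    # and reduce each group with max.
    depth = x.shape[-1]
    assert depth % num_units == 0, "depth must be divisible by num_units"
    new_shape = x.shape[:-1] + (num_units, depth // num_units)
    return x.reshape(new_shape).max(axis=-1)

out = maxout_ref(np.random.rand(64, 10, 36), num_units=3)
print(out.shape)  # (64, 10, 3), matching test_simple above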
xpavlus/parabaramba
venv/lib/python2.7/site-packages/cherrypy/lib/reprconf.py
13
16171
"""Generic configuration system using unrepr. Configuration data may be supplied as a Python dictionary, as a filename, or as an open file object. When you supply a filename or file, Python's builtin ConfigParser is used (with some extensions). Namespaces ---------- Configuration keys are separated into namespaces by the first "." in the key. The only key that cannot exist in a namespace is the "environment" entry. This special entry 'imports' other config entries from a template stored in the Config.environments dict. You can define your own namespaces to be called when new config is merged by adding a named handler to Config.namespaces. The name can be any string, and the handler must be either a callable or a context manager. """ try: # Python 3.0+ from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser try: set except NameError: from sets import Set as set try: basestring except NameError: basestring = str try: # Python 3 import builtins except ImportError: # Python 2 import __builtin__ as builtins import operator as _operator import sys def as_dict(config): """Return a dict from 'config' whether it is a dict, file, or filename.""" if isinstance(config, basestring): config = Parser().dict_from_file(config) elif hasattr(config, 'read'): config = Parser().dict_from_file(config) return config class NamespaceSet(dict): """A dict of config namespace names and handlers. Each config entry should begin with a namespace name; the corresponding namespace handler will be called once for each config entry in that namespace, and will be passed two arguments: the config key (with the namespace removed) and the config value. Namespace handlers may be any Python callable; they may also be Python 2.5-style 'context managers', in which case their __enter__ method should return a callable to be used as the handler. See cherrypy.tools (the Toolbox class) for an example. """ def __call__(self, config): """Iterate through config and pass it to each namespace handler. config A flat dict, where keys use dots to separate namespaces, and values are arbitrary. The first name in each config key is used to look up the corresponding namespace handler. For example, a config entry of {'tools.gzip.on': v} will call the 'tools' namespace handler with the args: ('gzip.on', v) """ # Separate the given config into namespaces ns_confs = {} for k in config: if "." 
in k: ns, name = k.split(".", 1) bucket = ns_confs.setdefault(ns, {}) bucket[name] = config[k] # I chose __enter__ and __exit__ so someday this could be # rewritten using Python 2.5's 'with' statement: # for ns, handler in self.iteritems(): # with handler as callable: # for k, v in ns_confs.get(ns, {}).iteritems(): # callable(k, v) for ns, handler in self.items(): exit = getattr(handler, "__exit__", None) if exit: callable = handler.__enter__() no_exc = True try: try: for k, v in ns_confs.get(ns, {}).items(): callable(k, v) except: # The exceptional case is handled here no_exc = False if exit is None: raise if not exit(*sys.exc_info()): raise # The exception is swallowed if exit() returns true finally: # The normal and non-local-goto cases are handled here if no_exc and exit: exit(None, None, None) else: for k, v in ns_confs.get(ns, {}).items(): handler(k, v) def __repr__(self): return "%s.%s(%s)" % (self.__module__, self.__class__.__name__, dict.__repr__(self)) def __copy__(self): newobj = self.__class__() newobj.update(self) return newobj copy = __copy__ class Config(dict): """A dict-like set of configuration data, with defaults and namespaces. May take a file, filename, or dict. """ defaults = {} environments = {} namespaces = NamespaceSet() def __init__(self, file=None, **kwargs): self.reset() if file is not None: self.update(file) if kwargs: self.update(kwargs) def reset(self): """Reset self to default values.""" self.clear() dict.update(self, self.defaults) def update(self, config): """Update self from a dict, file or filename.""" if isinstance(config, basestring): # Filename config = Parser().dict_from_file(config) elif hasattr(config, 'read'): # Open file object config = Parser().dict_from_file(config) else: config = config.copy() self._apply(config) def _apply(self, config): """Update self from a dict.""" which_env = config.get('environment') if which_env: env = self.environments[which_env] for k in env: if k not in config: config[k] = env[k] dict.update(self, config) self.namespaces(config) def __setitem__(self, k, v): dict.__setitem__(self, k, v) self.namespaces({k: v}) class Parser(ConfigParser): """Sub-class of ConfigParser that keeps the case of options and that raises an exception if the file cannot be read. """ def optionxform(self, optionstr): return optionstr def read(self, filenames): if isinstance(filenames, basestring): filenames = [filenames] for filename in filenames: # try: # fp = open(filename) # except IOError: # continue fp = open(filename) try: self._read(fp, filename) finally: fp.close() def as_dict(self, raw=False, vars=None): """Convert an INI file to a dictionary""" # Load INI file into a dict result = {} for section in self.sections(): if section not in result: result[section] = {} for option in self.options(section): value = self.get(section, option, raw=raw, vars=vars) try: value = unrepr(value) except Exception: x = sys.exc_info()[1] msg = ("Config error in section: %r, option: %r, " "value: %r. Config values must be valid Python." % (section, option, value)) raise ValueError(msg, x.__class__.__name__, x.args) result[section][option] = value return result def dict_from_file(self, file): if hasattr(file, 'read'): self.readfp(file) else: self.read(file) return self.as_dict() # public domain "unrepr" implementation, found on the web and then improved. 
class _Builder2: def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise TypeError("unrepr does not recognize %s" % repr(o.__class__.__name__)) return m(o) def astnode(self, s): """Return a Python2 ast Node compiled from a string.""" try: import compiler except ImportError: # Fallback to eval when compiler package is not available, # e.g. IronPython 1.0. return eval(s) p = compiler.parse("__tempvalue__ = " + s) return p.getChildren()[1].getChildren()[0].getChildren()[1] def build_Subscript(self, o): expr, flags, subs = o.getChildren() expr = self.build(expr) subs = self.build(subs) return expr[subs] def build_CallFunc(self, o): children = o.getChildren() # Build callee from first child callee = self.build(children[0]) # Build args and kwargs from remaining children args = [] kwargs = {} for child in children[1:]: class_name = child.__class__.__name__ # None is ignored if class_name == 'NoneType': continue # Keywords become kwargs if class_name == 'Keyword': kwargs.update(self.build(child)) # Everything else becomes args else : args.append(self.build(child)) return callee(*args, **kwargs) def build_Keyword(self, o): key, value_obj = o.getChildren() value = self.build(value_obj) kw_dict = {key: value} return kw_dict def build_List(self, o): return map(self.build, o.getChildren()) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = i.next() return d def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): name = o.name if name == 'None': return None if name == 'True': return True if name == 'False': return False # See if the Name is a package or module. If it is, import it. try: return modules(name) except ImportError: pass # See if the Name is in builtins. try: return getattr(builtins, name) except AttributeError: pass raise TypeError("unrepr could not resolve the name %s" % repr(name)) def build_Add(self, o): left, right = map(self.build, o.getChildren()) return left + right def build_Mul(self, o): left, right = map(self.build, o.getChildren()) return left * right def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_NoneType(self, o): return None def build_UnarySub(self, o): return -self.build(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build(o.getChildren()[0]) class _Builder3: def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise TypeError("unrepr does not recognize %s" % repr(o.__class__.__name__)) return m(o) def astnode(self, s): """Return a Python3 ast Node compiled from a string.""" try: import ast except ImportError: # Fallback to eval when ast package is not available, # e.g. IronPython 1.0. return eval(s) p = ast.parse("__tempvalue__ = " + s) return p.body[0].value def build_Subscript(self, o): return self.build(o.value)[self.build(o.slice)] def build_Index(self, o): return self.build(o.value) def _build_call35(self, o): """ Workaround for python 3.5 _ast.Call signature, docs found here https://greentreesnakes.readthedocs.org/en/latest/nodes.html """ import ast callee = self.build(o.func) args = [] if o.args is not None: for a in o.args: if isinstance(a, ast.Starred): args.append(self.build(a.value)) else: args.append(self.build(a)) kwargs = {} for kw in o.keywords: if kw.arg is None: # double asterix `**` rst = self.build(kw.value) if not isinstance(rst, dict): raise TypeError("Invalid argument for call." 
"Must be a mapping object.") # give preference to the keys set directly from arg=value for k, v in rst.items(): if k not in kwargs: kwargs[k] = v else: # defined on the call as: arg=value kwargs[kw.arg] = self.build(kw.value) return callee(*args, **kwargs) def build_Call(self, o): if sys.version_info >= (3, 5): return self._build_call35(o) callee = self.build(o.func) if o.args is None: args = () else: args = tuple([self.build(a) for a in o.args]) if o.starargs is None: starargs = () else: starargs = tuple(self.build(o.starargs)) if o.kwargs is None: kwargs = {} else: kwargs = self.build(o.kwargs) if o.keywords is not None: # direct a=b keywords for kw in o.keywords: # preference because is a direct keyword against **kwargs kwargs[kw.arg] = self.build(kw.value) return callee(*(args + starargs), **kwargs) def build_List(self, o): return list(map(self.build, o.elts)) def build_Str(self, o): return o.s def build_Num(self, o): return o.n def build_Dict(self, o): return dict([(self.build(k), self.build(v)) for k, v in zip(o.keys, o.values)]) def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): name = o.id if name == 'None': return None if name == 'True': return True if name == 'False': return False # See if the Name is a package or module. If it is, import it. try: return modules(name) except ImportError: pass # See if the Name is in builtins. try: import builtins return getattr(builtins, name) except AttributeError: pass raise TypeError("unrepr could not resolve the name %s" % repr(name)) def build_NameConstant(self, o): return o.value def build_UnaryOp(self, o): op, operand = map(self.build, [o.op, o.operand]) return op(operand) def build_BinOp(self, o): left, op, right = map(self.build, [o.left, o.op, o.right]) return op(left, right) def build_Add(self, o): return _operator.add def build_Mult(self, o): return _operator.mul def build_USub(self, o): return _operator.neg def build_Attribute(self, o): parent = self.build(o.value) return getattr(parent, o.attr) def build_NoneType(self, o): return None def unrepr(s): """Return a Python object compiled from a string.""" if not s: return s if sys.version_info < (3, 0): b = _Builder2() else: b = _Builder3() obj = b.astnode(s) return b.build(obj) def modules(modulePath): """Load a module and retrieve a reference to that module.""" __import__(modulePath) return sys.modules[modulePath] def attributes(full_attribute_name): """Load a module and retrieve an attribute of that module.""" # Parse out the path, module, and attribute last_dot = full_attribute_name.rfind(".") attr_name = full_attribute_name[last_dot + 1:] mod_path = full_attribute_name[:last_dot] mod = modules(mod_path) # Let an AttributeError propagate outward. try: attr = getattr(mod, attr_name) except AttributeError: raise AttributeError("'%s' object has no attribute '%s'" % (mod_path, attr_name)) # Return a reference to the attribute. return attr
gpl-3.0
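A short usage sketch for the config module above. The INI content is invented, and the import path is an assumption (the file ships as cherrypy.lib.reprconf); the unrepr behaviour is as documented in the module itself.

# Sketch (assumes the module above is importable as `reprconf`; Python 2,
# matching the module's era). Values in the INI file must be valid Python,
# because Parser.as_dict() runs each one through unrepr().
from StringIO import StringIO
from reprconf import Parser

ini = """
[global]
server.socket_port = 8080
tools.gzip.on = True
engine.autoreload.frequency = 1 + 1
"""
conf = Parser().dict_from_file(StringIO(ini))
assert conf['global']['server.socket_port'] == 8080
assert conf['global']['tools.gzip.on'] is True
assert conf['global']['engine.autoreload.frequency'] == 2  # expressions evaluate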
proxysh/Safejumper-for-Desktop
buildlinux/env64/lib/python2.7/sre_constants.py
185
7197
# # Secret Labs' Regular Expression Engine # # various symbols used by the regular expression engine. # run this script to update the _sre include files! # # Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. # # See the sre.py file for information on usage and redistribution. # """Internal support module for sre""" # update when constants are added or removed MAGIC = 20031017 try: from _sre import MAXREPEAT except ImportError: import _sre MAXREPEAT = _sre.MAXREPEAT = 65535 # SRE standard exception (access as sre.error) # should this really be here? class error(Exception): pass # operators FAILURE = "failure" SUCCESS = "success" ANY = "any" ANY_ALL = "any_all" ASSERT = "assert" ASSERT_NOT = "assert_not" AT = "at" BIGCHARSET = "bigcharset" BRANCH = "branch" CALL = "call" CATEGORY = "category" CHARSET = "charset" GROUPREF = "groupref" GROUPREF_IGNORE = "groupref_ignore" GROUPREF_EXISTS = "groupref_exists" IN = "in" IN_IGNORE = "in_ignore" INFO = "info" JUMP = "jump" LITERAL = "literal" LITERAL_IGNORE = "literal_ignore" MARK = "mark" MAX_REPEAT = "max_repeat" MAX_UNTIL = "max_until" MIN_REPEAT = "min_repeat" MIN_UNTIL = "min_until" NEGATE = "negate" NOT_LITERAL = "not_literal" NOT_LITERAL_IGNORE = "not_literal_ignore" RANGE = "range" REPEAT = "repeat" REPEAT_ONE = "repeat_one" SUBPATTERN = "subpattern" MIN_REPEAT_ONE = "min_repeat_one" # positions AT_BEGINNING = "at_beginning" AT_BEGINNING_LINE = "at_beginning_line" AT_BEGINNING_STRING = "at_beginning_string" AT_BOUNDARY = "at_boundary" AT_NON_BOUNDARY = "at_non_boundary" AT_END = "at_end" AT_END_LINE = "at_end_line" AT_END_STRING = "at_end_string" AT_LOC_BOUNDARY = "at_loc_boundary" AT_LOC_NON_BOUNDARY = "at_loc_non_boundary" AT_UNI_BOUNDARY = "at_uni_boundary" AT_UNI_NON_BOUNDARY = "at_uni_non_boundary" # categories CATEGORY_DIGIT = "category_digit" CATEGORY_NOT_DIGIT = "category_not_digit" CATEGORY_SPACE = "category_space" CATEGORY_NOT_SPACE = "category_not_space" CATEGORY_WORD = "category_word" CATEGORY_NOT_WORD = "category_not_word" CATEGORY_LINEBREAK = "category_linebreak" CATEGORY_NOT_LINEBREAK = "category_not_linebreak" CATEGORY_LOC_WORD = "category_loc_word" CATEGORY_LOC_NOT_WORD = "category_loc_not_word" CATEGORY_UNI_DIGIT = "category_uni_digit" CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit" CATEGORY_UNI_SPACE = "category_uni_space" CATEGORY_UNI_NOT_SPACE = "category_uni_not_space" CATEGORY_UNI_WORD = "category_uni_word" CATEGORY_UNI_NOT_WORD = "category_uni_not_word" CATEGORY_UNI_LINEBREAK = "category_uni_linebreak" CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak" OPCODES = [ # failure=0 success=1 (just because it looks better that way :-) FAILURE, SUCCESS, ANY, ANY_ALL, ASSERT, ASSERT_NOT, AT, BRANCH, CALL, CATEGORY, CHARSET, BIGCHARSET, GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE, IN, IN_IGNORE, INFO, JUMP, LITERAL, LITERAL_IGNORE, MARK, MAX_UNTIL, MIN_UNTIL, NOT_LITERAL, NOT_LITERAL_IGNORE, NEGATE, RANGE, REPEAT, REPEAT_ONE, SUBPATTERN, MIN_REPEAT_ONE ] ATCODES = [ AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY, AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING, AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY, AT_UNI_NON_BOUNDARY ] CHCODES = [ CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE, CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD, CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD, CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT, CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD, CATEGORY_UNI_NOT_WORD, 
CATEGORY_UNI_LINEBREAK, CATEGORY_UNI_NOT_LINEBREAK ] def makedict(list): d = {} i = 0 for item in list: d[item] = i i = i + 1 return d OPCODES = makedict(OPCODES) ATCODES = makedict(ATCODES) CHCODES = makedict(CHCODES) # replacement operations for "ignore case" mode OP_IGNORE = { GROUPREF: GROUPREF_IGNORE, IN: IN_IGNORE, LITERAL: LITERAL_IGNORE, NOT_LITERAL: NOT_LITERAL_IGNORE } AT_MULTILINE = { AT_BEGINNING: AT_BEGINNING_LINE, AT_END: AT_END_LINE } AT_LOCALE = { AT_BOUNDARY: AT_LOC_BOUNDARY, AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY } AT_UNICODE = { AT_BOUNDARY: AT_UNI_BOUNDARY, AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY } CH_LOCALE = { CATEGORY_DIGIT: CATEGORY_DIGIT, CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_SPACE, CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, CATEGORY_WORD: CATEGORY_LOC_WORD, CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK } CH_UNICODE = { CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_UNI_SPACE, CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, CATEGORY_WORD: CATEGORY_UNI_WORD, CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK } # flags SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking) SRE_FLAG_IGNORECASE = 2 # case insensitive SRE_FLAG_LOCALE = 4 # honour system locale SRE_FLAG_MULTILINE = 8 # treat target as multiline string SRE_FLAG_DOTALL = 16 # treat target as a single string SRE_FLAG_UNICODE = 32 # use unicode locale SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments SRE_FLAG_DEBUG = 128 # debugging # flags for INFO primitive SRE_INFO_PREFIX = 1 # has prefix SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) SRE_INFO_CHARSET = 4 # pattern starts with character from given set if __name__ == "__main__": def dump(f, d, prefix): items = d.items() items.sort(key=lambda a: a[1]) for k, v in items: f.write("#define %s_%s %s\n" % (prefix, k.upper(), v)) f = open("sre_constants.h", "w") f.write("""\ /* * Secret Labs' Regular Expression Engine * * regular expression matching engine * * NOTE: This file is generated by sre_constants.py. If you need * to change anything in here, edit sre_constants.py and run it. * * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. * * See the _sre.c file for information on usage and redistribution. */ """) f.write("#define SRE_MAGIC %d\n" % MAGIC) dump(f, OPCODES, "SRE_OP") dump(f, ATCODES, "SRE") dump(f, CHCODES, "SRE") f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE) f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE) f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE) f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE) f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL) f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE) f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE) f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX) f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL) f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET) f.close() print "done"
gpl-2.0
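The makedict helper above simply turns the ordered opcode lists into name-to-index maps; a tiny illustration follows (the short list is invented for brevity).

# Equivalent behaviour to makedict() above.
names = ["failure", "success", "any"]
codes = makedict(names)
assert codes == {"failure": 0, "success": 1, "any": 2}
# Each opcode string maps to its list position; that position is the numeric
# opcode written into the generated sre_constants.h header.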
zmike/servo
tests/wpt/web-platform-tests/tools/html5lib/html5lib/treewalkers/pulldom.py
1729
2302
from __future__ import absolute_import, division, unicode_literals

from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
    COMMENT, IGNORABLE_WHITESPACE, CHARACTERS

from . import _base

from ..constants import voidElements


class TreeWalker(_base.TreeWalker):
    def __iter__(self):
        ignore_until = None
        previous = None
        for event in self.tree:
            if previous is not None and \
                    (ignore_until is None or previous[1] is ignore_until):
                if previous[1] is ignore_until:
                    ignore_until = None
                for token in self.tokens(previous, event):
                    yield token
                    if token["type"] == "EmptyTag":
                        ignore_until = previous[1]
            previous = event
        if ignore_until is None or previous[1] is ignore_until:
            for token in self.tokens(previous, None):
                yield token
        elif ignore_until is not None:
            raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")

    def tokens(self, event, next):
        type, node = event
        if type == START_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            attrs = {}
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                attrs[(attr.namespaceURI, attr.localName)] = attr.value
            if name in voidElements:
                for token in self.emptyTag(namespace, name, attrs,
                                           not next or next[1] is not node):
                    yield token
            else:
                yield self.startTag(namespace, name, attrs)

        elif type == END_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            if name not in voidElements:
                yield self.endTag(namespace, name)

        elif type == COMMENT:
            yield self.comment(node.nodeValue)

        elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
            for token in self.text(node.nodeValue):
                yield token

        else:
            yield self.unknown(type)
mpl-2.0
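A hedged sketch of how a walker like the one above is driven, assuming html5lib is installed and registers this walker under the name "pulldom"; the input document is invented.

# Hypothetical driving loop for the pulldom tree walker above.
from xml.dom.pulldom import parseString
import html5lib

events = parseString("<div><br/><span>hi</span></div>")  # well-formed XML input
walker = html5lib.getTreeWalker("pulldom")
for token in walker(events):
    print(token["type"])  # StartTag / EmptyTag / Characters / EndTag ...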
apanju/odoo
addons/hr_attendance/report/__init__.py
375
1071
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import attendance_errors

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
tux-00/ansible
lib/ansible/plugins/test/core.py
17
4661
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re import operator as py_operator from collections import MutableMapping, MutableSequence from distutils.version import LooseVersion, StrictVersion from ansible import errors def failed(*a, **kw): ''' Test if task result yields failed ''' item = a[0] if not isinstance(item, MutableMapping): raise errors.AnsibleFilterError("|failed expects a dictionary") rc = item.get('rc', 0) failed = item.get('failed', False) if rc != 0 or failed: return True else: return False def success(*a, **kw): ''' Test if task result yields success ''' return not failed(*a, **kw) def changed(*a, **kw): ''' Test if task result yields changed ''' item = a[0] if not isinstance(item, MutableMapping): raise errors.AnsibleFilterError("|changed expects a dictionary") if 'changed' not in item: changed = False if ( 'results' in item and # some modules return a 'results' key isinstance(item['results'], MutableSequence) and isinstance(item['results'][0], MutableMapping) ): for result in item['results']: changed = changed or result.get('changed', False) else: changed = item.get('changed', False) return changed def skipped(*a, **kw): ''' Test if task result yields skipped ''' item = a[0] if not isinstance(item, MutableMapping): raise errors.AnsibleFilterError("|skipped expects a dictionary") skipped = item.get('skipped', False) return skipped def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'): ''' Expose `re` as a boolean filter using the `search` method by default. This is likely only useful for `search` and `match` which already have their own filters. 
''' flags = 0 if ignorecase: flags |= re.I if multiline: flags |= re.M _re = re.compile(pattern, flags=flags) _bool = __builtins__.get('bool') return _bool(getattr(_re, match_type, 'search')(value)) def match(value, pattern='', ignorecase=False, multiline=False): ''' Perform a `re.match` returning a boolean ''' return regex(value, pattern, ignorecase, multiline, 'match') def search(value, pattern='', ignorecase=False, multiline=False): ''' Perform a `re.search` returning a boolean ''' return regex(value, pattern, ignorecase, multiline, 'search') def version_compare(value, version, operator='eq', strict=False): ''' Perform a version comparison on a value ''' op_map = { '==': 'eq', '=': 'eq', 'eq': 'eq', '<': 'lt', 'lt': 'lt', '<=': 'le', 'le': 'le', '>': 'gt', 'gt': 'gt', '>=': 'ge', 'ge': 'ge', '!=': 'ne', '<>': 'ne', 'ne': 'ne' } if strict: Version = StrictVersion else: Version = LooseVersion if operator in op_map: operator = op_map[operator] else: raise errors.AnsibleFilterError('Invalid operator type') try: method = getattr(py_operator, operator) return method(Version(str(value)), Version(str(version))) except Exception as e: raise errors.AnsibleFilterError('Version comparison: %s' % e) class TestModule(object): ''' Ansible core jinja2 tests ''' def tests(self): return { # failure testing 'failed': failed, 'succeeded': success, # changed testing 'changed': changed, # skip testing 'skipped': skipped, # regex 'match': match, 'search': search, 'regex': regex, # version comparison 'version_compare': version_compare, # lists 'any': any, 'all': all, }
gpl-3.0
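A quick sketch of the version_compare helper defined above, called directly outside Jinja; behaviour follows distutils LooseVersion/StrictVersion as in the source.

# Direct calls to version_compare() from the module above.
version_compare('2.7.10', '2.7.9', '>')            # True: LooseVersion compare
version_compare('1.0', '1.0', '==')                # True: '==' maps to 'eq'
version_compare('1.10', '1.9', 'ge', strict=True)  # True: StrictVersion parsing
# An unknown operator raises AnsibleFilterError('Invalid operator type').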
Tatsh-ansible/ansible
lib/ansible/modules/packaging/os/swdepot.py
33
6615
#!/usr/bin/python -tt # -*- coding: utf-8 -*- # (c) 2013, Raul Melo # Written by Raul Melo <raulmelo@gmail.com> # Based on yum module written by Seth Vidal <skvidal at fedoraproject.org> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: swdepot short_description: Manage packages with swdepot package manager (HP-UX) description: - Will install, upgrade and remove packages with swdepot package manager (HP-UX) version_added: "1.4" notes: [] author: "Raul Melo (@melodous)" options: name: description: - package name. required: true default: null choices: [] aliases: [] version_added: 1.4 state: description: - whether to install (C(present), C(latest)), or remove (C(absent)) a package. required: true default: null choices: [ 'present', 'latest', 'absent'] aliases: [] version_added: 1.4 depot: description: - The source repository from which install or upgrade a package. required: false default: null choices: [] aliases: [] version_added: 1.4 ''' EXAMPLES = ''' - swdepot: name: unzip-6.0 state: installed depot: 'repository:/path' - swdepot: name: unzip state: latest depot: 'repository:/path' - swdepot: name: unzip state: absent ''' import re import pipes def compare_package(version1, version2): """ Compare version packages. Return values: -1 first minor 0 equal 1 first greater """ def normalize(v): return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")] normalized_version1 = normalize(version1) normalized_version2 = normalize(version2) if normalized_version1 == normalized_version2: rc = 0 elif normalized_version1 < normalized_version2: rc = -1 else: rc = 1 return rc def query_package(module, name, depot=None): """ Returns whether a package is installed or not and version. """ cmd_list = '/usr/sbin/swlist -a revision -l product' if depot: rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True) else: rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True) if rc == 0: version = re.sub("\s\s+|\t" , " ", stdout).strip().split()[1] else: version = None return rc, version def remove_package(module, name): """ Uninstall package if installed. 
""" cmd_remove = '/usr/sbin/swremove' rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name)) if rc == 0: return rc, stdout else: return rc, stderr def install_package(module, depot, name): """ Install package if not already installed """ cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false' rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name)) if rc == 0: return rc, stdout else: return rc, stderr def main(): module = AnsibleModule( argument_spec = dict( name = dict(aliases=['pkg'], required=True), state = dict(choices=['present', 'absent', 'latest'], required=True), depot = dict(default=None, required=False) ), supports_check_mode=True ) name = module.params['name'] state = module.params['state'] depot = module.params['depot'] changed = False msg = "No changed" rc = 0 if ( state == 'present' or state == 'latest' ) and depot is None: output = "depot parameter is mandatory in present or latest task" module.fail_json(name=name, msg=output, rc=rc) #Check local version rc, version_installed = query_package(module, name) if not rc: installed = True msg = "Already installed" else: installed = False if ( state == 'present' or state == 'latest' ) and installed is False: if module.check_mode: module.exit_json(changed=True) rc, output = install_package(module, depot, name) if not rc: changed = True msg = "Package installed" else: module.fail_json(name=name, msg=output, rc=rc) elif state == 'latest' and installed is True: #Check depot version rc, version_depot = query_package(module, name, depot) if not rc: if compare_package(version_installed,version_depot) == -1: if module.check_mode: module.exit_json(changed=True) #Install new version rc, output = install_package(module, depot, name) if not rc: msg = "Package upgraded, Before " + version_installed + " Now " + version_depot changed = True else: module.fail_json(name=name, msg=output, rc=rc) else: output = "Software package not in repository " + depot module.fail_json(name=name, msg=output, rc=rc) elif state == 'absent' and installed is True: if module.check_mode: module.exit_json(changed=True) rc, output = remove_package(module, name) if not rc: changed = True msg = "Package removed" else: module.fail_json(name=name, msg=output, rc=rc) if module.check_mode: module.exit_json(changed=False) module.exit_json(changed=changed, name=name, state=state, msg=msg) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
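compare_package above normalizes trailing ".0" groups before comparing; a few illustrative calls (standalone, outside the module's Ansible context):

# Illustrative calls to compare_package() from the module above.
compare_package('6.0', '6.0.0')  # 0: trailing '.0' groups are normalized away
compare_package('6.0', '6.1')    # -1: the first version is older
compare_package('6.10', '6.9')   # 1: comparison is numeric, not lexicographic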
pony-revolution/helpothers
helpothers/views.py
1
1239
from django.contrib.auth import get_user_model
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView

from .views_mixins import HelpOthersMetaDataMixin
from listings.models import GatheringCenter, Resource


class HomeView(HelpOthersMetaDataMixin, TemplateView):
    template_name = 'home.html'

    def get_context_data(self, **kwargs):
        context = super(HomeView, self).get_context_data(**kwargs)
        context['gathering_centers'] = GatheringCenter.objects.filter(published=True)
        context['resources'] = Resource.objects.filter(published=True)
        return context


class LoginView(HelpOthersMetaDataMixin, TemplateView):
    template_name = 'login.html'

    def get_context_data(self, **kwargs):
        ctx = super(LoginView, self).get_context_data(**kwargs)
        ctx['next'] = self.request.GET.get('next')
        return ctx


class ProfileView(HelpOthersMetaDataMixin, UpdateView):
    context_object_name = 'profile'
    template_name = 'accounts/profile.html'
    fields = ('user__first_name', 'user__last_name', 'user__email')

    def get_object(self, queryset=None):
        return self.request.user.profile
apache-2.0
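For completeness, a hypothetical URLconf wiring for the views above; route names and paths are invented, in the Django 1.x style the project's class-based views imply.

# Hypothetical urls.py for the views module above.
from django.conf.urls import url
from helpothers.views import HomeView, LoginView, ProfileView

urlpatterns = [
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^profile/$', ProfileView.as_view(), name='profile'),
]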
scattering/ipeek
server/pull_push_expman.py
1
6015
from __future__ import print_function  # the script mixed print statements and print() calls; normalize on the function form
import glob
import os
import sys
sys.path.append('/var/www/')
sys.path.append('/home/bbm/')
import paramiko
import urllib2, ftplib
import time
import StringIO
import json

DEBUG = False
RETRIEVE_METHOD = "ssh"  # or "ftp" or "urllib"
MAX_FTP_RETRIES = 5
HOST_PORT = 22
DEFAULT_PATH = "/usr/local/nice/server_data/experiments/manifest/experiment_manifest.backup"

sources = [
    {"name": "NSE", "host_name": "echo.ncnr.nist.gov"},
    {"name": "MAGIK", "host_name": "magik.ncnr.nist.gov"},
    {"name": "NG7", "host_name": "ng7refl.ncnr.nist.gov"},
    {"name": "PBR", "host_name": "pbr.ncnr.nist.gov"},
    {"name": "NGBSANS", "host_name": "ngbsans.ncnr.nist.gov"},
    {"name": "NGB30SANS", "host_name": "ngb30sans.ncnr.nist.gov"},
    {"name": "NG7SANS", "host_name": "ng7sans.ncnr.nist.gov"},
    {"name": "PHADES", "host_name": "cts.ncnr.nist.gov"},
    {"name": "VSANS", "host_name": "vsans.ncnr.nist.gov"},
]

output = {}
output_filelike = {}
#local_path = "/home/bbm/.livedata/DCS/"
dest_host = "webster.ncnr.nist.gov"  # hard-coded
dest_port = 22
# I have a different key for pushing to webster.
dest_pkey = paramiko.RSAKey(filename='/home/bbm/.ssh/datapushkey')
dest_username = "bbm"


def retrieve_ftp(source_host, source_port, file_path, output_buffer, username):
    ftp = ftplib.FTP(source_host)
    ftp.login('anonymous')
    live_datapath = os.path.dirname(file_path)
    live_dataname = os.path.basename(file_path)
    ftp.cwd(live_datapath)
    ftp.retrbinary("RETR " + live_dataname, output_buffer.write)
    ftp.close()


def retrieve_ssh(source_host, source_port, file_path, output_buffer, username):
    source_transport = paramiko.Transport((source_host, source_port))
    source_transport.window_size = 2147483647
    source_transport.use_compression(True)
    source_pkey = paramiko.RSAKey(filename="/home/bbm/.ssh/datapullkey")
    source_username = username
    source_transport.connect(username=source_username, pkey=source_pkey)
    source_sftp = paramiko.SFTPClient.from_transport(source_transport)
    if DEBUG:
        print("starting read:", name, os.path.basename(file_path))
    f = source_sftp.open(file_path)
    response = f.read()
    f.close()
    if DEBUG:
        print("ending read:", name, os.path.basename(file_path))
    output_buffer.write(response)
    if DEBUG:
        print("ending stringIO:", name, os.path.basename(file_path))


def retrieve_urllib(source_host, source_port, file_path, output_buffer, username):
    # derive the remote directory and filename from file_path; the original
    # referenced live_datapath/live_dataname, which are not defined in this scope
    live_datapath = os.path.dirname(file_path)
    live_dataname = os.path.basename(file_path)
    req_addr = os.path.join("ftp://" + source_host, live_datapath, live_dataname)
    #req = urllib2.Request(req_addr)
    response = None
    retries = 0
    while retries < MAX_FTP_RETRIES:
        try:
            response = urllib2.urlopen(req_addr)
            break
        except Exception:
            print("failed attempt %d to retrieve %s: trying again" % (retries, req_addr))
            retries += 1
    if response is None:
        return
    if DEBUG:
        print("retrieved %s" % (req_addr,))
    output_buffer.write(response.read())

retrievers = {
    "ssh": retrieve_ssh,
    "urllib": retrieve_urllib,
    "ftp": retrieve_ftp,
}


def strip_header(manifest):
    json_start = manifest.find('[')
    return manifest[json_start:]


def strip_emails(manifest):
    manifest_obj = json.loads(manifest)
    for expt in manifest_obj:
        expt['value']['value'].pop('emails', None)
    return json.dumps(manifest_obj)


def strip_emails_and_proprietary(manifest):
    manifest_obj = json.loads(manifest)
    # build a new list instead of popping from the list being iterated,
    # which would silently skip the entry that follows each removal
    public = []
    for expt in manifest_obj:
        if expt['value']['value'].get('publish', '') == 'NORMAL':
            expt['value']['value'].pop('emails', None)
            public.append(expt)
    return json.dumps(public)

filters = [strip_header, strip_emails_and_proprietary]

for source in sources:
    retrieve_method = source.get('retrieve_method', RETRIEVE_METHOD)
    name = source['name']
    username = source.get('username', 'ncnr')
    source_host = source['host_name']
    source_port = source.get('host_port', HOST_PORT)
    live_datapath = source.get('manifest_path', DEFAULT_PATH)
    try:
        live_data = StringIO.StringIO()
        retriever = retrievers.get(retrieve_method, lambda *args: None)
        retriever(source_host, source_port, live_datapath, live_data, username)
        live_data.seek(0)  # move back to the beginning of file
        output.setdefault(name, {})
        filename = os.path.basename(live_datapath)
        result = live_data.read()
        for f in filters:
            result = f(result)
        output[name][filename] = result
    except Exception as e:
        if DEBUG:
            print("could not connect to %s because of %s\n" % (name, str(e)))

# Now initialize the transfer to the destination:
dest_transport = paramiko.Transport((dest_host, dest_port))
dest_transport.connect(username=dest_username, pkey=dest_pkey)
dest_transport.window_size = 2147483647
dest_transport.use_compression(True)
dest_sftp = paramiko.SFTPClient.from_transport(dest_transport)

for name in output:
    #name = source['name']
    for json_filename in output[name].keys():
        # now I push that file outside the firewall to webster:
        remote_tmp = os.path.join('ipeek_html', 'data', name, json_filename + ".tmp")
        remotedir = os.path.join('ipeek_html', 'data', name)
        remotepath = os.path.join('ipeek_html', 'data', name, json_filename)
        if DEBUG:
            print("starting write:", name, json_filename)
        f = dest_sftp.open(remote_tmp, 'w')
        f.write(output[name][json_filename])
        f.close()
        if json_filename in dest_sftp.listdir(remotedir):
            dest_sftp.unlink(remotepath)
        dest_sftp.rename(remote_tmp, remotepath)
        if DEBUG:
            print("ending write:", name, json_filename)

dest_sftp.close()
dest_transport.close()
#print('Upload done.')
unlicense
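A minimal, self-contained sketch of the filter chain in the script above: the manifest arrives as a text header followed by a JSON array, the header is stripped, then non-public experiments and email addresses are removed. The header text and field values here are hypothetical; only the nesting (entry['value']['value']) follows the code above.

import json

raw = ('manifest-version 2\n'
       '[{"value": {"value": {"publish": "NORMAL", "emails": ["a@b.gov"]}}},'
       ' {"value": {"value": {"publish": "PROPRIETARY", "emails": ["c@d.gov"]}}}]')

def strip_header(manifest):
    return manifest[manifest.find('['):]

def strip_emails_and_proprietary(manifest):
    entries = json.loads(manifest)
    public = [e for e in entries
              if e['value']['value'].get('publish', '') == 'NORMAL']
    for e in public:
        e['value']['value'].pop('emails', None)
    return json.dumps(public)

result = raw
for f in (strip_header, strip_emails_and_proprietary):
    result = f(result)
print(result)  # one entry left, with no "emails" key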
matuu/fades
fades/main.py
2
7315
# Copyright 2014-2015 Facundo Batista, Nicolás Demarchi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE.  See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check https://github.com/PyAr/fades

"""Main 'fades' modules."""

import argparse
import os
import signal
import sys
import logging
import subprocess

from fades import parsing, logger, cache, helpers, envbuilder

# the signals to redirect to the child process (note: only these are
# allowed in Windows, see 'signal' doc).
REDIRECTED_SIGNALS = [
    signal.SIGABRT,
    signal.SIGFPE,
    signal.SIGILL,
    signal.SIGINT,
    signal.SIGSEGV,
    signal.SIGTERM,
]

help_epilog = """
The "child program" is the script that fades will execute. It's an
optional parameter, it will be the first thing received by fades that
is not a parameter. If no child program is indicated, a Python
interactive interpreter will be opened.

The "child options" (everything after the child program) are
parameters passed as is to the child program.
"""

help_usage = """
  fades [-h] [-V] [-v] [-q] [-i] [-d DEPENDENCY] [-r REQUIREMENT] [-p PYTHON]
        [child_program [child_options]]
"""


def _merge_deps(*deps):
    """Merge all the dependencies; latest dicts overwrite first ones."""
    final = {}
    for dep in deps:
        for repo, info in dep.items():
            final.setdefault(repo, []).extend(info)
    return final


def go(version, argv):
    """Make the magic happen."""
    parser = argparse.ArgumentParser(prog='PROG', epilog=help_epilog, usage=help_usage,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-V', '--version', action='store_true',
                        help="show version and info about the system, and exit")
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="send all internal debugging lines to stderr, which may be very "
                             "useful to debug any problem that may arise.")
    parser.add_argument('-q', '--quiet', action='store_true',
                        help="don't show anything (unless it has a real problem), so the "
                             "original script stderr is not polluted at all.")
    parser.add_argument('-d', '--dependency', action='append',
                        help="specify dependencies through command line (this option can be "
                             "used multiple times)")
    parser.add_argument('-r', '--requirement',
                        help="indicate from which file read the dependencies")
    parser.add_argument('-p', '--python', action='store',
                        help=("Specify the Python interpreter to use.\n"
                              " Default is: %s") % (sys.executable,))
    parser.add_argument('-x', '--exec', dest='executable', action='store_true',
                        help=("Indicate that the child_program should be looked up in the "
                              "virtualenv."))
    parser.add_argument('-i', '--ipython', action='store_true', help="use IPython shell.")
    parser.add_argument('child_program', nargs='?', default=None)
    parser.add_argument('child_options', nargs=argparse.REMAINDER)

    # support the case when executed from a shell-bang, where all the
    # parameters come in sys.argv[1] in a single string separated
    # by spaces (in this case, the third parameter is what is being
    # executed)
    if len(sys.argv) > 1 and " " in sys.argv[1]:
        real_args = sys.argv[1].split() + [sys.argv[2]]
        args = parser.parse_args(real_args)
    else:
        args = parser.parse_args()

    # validate input, parameters, and support some special options
    if args.version:
        print("Running 'fades' version", version)
        print("    Python:", sys.version_info)
        print("    System:", sys.platform)
        sys.exit()
    if args.verbose:
        log_level = logging.DEBUG
    elif args.quiet:
        log_level = logging.WARNING
    else:
        log_level = logging.INFO

    # set up logger and dump basic version info
    l = logger.set_up(log_level)
    l.debug("Running Python %s on %r", sys.version_info, sys.platform)
    l.debug("Starting fades v. %s", version)
    l.debug("Arguments: %s", args)
    if args.verbose and args.quiet:
        l.warning("Overriding 'quiet' option ('verbose' also requested)")

    # parse file and get deps
    if args.ipython:
        l.debug("Adding ipython dependency because --ipython was detected")
        ipython_dep = parsing.parse_manual(['ipython'])
    else:
        ipython_dep = {}

    if args.executable:
        indicated_deps = {}
    else:
        indicated_deps = parsing.parse_srcfile(args.child_program)
        l.debug("Dependencies from source file: %s", indicated_deps)
    reqfile_deps = parsing.parse_reqfile(args.requirement)
    l.debug("Dependencies from requirements file: %s", reqfile_deps)
    manual_deps = parsing.parse_manual(args.dependency)
    l.debug("Dependencies from parameters: %s", manual_deps)
    indicated_deps = _merge_deps(ipython_dep, indicated_deps, reqfile_deps, manual_deps)

    # get the interpreter version requested for the child_program
    interpreter, is_current = helpers.get_interpreter_version(args.python)

    # start the virtualenvs manager
    venvscache = cache.VEnvsCache(os.path.join(helpers.get_basedir(), 'venvs.idx'))
    venv_data = venvscache.get_venv(indicated_deps, interpreter)
    if venv_data is None:
        venv_data, installed = envbuilder.create_venv(indicated_deps, interpreter, is_current)
        # store this new venv in the cache
        venvscache.store(installed, venv_data, interpreter)

    # run forest run!!
    python_exe = 'ipython' if args.ipython else 'python'
    python_exe = os.path.join(venv_data['env_bin_path'], python_exe)
    if args.child_program is None:
        l.debug("Calling the interactive Python interpreter")
        p = subprocess.Popen([python_exe])
    else:
        if args.executable:
            cmd = [os.path.join(venv_data['env_bin_path'], args.child_program)]
        else:
            cmd = [python_exe, args.child_program]
        l.debug("Calling the child program %r with options %s",
                args.child_program, args.child_options)
        p = subprocess.Popen(cmd + args.child_options)

    def _signal_handler(signum, _):
        """Handle signals received by parent process, send them to child."""
        l.debug("Redirecting signal %s to child", signum)
        os.kill(p.pid, signum)

    # redirect these signals
    for s in REDIRECTED_SIGNALS:
        signal.signal(s, _signal_handler)

    # wait child to finish, end
    rc = p.wait()
    if rc:
        l.debug("Child process not finished correctly: returncode=%d", rc)
gpl-3.0
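_merge_deps above is the piece that combines dependencies from the different input channels; note that, despite the docstring's "overwrite" wording, lists under the same repo key are concatenated. A standalone illustration (repo and package names made up):

def _merge_deps(*deps):
    final = {}
    for dep in deps:
        for repo, info in dep.items():
            final.setdefault(repo, []).extend(info)
    return final

merged = _merge_deps({'pypi': ['requests']}, {'pypi': ['numpy'], 'vcs': ['foo']})
print(merged)  # {'pypi': ['requests', 'numpy'], 'vcs': ['foo']}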
imaculate/scikit-learn
sklearn/linear_model/randomized_l1.py
11
24849
""" Randomized Lasso/Logistic: feature selection based on Lasso and sparse Logistic Regression """ # Author: Gael Varoquaux, Alexandre Gramfort # # License: BSD 3 clause import itertools from abc import ABCMeta, abstractmethod import warnings import numpy as np from scipy.sparse import issparse from scipy import sparse from scipy.interpolate import interp1d from .base import _preprocess_data from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.joblib import Memory, Parallel, delayed from ..utils import (as_float_array, check_random_state, check_X_y, check_array, safe_mask) from ..utils.validation import check_is_fitted from .least_angle import lars_path, LassoLarsIC from .logistic import LogisticRegression from ..exceptions import ConvergenceWarning ############################################################################### # Randomized linear model: feature selection def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200, n_jobs=1, verbose=False, pre_dispatch='3*n_jobs', random_state=None, sample_fraction=.75, **params): random_state = check_random_state(random_state) # We are generating 1 - weights, and not weights n_samples, n_features = X.shape if not (0 < scaling < 1): raise ValueError( "'scaling' should be between 0 and 1. Got %r instead." % scaling) scaling = 1. - scaling scores_ = 0.0 for active_set in Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)( delayed(estimator_func)( X, y, weights=scaling * random_state.randint( 0, 2, size=(n_features,)), mask=(random_state.rand(n_samples) < sample_fraction), verbose=max(0, verbose - 1), **params) for _ in range(n_resampling)): scores_ += active_set scores_ /= n_resampling return scores_ class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)): """Base class to implement randomized linear models for feature selection This implements the strategy by Meinshausen and Buhlman: stability selection with randomized sampling, and random re-weighting of the penalty. """ @abstractmethod def __init__(self): pass _preprocess_data = staticmethod(_preprocess_data) def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data. y : array-like, shape = [n_samples] Target values. Returns ------- self : object Returns an instance of self. 
""" X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True, ensure_min_samples=2, estimator=self) X = as_float_array(X, copy=False) n_samples, n_features = X.shape X, y, X_offset, y_offset, X_scale = \ self._preprocess_data(X, y, self.fit_intercept, self.normalize) estimator_func, params = self._make_estimator_and_params(X, y) memory = self.memory if isinstance(memory, six.string_types): memory = Memory(cachedir=memory) scores_ = memory.cache( _resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch'] )( estimator_func, X, y, scaling=self.scaling, n_resampling=self.n_resampling, n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.pre_dispatch, random_state=self.random_state, sample_fraction=self.sample_fraction, **params) if scores_.ndim == 1: scores_ = scores_[:, np.newaxis] self.all_scores_ = scores_ self.scores_ = np.max(self.all_scores_, axis=1) return self def _make_estimator_and_params(self, X, y): """Return the parameters passed to the estimator""" raise NotImplementedError def get_support(self, indices=False): """Return a mask, or list, of the features/indices selected.""" check_is_fitted(self, 'scores_') mask = self.scores_ > self.selection_threshold return mask if not indices else np.where(mask)[0] # XXX: the two function below are copy/pasted from feature_selection, # Should we add an intermediate base class? def transform(self, X): """Transform a new matrix using the selected features""" mask = self.get_support() X = check_array(X) if len(mask) != X.shape[1]: raise ValueError("X has a different shape than during fitting.") return check_array(X)[:, safe_mask(X, mask)] def inverse_transform(self, X): """Transform a new matrix using the selected features""" support = self.get_support() if X.ndim == 1: X = X[None, :] Xt = np.zeros((X.shape[0], support.size)) Xt[:, support] = X return Xt ############################################################################### # Randomized lasso: regression settings def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False, precompute=False, eps=np.finfo(np.float).eps, max_iter=500): X = X[safe_mask(X, mask)] y = y[mask] # Center X and y to avoid fit the intercept X -= X.mean(axis=0) y -= y.mean() alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64)) X = (1 - weights) * X with warnings.catch_warnings(): warnings.simplefilter('ignore', ConvergenceWarning) alphas_, _, coef_ = lars_path(X, y, Gram=precompute, copy_X=False, copy_Gram=False, alpha_min=np.min(alpha), method='lasso', verbose=verbose, max_iter=max_iter, eps=eps) if len(alpha) > 1: if len(alphas_) > 1: # np.min(alpha) < alpha_min interpolator = interp1d(alphas_[::-1], coef_[:, ::-1], bounds_error=False, fill_value=0.) scores = (interpolator(alpha) != 0.0) else: scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool) else: scores = coef_[:, -1] != 0.0 return scores class RandomizedLasso(BaseRandomizedLinearModel): """Randomized Lasso. Randomized Lasso works by subsampling the training data and computing a Lasso estimate where the penalty of a random subset of coefficients has been scaled. By performing this double randomization several times, the method assigns high scores to features that are repeatedly selected across randomizations. This is known as stability selection. In short, features selected more often are considered good features. Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- alpha : float, 'aic', or 'bic', optional The regularization parameter alpha parameter in the Lasso. 
Warning: this is not the alpha parameter in the stability selection article which is scaling. scaling : float, optional The s parameter used to randomly scale the penalty of different features (See :ref:`User Guide <randomized_l1>` for details ). Should be between 0 and 1. sample_fraction : float, optional The fraction of samples to be used in each randomized design. Should be between 0 and 1. If 1, all samples are used. n_resampling : int, optional Number of randomized models. selection_threshold: float, optional The score above which features should be selected. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learned more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. precompute : True | False | 'auto' Whether to use a precomputed Gram matrix to speed up calculations. If set to 'auto' let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform in the Lars algorithm. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the 'tol' parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. n_jobs : integer, optional Number of CPUs to use during the resampling. If '-1', use all the CPUs random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' memory : Instance of joblib.Memory or string Used for internal caching. By default, no caching is done. If a string is given, it is the path to the caching directory. Attributes ---------- scores_ : array, shape = [n_features] Feature scores between 0 and 1. all_scores_ : array, shape = [n_features, n_reg_parameter] Feature scores between 0 and 1 for all values of the regularization \ parameter. The reference article suggests ``scores_`` is the max of \ ``all_scores_``. 
Examples -------- >>> from sklearn.linear_model import RandomizedLasso >>> randomized_lasso = RandomizedLasso() Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. References ---------- Stability selection Nicolai Meinshausen, Peter Buhlmann Journal of the Royal Statistical Society: Series B Volume 72, Issue 4, pages 417-473, September 2010 DOI: 10.1111/j.1467-9868.2010.00740.x See also -------- RandomizedLogisticRegression, Lasso, ElasticNet """ def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75, n_resampling=200, selection_threshold=.25, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, random_state=None, n_jobs=1, pre_dispatch='3*n_jobs', memory=Memory(cachedir=None, verbose=0)): self.alpha = alpha self.scaling = scaling self.sample_fraction = sample_fraction self.n_resampling = n_resampling self.fit_intercept = fit_intercept self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.precompute = precompute self.eps = eps self.random_state = random_state self.n_jobs = n_jobs self.selection_threshold = selection_threshold self.pre_dispatch = pre_dispatch self.memory = memory def _make_estimator_and_params(self, X, y): assert self.precompute in (True, False, None, 'auto') alpha = self.alpha if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'): model = LassoLarsIC(precompute=self.precompute, criterion=self.alpha, max_iter=self.max_iter, eps=self.eps) model.fit(X, y) self.alpha_ = alpha = model.alpha_ return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter, eps=self.eps, precompute=self.precompute) ############################################################################### # Randomized logistic: classification settings def _randomized_logistic(X, y, weights, mask, C=1., verbose=False, fit_intercept=True, tol=1e-3): X = X[safe_mask(X, mask)] y = y[mask] if issparse(X): size = len(weights) weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size)) X = X * weight_dia else: X *= (1 - weights) C = np.atleast_1d(np.asarray(C, dtype=np.float64)) scores = np.zeros((X.shape[1], len(C)), dtype=np.bool) for this_C, this_scores in zip(C, scores.T): # XXX : would be great to do it with a warm_start ... clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False, fit_intercept=fit_intercept) clf.fit(X, y) this_scores[:] = np.any( np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0) return scores class RandomizedLogisticRegression(BaseRandomizedLinearModel): """Randomized Logistic Regression Randomized Logistic Regression works by subsampling the training data and fitting a L1-penalized LogisticRegression model where the penalty of a random subset of coefficients has been scaled. By performing this double randomization several times, the method assigns high scores to features that are repeatedly selected across randomizations. This is known as stability selection. In short, features selected more often are considered good features. Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- C : float, optional, default=1 The regularization parameter C in the LogisticRegression. scaling : float, optional, default=0.5 The s parameter used to randomly scale the penalty of different features (See :ref:`User Guide <randomized_l1>` for details ). Should be between 0 and 1. sample_fraction : float, optional, default=0.75 The fraction of samples to be used in each randomized design. Should be between 0 and 1. 
If 1, all samples are used. n_resampling : int, optional, default=200 Number of randomized models. selection_threshold : float, optional, default=0.25 The score above which features should be selected. fit_intercept : boolean, optional, default=True whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. tol : float, optional, default=1e-3 tolerance for stopping criteria of LogisticRegression n_jobs : integer, optional Number of CPUs to use during the resampling. If '-1', use all the CPUs random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' memory : Instance of joblib.Memory or string Used for internal caching. By default, no caching is done. If a string is given, it is the path to the caching directory. Attributes ---------- scores_ : array, shape = [n_features] Feature scores between 0 and 1. all_scores_ : array, shape = [n_features, n_reg_parameter] Feature scores between 0 and 1 for all values of the regularization \ parameter. The reference article suggests ``scores_`` is the max \ of ``all_scores_``. Examples -------- >>> from sklearn.linear_model import RandomizedLogisticRegression >>> randomized_logistic = RandomizedLogisticRegression() Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. 
References ---------- Stability selection Nicolai Meinshausen, Peter Buhlmann Journal of the Royal Statistical Society: Series B Volume 72, Issue 4, pages 417-473, September 2010 DOI: 10.1111/j.1467-9868.2010.00740.x See also -------- RandomizedLasso, LogisticRegression """ def __init__(self, C=1, scaling=.5, sample_fraction=.75, n_resampling=200, selection_threshold=.25, tol=1e-3, fit_intercept=True, verbose=False, normalize=True, random_state=None, n_jobs=1, pre_dispatch='3*n_jobs', memory=Memory(cachedir=None, verbose=0)): self.C = C self.scaling = scaling self.sample_fraction = sample_fraction self.n_resampling = n_resampling self.fit_intercept = fit_intercept self.verbose = verbose self.normalize = normalize self.tol = tol self.random_state = random_state self.n_jobs = n_jobs self.selection_threshold = selection_threshold self.pre_dispatch = pre_dispatch self.memory = memory def _make_estimator_and_params(self, X, y): params = dict(C=self.C, tol=self.tol, fit_intercept=self.fit_intercept) return _randomized_logistic, params def _preprocess_data(self, X, y, fit_intercept, normalize=False): """Center the data in X but not in y""" X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept, normalize=normalize) return X, y, X_offset, y, X_scale ############################################################################### # Stability paths def _lasso_stability_path(X, y, mask, weights, eps): "Inner loop of lasso_stability_path" X = X * weights[np.newaxis, :] X = X[safe_mask(X, mask), :] y = y[mask] alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0] alpha_min = eps * alpha_max # set for early stopping in path with warnings.catch_warnings(): warnings.simplefilter('ignore', ConvergenceWarning) alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False, alpha_min=alpha_min) # Scale alpha by alpha_max alphas /= alphas[0] # Sort alphas in assending order alphas = alphas[::-1] coefs = coefs[:, ::-1] # Get rid of the alphas that are too small mask = alphas >= eps # We also want to keep the first one: it should be close to the OLS # solution mask[0] = True alphas = alphas[mask] coefs = coefs[:, mask] return alphas, coefs def lasso_stability_path(X, y, scaling=0.5, random_state=None, n_resampling=200, n_grid=100, sample_fraction=0.75, eps=4 * np.finfo(np.float).eps, n_jobs=1, verbose=False): """Stabiliy path based on randomized Lasso estimates Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- X : array-like, shape = [n_samples, n_features] training data. y : array-like, shape = [n_samples] target values. scaling : float, optional, default=0.5 The alpha parameter in the stability selection article used to randomly scale the features. Should be between 0 and 1. random_state : integer or numpy.random.RandomState, optional The generator used to randomize the design. n_resampling : int, optional, default=200 Number of randomized models. n_grid : int, optional, default=100 Number of grid points. The path is linearly reinterpolated on a grid between 0 and 1 before computing the scores. sample_fraction : float, optional, default=0.75 The fraction of samples to be used in each randomized design. Should be between 0 and 1. If 1, all samples are used. eps : float, optional Smallest value of alpha / alpha_max considered n_jobs : integer, optional Number of CPUs to use during the resampling. 
If '-1', use all the CPUs verbose : boolean or integer, optional Sets the verbosity amount Returns ------- alphas_grid : array, shape ~ [n_grid] The grid points between 0 and 1: alpha/alpha_max scores_path : array, shape = [n_features, n_grid] The scores for each feature along the path. Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. """ rng = check_random_state(random_state) if not (0 < scaling < 1): raise ValueError("Parameter 'scaling' should be between 0 and 1." " Got %r instead." % scaling) n_samples, n_features = X.shape paths = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_lasso_stability_path)( X, y, mask=rng.rand(n_samples) < sample_fraction, weights=1. - scaling * rng.randint(0, 2, size=(n_features,)), eps=eps) for k in range(n_resampling)) all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths])))) # Take approximately n_grid values stride = int(max(1, int(len(all_alphas) / float(n_grid)))) all_alphas = all_alphas[::stride] if not all_alphas[-1] == 1: all_alphas.append(1.) all_alphas = np.array(all_alphas) scores_path = np.zeros((n_features, len(all_alphas))) for alphas, coefs in paths: if alphas[0] != 0: alphas = np.r_[0, alphas] coefs = np.c_[np.ones((n_features, 1)), coefs] if alphas[-1] != all_alphas[-1]: alphas = np.r_[alphas, all_alphas[-1]] coefs = np.c_[coefs, np.zeros((n_features, 1))] scores_path += (interp1d(alphas, coefs, kind='nearest', bounds_error=False, fill_value=0, axis=-1)(all_alphas) != 0) scores_path /= n_resampling return all_alphas, scores_path
bsd-3-clause
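A short usage sketch for the estimator above on synthetic data (parameter values chosen only to keep the run fast; scores_ and get_support behave as documented in the class docstring):

import numpy as np
from sklearn.linear_model import RandomizedLasso

rng = np.random.RandomState(0)
X = rng.randn(200, 10)
y = X[:, 0] + 2 * X[:, 1] + 0.1 * rng.randn(200)  # only features 0 and 1 are informative

rl = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0)
rl.fit(X, y)
print(rl.scores_)                    # per-feature selection frequency in [0, 1]
print(rl.get_support(indices=True))  # features above selection_threshold (default 0.25)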
totallybradical/temp_servo2
tests/wpt/web-platform-tests/old-tests/webdriver/user_input/click_test.py
141
10579
import os
import sys
import unittest

sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test

repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions, wait


class ClickTest(base_test.WebDriverBaseTest):
    def setUp(self):
        self.wait = wait.WebDriverWait(self.driver, 5,
                                       ignored_exceptions=[exceptions.NoSuchAlertException])
        self.driver.get(self.webserver.where_is('modal/res/alerts.html'))

    def tearDown(self):
        try:
            self.driver.switch_to_alert().dismiss()
        except exceptions.NoSuchAlertException:
            pass

    def test_click_div(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("div")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "div")

    def test_click_p(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("p")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "p")

    def test_click_h1(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("h1")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "h1")

    def test_click_pre(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("pre")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "pre")

    def test_click_ol(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("ol")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "ol")

    def test_click_ul(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("ul")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "ul")

    def test_click_a(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("a")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "a")

    def test_click_img(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("img")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "img")

    def test_click_video(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("video")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "video")

    def test_click_canvas(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("canvas")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "canvas")

    def test_click_progress(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("progress")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "progress")

    def test_click_textarea(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("textarea")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "textarea")

    def test_click_button(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("button")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "button")

    def test_click_svg(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("svg")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "svg")

    def test_click_input_range(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_range")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_range")

    def test_click_input_button(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_button")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_button")

    def test_click_input_submit(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_submit")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_submit")

    def test_click_input_reset(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_reset")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_reset")

    def test_click_input_checkbox(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_checkbox")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_checkbox")

    def test_click_input_radio(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_radio")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_radio")

    def test_click_input_text(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_text")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_text")

    def test_click_input_number(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_number")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_number")

    def test_click_input_tel(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_tel")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_tel")

    def test_click_input_url(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_url")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_url")

    def test_click_input_email(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_email")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_email")

    def test_click_input_search(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_search")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_search")

    def test_click_input_image(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        element = self.driver.find_element_by_id("input_image")
        element.click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, "input_image")


if __name__ == "__main__":
    unittest.main()
mpl-2.0
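The 27 tests above differ only in the element id they click; a hedged sketch of how the same coverage could be generated without the repetition (names mirror the suite above, but this generator is not part of it):

def _make_click_test(element_id):
    def test(self):
        self.driver.get(self.webserver.where_is("user_input/res/click.html"))
        self.driver.find_element_by_id(element_id).click()
        alert = self.wait.until(lambda x: x.switch_to_alert())
        value = alert.get_text()
        alert.accept()
        self.assertEquals(value, element_id)
    return test

for _id in ("div", "p", "h1", "a", "button", "input_text"):
    setattr(ClickTest, "test_click_%s" % _id, _make_click_test(_id))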
luotao1/Paddle
python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py
2
6097
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker


class ConvBnFusePassExplicitPaddingTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=6,
                filter_size=6,
                groups=3,
                padding=[1, 1, 1, 1],
                bias_attr=False,
                act=None)
            bn_out = fluid.layers.batch_norm(conv_out, is_test=True)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [bn_out]

    def test_check_output(self):
        self.check_output()
        self.assertTrue(PassVersionChecker.IsCompatible('conv_bn_fuse_pass'))


class ConvBnFusePassValidPaddingTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=6,
                filter_size=6,
                groups=3,
                padding='VALID',
                bias_attr=False,
                act=None)
            bn_out = fluid.layers.batch_norm(conv_out, is_test=True)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [bn_out]

    def test_check_output(self):
        self.check_output()
        self.assertTrue(PassVersionChecker.IsCompatible('conv_bn_fuse_pass'))


class ConvBnFusePassSamePaddingTest(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=6,
                filter_size=6,
                groups=3,
                padding='SAME',
                bias_attr=False,
                act=None)
            bn_out = fluid.layers.batch_norm(conv_out, is_test=True)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [bn_out]

    def test_check_output(self):
        self.check_output()
        self.assertTrue(PassVersionChecker.IsCompatible('conv_bn_fuse_pass'))


class ConvEltwiseAddBnFuseExplicitPaddingPass(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=6,
                filter_size=6,
                groups=3,
                padding=[1, 1, 1, 1],
                bias_attr=None,
                act=None)
            bn_out = fluid.layers.batch_norm(conv_out, is_test=True)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [bn_out]

    def test_check_output(self):
        self.check_output()
        self.assertTrue(
            PassVersionChecker.IsCompatible('conv_eltwiseadd_bn_fuse_pass'))


class ConvEltwiseAddBnFuseValidPaddingPass(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=6,
                filter_size=6,
                groups=3,
                padding='VALID',
                bias_attr=None,
                act=None)
            bn_out = fluid.layers.batch_norm(conv_out, is_test=True)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [bn_out]

    def test_check_output(self):
        self.check_output()
        self.assertTrue(
            PassVersionChecker.IsCompatible('conv_eltwiseadd_bn_fuse_pass'))


class ConvEltwiseAddBnFuseSamePaddingPass(InferencePassTest):
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 3, 64, 64], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=6,
                filter_size=6,
                groups=3,
                padding='SAME',
                bias_attr=None,
                act=None)
            bn_out = fluid.layers.batch_norm(conv_out, is_test=True)

        self.feeds = {
            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
        }
        self.fetch_list = [bn_out]

    def test_check_output(self):
        self.check_output()
        self.assertTrue(
            PassVersionChecker.IsCompatible('conv_eltwiseadd_bn_fuse_pass'))


if __name__ == "__main__":
    unittest.main()
apache-2.0
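All six cases above build the same conv2d + batch_norm graph and vary only padding and bias_attr (bias_attr=False exercises conv_bn_fuse_pass; bias_attr=None adds a conv bias, i.e. an elementwise add, and exercises conv_eltwiseadd_bn_fuse_pass). A hedged helper sketch of that shared structure, using only the fluid calls already present above:

import paddle.fluid as fluid

def build_conv_bn(main_program, startup_program, padding, bias_attr):
    # returns the batch_norm output of the shared conv2d + batch_norm graph
    with fluid.program_guard(main_program, startup_program):
        data = fluid.data(name="data", shape=[-1, 3, 64, 64], dtype="float32")
        conv_out = fluid.layers.conv2d(
            input=data, num_filters=6, filter_size=6, groups=3,
            padding=padding, bias_attr=bias_attr, act=None)
        return fluid.layers.batch_norm(conv_out, is_test=True)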
kennethreitz/pipenv
pipenv/vendor/dateutil/easter.py
34
2684
# -*- coding: utf-8 -*-
"""
This module offers a generic easter computing method for any given year, using
Western, Orthodox or Julian algorithms.
"""

import datetime

__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]

EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3


def easter(year, method=EASTER_WESTERN):
    """
    This method was ported from the work done by GM Arts,
    on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Ouding (1940), as
    quoted in "Explanatory Supplement to the Astronomical
    Almanac", P. Kenneth Seidelmann, editor.

    This algorithm implements three different easter
    calculation methods:

    1 - Original calculation in Julian calendar, valid in
        dates after 326 AD
    2 - Original method, with date converted to Gregorian
        calendar, valid in years 1583 to 4099
    3 - Revised method, in Gregorian calendar, valid in
        years 1583 to 4099 as well

    These methods are represented by the constants:

    * ``EASTER_JULIAN = 1``
    * ``EASTER_ORTHODOX = 2``
    * ``EASTER_WESTERN = 3``

    The default method is method 3.

    More about the algorithm may be found at:

    `GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_

    and

    `The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_
    """

    if not (1 <= method <= 3):
        raise ValueError("invalid method")

    # g - Golden year - 1
    # c - Century
    # h - (23 - Epact) mod 30
    # i - Number of days from March 21 to Paschal Full Moon
    # j - Weekday for PFM (0=Sunday, etc)
    # p - Number of days from March 21 to Sunday on or before PFM
    #     (-6 to 28 methods 1 & 3, to 56 for method 2)
    # e - Extra days to add for method 2 (converting Julian
    #     date to Gregorian date)

    y = year
    g = y % 19
    e = 0
    if method < 3:
        # Old method
        i = (19*g + 15) % 30
        j = (y + y//4 + i) % 7
        if method == 2:
            # Extra dates to convert Julian to Gregorian date
            e = 10
            if y > 1600:
                e = e + y//100 - 16 - (y//100 - 16)//4
    else:
        # New method
        c = y//100
        h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
        i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
        j = (y + y//4 + i + 2 - c + c//4) % 7

    # p can be from -6 to 56 corresponding to dates 22 March to 23 May
    # (later dates apply to method 2, although 23 May never actually occurs)
    p = i - j + e
    d = 1 + (p + 27 + (p + 6)//40) % 31
    m = 3 + (p + 26)//30
    return datetime.date(int(y), int(m), int(d))
mit
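A usage sketch for the function above; Easter Sunday 2024 fell on 31 March in the Western reckoning, which the default method reproduces:

import datetime
from dateutil.easter import easter, EASTER_ORTHODOX

assert easter(2024) == datetime.date(2024, 3, 31)  # method 3, the default
print(easter(2024, EASTER_ORTHODOX))               # Orthodox Easter as a Gregorian-calendar date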
keflavich/scikit-image
skimage/io/_plugins/freeimage_plugin.py
26
27618
import ctypes
import numpy
import sys
import os
import os.path
from numpy.compat import asbytes, asstr


def _generate_candidate_libs():
    # look for likely library files in the following dirs:
    lib_dirs = [os.path.dirname(__file__),
                '/lib',
                '/usr/lib',
                '/usr/local/lib',
                '/opt/local/lib',
                os.path.join(sys.prefix, 'lib'),
                os.path.join(sys.prefix, 'DLLs')
                ]
    if 'HOME' in os.environ:
        lib_dirs.append(os.path.join(os.environ['HOME'], 'lib'))
    lib_dirs = [ld for ld in lib_dirs if os.path.exists(ld)]

    lib_names = ['libfreeimage', 'freeimage']  # should be lower-case!
    # Now attempt to find libraries of that name in the given directory
    # (case-insensitive and without regard for extension)
    lib_paths = []
    for lib_dir in lib_dirs:
        for lib_name in lib_names:
            files = os.listdir(lib_dir)
            lib_paths += [os.path.join(lib_dir, lib) for lib in files
                          if lib.lower().startswith(lib_name) and not
                          os.path.splitext(lib)[1] in ('.py', '.pyc', '.ini')]
    lib_paths = [lp for lp in lib_paths if os.path.exists(lp)]

    return lib_dirs, lib_paths

if sys.platform == 'win32':
    LOADER = ctypes.windll
    FUNCTYPE = ctypes.WINFUNCTYPE
else:
    LOADER = ctypes.cdll
    FUNCTYPE = ctypes.CFUNCTYPE


def handle_errors():
    global FT_ERROR_STR
    if FT_ERROR_STR:
        tmp = FT_ERROR_STR
        FT_ERROR_STR = None
        raise RuntimeError(tmp)

FT_ERROR_STR = None


# This MUST happen in module scope, or the function pointer is garbage
# collected, leading to a segfault when error_handler is called.
@FUNCTYPE(None, ctypes.c_int, ctypes.c_char_p)
def c_error_handler(fif, message):
    global FT_ERROR_STR
    FT_ERROR_STR = 'FreeImage error: %s' % message


def load_freeimage():
    freeimage = None
    errors = []
    # First try a few bare library names that ctypes might be able to find
    # in the default locations for each platform. Win DLL names don't need the
    # extension, but other platforms do.
    bare_libs = ['FreeImage', 'libfreeimage.dylib', 'libfreeimage.so',
                 'libfreeimage.so.3']

    lib_dirs, lib_paths = _generate_candidate_libs()
    lib_paths = bare_libs + lib_paths
    for lib in lib_paths:
        try:
            freeimage = LOADER.LoadLibrary(lib)
            break
        except Exception:
            if lib not in bare_libs:
                # Don't record errors when it couldn't load the library from
                # a bare name -- this fails often, and doesn't provide any
                # useful debugging information anyway, beyond "couldn't find
                # library..."
                # Get exception instance in Python 2.x/3.x compatible manner
                e_type, e_value, e_tb = sys.exc_info()
                del e_tb
                errors.append((lib, e_value))

    if freeimage is None:
        if errors:
            # No freeimage library loaded, and load-errors reported for some
            # candidate libs
            err_txt = ['%s:\n%s' % (l, str(e)) for l, e in errors]
            raise RuntimeError('One or more FreeImage libraries were found, '
                               'but could not be loaded due to the following '
                               'errors:\n\n\n'.join(err_txt))
        else:
            # No errors, because no potential libraries found at all!
            raise RuntimeError('Could not find a FreeImage library in any of:'
                               '\n\n'.join(lib_dirs))

    # FreeImage found
    freeimage.FreeImage_SetOutputMessage(c_error_handler)
    return freeimage

_FI = load_freeimage()

API = {
    # All we're doing here is telling ctypes that some of the FreeImage
    # functions return pointers instead of integers. (On 64-bit systems,
    # without this information the pointers get truncated and crashes result).
    # There's no need to list functions that return ints, or the types of the
    # parameters to these or other functions -- that's fine to do implicitly.

    # Note that the ctypes immediately converts the returned void_p back to a
    # python int again! This is really not helpful, because then passing it
    # back to another library call will cause truncation-to-32-bits on 64-bit
    # systems. Thanks, ctypes! So after these calls one must immediately
    # re-wrap the int as a c_void_p if it is to be passed back into FreeImage.
    'FreeImage_AllocateT': (ctypes.c_void_p, None),
    'FreeImage_FindFirstMetadata': (ctypes.c_void_p, None),
    'FreeImage_GetBits': (ctypes.c_void_p, None),
    'FreeImage_GetPalette': (ctypes.c_void_p, None),
    'FreeImage_GetTagKey': (ctypes.c_char_p, None),
    'FreeImage_GetTagValue': (ctypes.c_void_p, None),
    'FreeImage_Load': (ctypes.c_void_p, None),
    'FreeImage_LockPage': (ctypes.c_void_p, None),
    'FreeImage_OpenMultiBitmap': (ctypes.c_void_p, None)
}


# Albert's ctypes pattern
def register_api(lib, api):
    for f, (restype, argtypes) in api.items():
        func = getattr(lib, f)
        func.restype = restype
        func.argtypes = argtypes

register_api(_FI, API)


class FiTypes(object):
    FIT_UNKNOWN = 0
    FIT_BITMAP = 1
    FIT_UINT16 = 2
    FIT_INT16 = 3
    FIT_UINT32 = 4
    FIT_INT32 = 5
    FIT_FLOAT = 6
    FIT_DOUBLE = 7
    FIT_COMPLEX = 8
    FIT_RGB16 = 9
    FIT_RGBA16 = 10
    FIT_RGBF = 11
    FIT_RGBAF = 12

    dtypes = {FIT_BITMAP: numpy.uint8,
              FIT_UINT16: numpy.uint16,
              FIT_INT16: numpy.int16,
              FIT_UINT32: numpy.uint32,
              FIT_INT32: numpy.int32,
              FIT_FLOAT: numpy.float32,
              FIT_DOUBLE: numpy.float64,
              FIT_COMPLEX: numpy.complex128,
              FIT_RGB16: numpy.uint16,
              FIT_RGBA16: numpy.uint16,
              FIT_RGBF: numpy.float32,
              FIT_RGBAF: numpy.float32,
              }

    fi_types = {(numpy.dtype('uint8'), 1): FIT_BITMAP,
                (numpy.dtype('uint8'), 3): FIT_BITMAP,
                (numpy.dtype('uint8'), 4): FIT_BITMAP,
                (numpy.dtype('uint16'), 1): FIT_UINT16,
                (numpy.dtype('int16'), 1): FIT_INT16,
                (numpy.dtype('uint32'), 1): FIT_UINT32,
                (numpy.dtype('int32'), 1): FIT_INT32,
                (numpy.dtype('float32'), 1): FIT_FLOAT,
                (numpy.dtype('float64'), 1): FIT_DOUBLE,
                (numpy.dtype('complex128'), 1): FIT_COMPLEX,
                (numpy.dtype('uint16'), 3): FIT_RGB16,
                (numpy.dtype('uint16'), 4): FIT_RGBA16,
                (numpy.dtype('float32'), 3): FIT_RGBF,
                (numpy.dtype('float32'), 4): FIT_RGBAF,
                }

    extra_dims = {FIT_UINT16: [],
                  FIT_INT16: [],
                  FIT_UINT32: [],
                  FIT_INT32: [],
                  FIT_FLOAT: [],
                  FIT_DOUBLE: [],
                  FIT_COMPLEX: [],
                  FIT_RGB16: [3],
                  FIT_RGBA16: [4],
                  FIT_RGBF: [3],
                  FIT_RGBAF: [4],
                  }

    @classmethod
    def get_type_and_shape(cls, bitmap):
        w = _FI.FreeImage_GetWidth(bitmap)
        handle_errors()
        h = _FI.FreeImage_GetHeight(bitmap)
        handle_errors()
        fi_type = _FI.FreeImage_GetImageType(bitmap)
        handle_errors()
        if not fi_type:
            raise ValueError('Unknown image pixel type')
        dtype = cls.dtypes[fi_type]
        if fi_type == cls.FIT_BITMAP:
            bpp = _FI.FreeImage_GetBPP(bitmap)
            handle_errors()
            if bpp == 8:
                extra_dims = []
            elif bpp == 24:
                extra_dims = [3]
            elif bpp == 32:
                extra_dims = [4]
            else:
                raise ValueError('Cannot convert %d BPP bitmap' % bpp)
        else:
            extra_dims = cls.extra_dims[fi_type]
        return numpy.dtype(dtype), extra_dims + [w, h]


class IoFlags(object):
    # loading: load the image header only (not supported by all plugins)
    FIF_LOAD_NOPIXELS = 0x8000

    BMP_DEFAULT = 0
    BMP_SAVE_RLE = 1
    CUT_DEFAULT = 0
    DDS_DEFAULT = 0
    EXR_DEFAULT = 0  # save data as half with piz-based wavelet compression
    EXR_FLOAT = 0x0001  # save data as float instead of half (not recommended)
    EXR_NONE = 0x0002  # save with no compression
    EXR_ZIP = 0x0004  # save with zlib compression, in blocks of 16 scan lines
    EXR_PIZ = 0x0008  # save with piz-based wavelet compression
    EXR_PXR24 = 0x0010  # save with lossy 24-bit float compression
    # save with lossy 44% float compression (22% when combined with EXR_LC)
    EXR_B44 = 0x0020
    # one luminance and two chroma channels rather than as RGB (lossy)
    EXR_LC = 0x0040
    FAXG3_DEFAULT = 0
    GIF_DEFAULT = 0
    # Load as 256 color image with ununsed palette entries if 16 or 2 color
    GIF_LOAD256 = 1
    # 'Play' the GIF generating each frame (as 32bpp) instead of raw frame data
    GIF_PLAYBACK = 2
    HDR_DEFAULT = 0
    ICO_DEFAULT = 0
    # convert to 32bpp then add an alpha channel from the AND-mask when loading
    ICO_MAKEALPHA = 1
    IFF_DEFAULT = 0
    J2K_DEFAULT = 0  # save with a 16:1 rate
    JP2_DEFAULT = 0  # save with a 16:1 rate
    # loading (see JPEG_FAST)
    # saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420)
    JPEG_DEFAULT = 0
    # load the file as fast as possible, sacrificing some quality
    JPEG_FAST = 0x0001
    # load the file with the best quality, sacrificing some speed
    JPEG_ACCURATE = 0x0002
    # load separated CMYK "as is" (use | to combine with other load flags)
    JPEG_CMYK = 0x0004
    # load and rotate according to Exif 'Orientation' tag if available
    JPEG_EXIFROTATE = 0x0008
    JPEG_QUALITYSUPERB = 0x80  # save with superb quality (100:1)
    JPEG_QUALITYGOOD = 0x0100  # save with good quality (75:1)
    JPEG_QUALITYNORMAL = 0x0200  # save with normal quality (50:1)
    JPEG_QUALITYAVERAGE = 0x0400  # save with average quality (25:1)
    JPEG_QUALITYBAD = 0x0800  # save with bad quality (10:1)
    # save as a progressive-JPEG (use | to combine with other save flags)
    JPEG_PROGRESSIVE = 0x2000
    # save with high 4x1 chroma subsampling (4:1:1)
    JPEG_SUBSAMPLING_411 = 0x1000
    # save with medium 2x2 medium chroma subsampling (4:2:0) - default value
    JPEG_SUBSAMPLING_420 = 0x4000
    # save with low 2x1 chroma subsampling (4:2:2)
    JPEG_SUBSAMPLING_422 = 0x8000
    JPEG_SUBSAMPLING_444 = 0x10000  # save with no chroma subsampling (4:4:4)
    # compute optimal Huffman coding tables (can reduce file size a few %)
    JPEG_OPTIMIZE = 0x20000
    # on saving, save basic JPEG, without metadata or any markers
    JPEG_BASELINE = 0x40000
    KOALA_DEFAULT = 0
    LBM_DEFAULT = 0
    MNG_DEFAULT = 0
    PCD_DEFAULT = 0
    PCD_BASE = 1  # load the bitmap sized 768 x 512
    PCD_BASEDIV4 = 2  # load the bitmap sized 384 x 256
    PCD_BASEDIV16 = 3  # load the bitmap sized 192 x 128
    PCX_DEFAULT = 0
    PFM_DEFAULT = 0
    PICT_DEFAULT = 0
    PNG_DEFAULT = 0
    PNG_IGNOREGAMMA = 1  # loading: avoid gamma correction
    # save using ZLib level 1 compression flag (default value is 6)
    PNG_Z_BEST_SPEED = 0x0001
    # save using ZLib level 6 compression flag (default recommended value)
    PNG_Z_DEFAULT_COMPRESSION = 0x0006
    # save using ZLib level 9 compression flag (default value is 6)
    PNG_Z_BEST_COMPRESSION = 0x0009
    PNG_Z_NO_COMPRESSION = 0x0100  # save without ZLib compression
    # save using Adam7 interlacing (use | to combine with other save flags)
    PNG_INTERLACED = 0x0200
    PNM_DEFAULT = 0
    PNM_SAVE_RAW = 0  # Writer saves in RAW format (i.e. P4, P5 or P6)
    PNM_SAVE_ASCII = 1  # Writer saves in ASCII format (i.e. P1, P2 or P3)
    PSD_DEFAULT = 0
    PSD_CMYK = 1  # reads tags for separated CMYK (default converts to RGB)
    PSD_LAB = 2  # reads tags for CIELab (default is conversion to RGB)
    RAS_DEFAULT = 0
    RAW_DEFAULT = 0  # load the file as linear RGB 48-bit
    # try to load embedded JPEG preview from Exif Data or default to RGB 24-bit
    RAW_PREVIEW = 1
    RAW_DISPLAY = 2  # load the file as RGB 24-bit
    SGI_DEFAULT = 0
    TARGA_DEFAULT = 0
    TARGA_LOAD_RGB888 = 1  # Convert RGB555 and ARGB8888 -> RGB888.
    TARGA_SAVE_RLE = 2  # Save with RLE compression
    TIFF_DEFAULT = 0
    # reads/stores tags for separated CMYK
    # (use | to combine with compression flags)
    TIFF_CMYK = 0x0001
    TIFF_PACKBITS = 0x0100  # save using PACKBITS compression
    TIFF_DEFLATE = 0x0200  # save using DEFLATE (a.k.a. ZLIB) compression
    TIFF_ADOBE_DEFLATE = 0x0400  # save using ADOBE DEFLATE compression
    TIFF_NONE = 0x0800  # save without any compression
    TIFF_CCITTFAX3 = 0x1000  # save using CCITT Group 3 fax encoding
    TIFF_CCITTFAX4 = 0x2000  # save using CCITT Group 4 fax encoding
    TIFF_LZW = 0x4000  # save using LZW compression
    TIFF_JPEG = 0x8000  # save using JPEG compression
    TIFF_LOGLUV = 0x10000  # save using LogLuv compression
    WBMP_DEFAULT = 0
    XBM_DEFAULT = 0
    XPM_DEFAULT = 0


class MetadataModels(object):
    FIMD_COMMENTS = 0
    FIMD_EXIF_MAIN = 1
    FIMD_EXIF_EXIF = 2
    FIMD_EXIF_GPS = 3
    FIMD_EXIF_MAKERNOTE = 4
    FIMD_EXIF_INTEROP = 5
    FIMD_IPTC = 6
    FIMD_XMP = 7
    FIMD_GEOTIFF = 8
    FIMD_ANIMATION = 9


class MetadataDatatype(object):
    FIDT_BYTE = 1  # 8-bit unsigned integer
    FIDT_ASCII = 2  # 8-bit bytes w/ last byte null
    FIDT_SHORT = 3  # 16-bit unsigned integer
    FIDT_LONG = 4  # 32-bit unsigned integer
    FIDT_RATIONAL = 5  # 64-bit unsigned fraction
    FIDT_SBYTE = 6  # 8-bit signed integer
    FIDT_UNDEFINED = 7  # 8-bit untyped data
    FIDT_SSHORT = 8  # 16-bit signed integer
    FIDT_SLONG = 9  # 32-bit signed integer
    FIDT_SRATIONAL = 10  # 64-bit signed fraction
    FIDT_FLOAT = 11  # 32-bit IEEE floating point
    FIDT_DOUBLE = 12  # 64-bit IEEE floating point
    FIDT_IFD = 13  # 32-bit unsigned integer (offset)
    FIDT_PALETTE = 14  # 32-bit RGBQUAD
    FIDT_LONG8 = 16  # 64-bit unsigned integer
    FIDT_SLONG8 = 17  # 64-bit signed integer
    FIDT_IFD8 = 18  # 64-bit unsigned integer (offset)

    dtypes = {FIDT_BYTE: numpy.uint8,
              FIDT_SHORT: numpy.uint16,
              FIDT_LONG: numpy.uint32,
              FIDT_RATIONAL: [('numerator', numpy.uint32),
                              ('denominator', numpy.uint32)],
              FIDT_SBYTE: numpy.int8,
              FIDT_UNDEFINED: numpy.uint8,
              FIDT_SSHORT: numpy.int16,
              FIDT_SLONG: numpy.int32,
              FIDT_SRATIONAL: [('numerator', numpy.int32),
                               ('denominator', numpy.int32)],
              FIDT_FLOAT: numpy.float32,
              FIDT_DOUBLE: numpy.float64,
              FIDT_IFD: numpy.uint32,
              FIDT_PALETTE: [('R', numpy.uint8), ('G', numpy.uint8),
                             ('B', numpy.uint8), ('A', numpy.uint8)],
              FIDT_LONG8: numpy.uint64,
              FIDT_SLONG8: numpy.int64,
              FIDT_IFD8: numpy.uint64,
              }


def _process_bitmap(filename, flags, process_func):
    filename = asbytes(filename)
    ftype = _FI.FreeImage_GetFileType(filename, 0)
    handle_errors()
    if ftype == -1:
        raise ValueError('Cannot determine type of file %s' % filename)
    bitmap = _FI.FreeImage_Load(ftype, filename, flags)
    handle_errors()
    bitmap = ctypes.c_void_p(bitmap)
    if not bitmap:
        raise ValueError('Could not load file %s' % filename)
    try:
        return process_func(bitmap)
    finally:
        _FI.FreeImage_Unload(bitmap)
        handle_errors()


def read(filename, flags=0):
    """Read an image to a numpy array of shape (height, width) for
    greyscale images, or shape (height, width, nchannels) for RGB or
    RGBA images.

    The `flags` parameter should be one or more values from the IoFlags
    class defined in this module, or-ed together with | as appropriate.
    (See the source-code comments for more details.)
    """
    return _process_bitmap(filename, flags, _array_from_bitmap)


def read_metadata(filename):
    """Return a dict containing all image metadata.

    Returned dict maps (metadata_model, tag_name) keys to tag values,
    where metadata_model is a string name based on the FreeImage
    "metadata models" defined in the class MetadataModels.
    """
    flags = IoFlags.FIF_LOAD_NOPIXELS
    return _process_bitmap(filename, flags, _read_metadata)


def _process_multipage(filename, flags, process_func):
    filename = asbytes(filename)
    ftype = _FI.FreeImage_GetFileType(filename, 0)
    handle_errors()
    if ftype == -1:
        raise ValueError('Cannot determine type of file %s' % filename)
    create_new = False
    read_only = True
    keep_cache_in_memory = True
    multibitmap = _FI.FreeImage_OpenMultiBitmap(
        ftype, filename, create_new, read_only, keep_cache_in_memory, flags)
    handle_errors()
    multibitmap = ctypes.c_void_p(multibitmap)
    if not multibitmap:
        raise ValueError('Could not open %s as multi-page image.' % filename)
    try:
        pages = _FI.FreeImage_GetPageCount(multibitmap)
        handle_errors()
        out = []
        for i in range(pages):
            bitmap = _FI.FreeImage_LockPage(multibitmap, i)
            handle_errors()
            bitmap = ctypes.c_void_p(bitmap)
            if not bitmap:
                raise ValueError('Could not open %s as a multi-page image.'
                                 % filename)
            try:
                out.append(process_func(bitmap))
            finally:
                _FI.FreeImage_UnlockPage(multibitmap, bitmap, False)
                handle_errors()
        return out
    finally:
        _FI.FreeImage_CloseMultiBitmap(multibitmap, 0)
        handle_errors()


def read_multipage(filename, flags=0):
    """Read a multipage image to a list of numpy arrays, where each
    array is of shape (height, width) for greyscale images, or shape
    (height, width, nchannels) for RGB or RGBA images.

    The `flags` parameter should be one or more values from the IoFlags
    class defined in this module, or-ed together with | as appropriate.
    (See the source-code comments for more details.)
    """
    return _process_multipage(filename, flags, _array_from_bitmap)


def read_multipage_metadata(filename):
    """Read a multipage image to a list of metadata dicts, one dict for
    each page. The dict format is as in read_metadata().
    """
    flags = IoFlags.FIF_LOAD_NOPIXELS
    return _process_multipage(filename, flags, _read_metadata)


def _wrap_bitmap_bits_in_array(bitmap, shape, dtype):
    """Return an ndarray view on the data in a FreeImage bitmap. Only
    valid for as long as the bitmap is loaded (if single page) / locked
    in memory (if multipage).
    """
    pitch = _FI.FreeImage_GetPitch(bitmap)
    handle_errors()
    height = shape[-1]
    byte_size = height * pitch
    itemsize = dtype.itemsize

    if len(shape) == 3:
        strides = (itemsize, shape[0] * itemsize, pitch)
    else:
        strides = (itemsize, pitch)
    bits = _FI.FreeImage_GetBits(bitmap)
    handle_errors()
    array = numpy.ndarray(
        shape, dtype=dtype,
        buffer=(ctypes.c_char * byte_size).from_address(bits),
        strides=strides)
    return array


def _array_from_bitmap(bitmap):
    """Convert a FreeImage bitmap pointer to a numpy array.
    """
    dtype, shape = FiTypes.get_type_and_shape(bitmap)
    array = _wrap_bitmap_bits_in_array(bitmap, shape, dtype)
    # swizzle the color components and flip the scanlines to go from
    # FreeImage's BGR[A] and upside-down internal memory format to something
    # more normal
    def n(arr):
        return arr[..., ::-1].T
    if len(shape) == 3 and _FI.FreeImage_IsLittleEndian() and \
            dtype.type == numpy.uint8:
        b = n(array[0])
        g = n(array[1])
        r = n(array[2])
        if shape[0] == 3:
            handle_errors()
            return numpy.dstack((r, g, b))
        elif shape[0] == 4:
            a = n(array[3])
            return numpy.dstack((r, g, b, a))
        else:
            raise ValueError('Cannot handle images of shape %s' % shape)

    # We need to copy because array does *not* own its memory
    # after bitmap is freed.
    return n(array).copy()


def _read_metadata(bitmap):
    metadata = {}
    models = [(name[5:], number) for name, number in
              MetadataModels.__dict__.items() if name.startswith('FIMD_')]

    tag = ctypes.c_void_p()
    for model_name, number in models:
        mdhandle = _FI.FreeImage_FindFirstMetadata(number, bitmap,
                                                   ctypes.byref(tag))
        handle_errors()
        mdhandle = ctypes.c_void_p(mdhandle)
        if mdhandle:
            more = True
            while more:
                tag_name = asstr(_FI.FreeImage_GetTagKey(tag))
                tag_type = _FI.FreeImage_GetTagType(tag)
                byte_size = _FI.FreeImage_GetTagLength(tag)
                handle_errors()
                char_ptr = ctypes.c_char * byte_size
                tag_str = char_ptr.from_address(_FI.FreeImage_GetTagValue(tag))
                handle_errors()
                if tag_type == MetadataDatatype.FIDT_ASCII:
                    tag_val = asstr(tag_str.value)
                else:
                    tag_val = numpy.fromstring(
                        tag_str, dtype=MetadataDatatype.dtypes[tag_type])
                    if len(tag_val) == 1:
                        tag_val = tag_val[0]
                metadata[(model_name, tag_name)] = tag_val
                more = _FI.FreeImage_FindNextMetadata(mdhandle,
                                                      ctypes.byref(tag))
                handle_errors()
            _FI.FreeImage_FindCloseMetadata(mdhandle)
            handle_errors()
    return metadata


def write(array, filename, flags=0):
    """Write a (height, width) or (height, width, nchannels) array to
    a greyscale, RGB, or RGBA image, with file type deduced from the
    filename.

    The `flags` parameter should be one or more values from the IoFlags
    class defined in this module, or-ed together with | as appropriate.
    (See the source-code comments for more details.)
    """
    array = numpy.asarray(array)
    filename = asbytes(filename)
    ftype = _FI.FreeImage_GetFIFFromFilename(filename)
    handle_errors()
    if ftype == -1:
        raise ValueError('Cannot determine type for %s' % filename)
    bitmap, fi_type = _array_to_bitmap(array)
    try:
        if fi_type == FiTypes.FIT_BITMAP:
            can_write = _FI.FreeImage_FIFSupportsExportBPP(
                ftype, _FI.FreeImage_GetBPP(bitmap))
            handle_errors()
        else:
            can_write = _FI.FreeImage_FIFSupportsExportType(ftype, fi_type)
            handle_errors()
        if not can_write:
            raise TypeError('Cannot save image of this format '
                            'to this file type')
        res = _FI.FreeImage_Save(ftype, bitmap, filename, flags)
        handle_errors()
        if not res:
            raise RuntimeError('Could not save image properly.')
    finally:
        _FI.FreeImage_Unload(bitmap)
        handle_errors()


def write_multipage(arrays, filename, flags=0):
    """Write a list of (height, width) or (height, width, nchannels)
    arrays to a multipage greyscale, RGB, or RGBA image, with file type
    deduced from the filename.

    The `flags` parameter should be one or more values from the IoFlags
    class defined in this module, or-ed together with | as appropriate.
    (See the source-code comments for more details.)
    """
    filename = asbytes(filename)
    ftype = _FI.FreeImage_GetFIFFromFilename(filename)
    if ftype == -1:
        raise ValueError('Cannot determine type of file %s' % filename)
    create_new = True
    read_only = False
    keep_cache_in_memory = True
    multibitmap = _FI.FreeImage_OpenMultiBitmap(ftype, filename,
                                                create_new, read_only,
                                                keep_cache_in_memory, 0)
    multibitmap = ctypes.c_void_p(multibitmap)
    if not multibitmap:
        raise ValueError('Could not open %s for writing multi-page image.'
                         % filename)
    try:
        for array in arrays:
            array = numpy.asarray(array)
            bitmap, fi_type = _array_to_bitmap(array)
            _FI.FreeImage_AppendPage(multibitmap, bitmap)
    finally:
        _FI.FreeImage_CloseMultiBitmap(multibitmap, flags)

# 4-byte quads of 0,v,v,v from 0,0,0,0 to 0,255,255,255
_GREY_PALETTE = numpy.arange(0, 0x01000000, 0x00010101, dtype=numpy.uint32)


def _array_to_bitmap(array):
    """Allocate a FreeImage bitmap and copy a numpy array into it.
""" shape = array.shape dtype = array.dtype r, c = shape[:2] if len(shape) == 2: n_channels = 1 w_shape = (c, r) elif len(shape) == 3: n_channels = shape[2] w_shape = (n_channels, c, r) else: n_channels = shape[0] try: fi_type = FiTypes.fi_types[(dtype, n_channels)] except KeyError: raise ValueError('Cannot write arrays of given type and shape.') itemsize = array.dtype.itemsize bpp = 8 * itemsize * n_channels bitmap = _FI.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0) bitmap = ctypes.c_void_p(bitmap) if not bitmap: raise RuntimeError('Could not allocate image for storage') try: def n(arr): # normalise to freeimage's in-memory format return arr.T[..., ::-1] wrapped_array = _wrap_bitmap_bits_in_array(bitmap, w_shape, dtype) # swizzle the color components and flip the scanlines to go to # FreeImage's BGR[A] and upside-down internal memory format if len(shape) == 3 and _FI.FreeImage_IsLittleEndian(): r = array[:, :, 0] g = array[:, :, 1] b = array[:, :, 2] if dtype.type == numpy.uint8: wrapped_array[0] = n(b) wrapped_array[1] = n(g) wrapped_array[2] = n(r) elif dtype.type == numpy.uint16: wrapped_array[0] = n(r) wrapped_array[1] = n(g) wrapped_array[2] = n(b) if shape[2] == 4: a = array[:, :, 3] wrapped_array[3] = n(a) else: wrapped_array[:] = n(array) if len(shape) == 2 and dtype.type == numpy.uint8: palette = _FI.FreeImage_GetPalette(bitmap) palette = ctypes.c_void_p(palette) if not palette: raise RuntimeError('Could not get image palette') ctypes.memmove(palette, _GREY_PALETTE.ctypes.data, 1024) return bitmap, fi_type except: _FI.FreeImage_Unload(bitmap) raise def imread(filename): """ img = imread(filename) Reads an image from file `filename` Parameters ---------- filename : file name Returns ------- img : ndarray """ img = read(filename) return img def imsave(filename, img): ''' imsave(filename, img) Save image to disk Image type is inferred from filename Parameters ---------- filename : file name img : image to be saved as nd array ''' write(img, filename)
bsd-3-clause
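A short usage sketch for the FreeImage wrapper above. Assumptions: the FreeImage shared library is installed, and the file body shown is importable as a module named freeimage (the dump shows only the file body, so that module name is a guess).

import numpy
import freeimage  # hypothetical import name for the wrapper above

# Round-trip a small RGB image; PNG is lossless so the arrays match exactly.
img = (numpy.random.rand(64, 48, 3) * 255).astype(numpy.uint8)
freeimage.imsave('demo.png', img)  # file type deduced from the extension
assert numpy.array_equal(freeimage.imread('demo.png'), img)

# Save flags are or-ed IoFlags values, e.g. a progressive high-quality JPEG:
flags = freeimage.IoFlags.JPEG_QUALITYSUPERB | freeimage.IoFlags.JPEG_PROGRESSIVE
freeimage.write(img, 'demo.jpg', flags)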
DivineHime/seishirou
lib/chardet/utf8prober.py
290
2766
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8_SM_MODEL


class UTF8Prober(CharSetProber):
    ONE_CHAR_PROB = 0.5

    def __init__(self):
        super(UTF8Prober, self).__init__()
        self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
        self._num_mb_chars = None
        self.reset()

    def reset(self):
        super(UTF8Prober, self).reset()
        self.coding_sm.reset()
        self._num_mb_chars = 0

    @property
    def charset_name(self):
        return "utf-8"

    @property
    def language(self):
        return ""

    def feed(self, byte_str):
        for c in byte_str:
            coding_state = self.coding_sm.next_state(c)
            if coding_state == MachineState.ERROR:
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
                if self.coding_sm.get_current_charlen() >= 2:
                    self._num_mb_chars += 1

        if self.state == ProbingState.DETECTING:
            if self.get_confidence() > self.SHORTCUT_THRESHOLD:
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        unlike = 0.99
        if self._num_mb_chars < 6:
            unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
            return 1.0 - unlike
        else:
            return unlike
gpl-3.0
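A usage sketch for the prober above, assuming the rest of the chardet package (charsetprober, enums, codingstatemachine, mbcssm) is importable alongside it under the usual chardet package name:

from chardet.utf8prober import UTF8Prober

prober = UTF8Prober()
# Multi-byte UTF-8 sequences push confidence up quickly (ONE_CHAR_PROB decay).
state = prober.feed(u'こんにちは'.encode('utf-8'))
print('%s %s %.3f' % (prober.charset_name, state, prober.get_confidence()))

prober.reset()  # probers are reusable once reset
prober.feed(b'\xff\xfe not valid utf-8')  # an invalid lead byte drives NOT_ME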
xuxiao19910803/edx
lms/lib/courseware_search/lms_result_processor.py
61
2677
"""
This file contains implementation override of SearchResultProcessor which will allow
    * Blends in "location" property
    * Confirms user access to object
"""
from django.core.urlresolvers import reverse

from opaque_keys.edx.locations import SlashSeparatedCourseKey
from search.result_processor import SearchResultProcessor
from xmodule.modulestore.django import modulestore
from courseware.access import has_access


class LmsSearchResultProcessor(SearchResultProcessor):

    """ SearchResultProcessor for LMS Search """
    _course_key = None
    _course_name = None
    _usage_key = None
    _module_store = None
    _module_temp_dictionary = {}

    def get_course_key(self):
        """ fetch course key object from string representation - retain result for subsequent uses """
        if self._course_key is None:
            self._course_key = SlashSeparatedCourseKey.from_deprecated_string(self._results_fields["course"])
        return self._course_key

    def get_usage_key(self):
        """ fetch usage key for component from string representation - retain result for subsequent uses """
        if self._usage_key is None:
            self._usage_key = self.get_course_key().make_usage_key_from_deprecated_string(self._results_fields["id"])
        return self._usage_key

    def get_module_store(self):
        """ module store accessor - retain result for subsequent uses """
        if self._module_store is None:
            self._module_store = modulestore()
        return self._module_store

    def get_item(self, usage_key):
        """ fetch item from the modulestore - don't refetch if we've already retrieved it beforehand """
        if usage_key not in self._module_temp_dictionary:
            self._module_temp_dictionary[usage_key] = self.get_module_store().get_item(usage_key)
        return self._module_temp_dictionary[usage_key]

    @property
    def url(self):
        """
        Property to display the url for the given location, useful for allowing navigation
        """
        if "course" not in self._results_fields or "id" not in self._results_fields:
            raise ValueError("Must have course and id in order to build url")

        return reverse(
            "jump_to",
            kwargs={"course_id": self._results_fields["course"], "location": self._results_fields["id"]}
        )

    def should_remove(self, user):
        """ Test to see if this result should be removed due to access restriction """
        user_has_access = has_access(
            user,
            "load",
            self.get_item(self.get_usage_key()),
            self.get_course_key()
        )
        return not user_has_access
agpl-3.0
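Each getter above follows the same compute-once, cache-on-the-instance idiom; one caveat worth noting is that _module_temp_dictionary is a class attribute, so that particular cache is shared across instances. A generic sketch of the idiom (names here are illustrative, not part of the edX API):

class CachedLookup(object):
    _parsed = None  # instance-level cache slot, filled on first access

    def __init__(self, raw):
        self.raw = raw

    def get_parsed(self):
        if self._parsed is None:  # first call pays the parsing cost
            self._parsed = self.raw.strip().split('/')
        return self._parsed  # later calls reuse the stored result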
jrichte43/ProjectEuler
Problem-0121/solutions.py
1
1722
__problem_title__ = "Disc game prize fund"
__problem_url___ = "https://projecteuler.net/problem=121"
__problem_description__ = "A bag contains one red disc and one blue disc. In a game of chance a " \
                          "player takes a disc at random and its colour is noted. After each " \
                          "turn the disc is returned to the bag, an extra red disc is added, and " \
                          "another disc is taken at random. The player pays £1 to play and wins " \
                          "if they have taken more blue discs than red discs at the end of the " \
                          "game. If the game is played for four turns, the probability of a " \
                          "player winning is exactly 11/120, and so the maximum prize fund the " \
                          "banker should allocate for winning in this game would be £10 before " \
                          "they would expect to incur a loss. Note that any payout will be a " \
                          "whole number of pounds and also includes the original £1 paid to play " \
                          "the game, so in the example given the player actually wins £9. Find " \
                          "the maximum prize fund that should be allocated to a single game in " \
                          "which fifteen turns are played."

import timeit


class Solution():

    @staticmethod
    def solution1():
        pass

    @staticmethod
    def time_solutions():
        setup = 'from __main__ import Solution'
        print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))


if __name__ == '__main__':
    s = Solution()
    print(s.solution1())
    s.time_solutions()
gpl-3.0
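solution1() above is left as a stub. One way to attack the problem (a sketch under my own reading of the statement, not the repository's answer) is an exact dynamic program over the number of blue discs drawn, using Fraction so the 11/120 example checks out exactly:

from fractions import Fraction

def max_prize_fund(turns):
    probs = [Fraction(1)]  # probs[b] = P(exactly b blue discs so far)
    for i in range(1, turns + 1):
        p_blue = Fraction(1, i + 1)  # on turn i the bag holds 1 blue, i red
        new = [Fraction(0)] * (len(probs) + 1)
        for b, p in enumerate(probs):
            new[b] += p * (1 - p_blue)  # drew red
            new[b + 1] += p * p_blue    # drew blue
        probs = new
    p_win = sum(probs[turns // 2 + 1:])  # strictly more blue than red
    return int(Fraction(1) / p_win)      # whole-pound fund: floor of 1/p

assert max_prize_fund(4) == 10  # matches the 11/120 example in the statement
print(max_prize_fund(15))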
pleaseproject/python-for-android
python-build/python-libs/gdata/src/gdata/tlslite/utils/OpenSSL_AES.py
359
1822
"""OpenSSL/M2Crypto AES implementation."""

from cryptomath import *
from AES import *

if m2cryptoLoaded:

    def new(key, mode, IV):
        return OpenSSL_AES(key, mode, IV)

    class OpenSSL_AES(AES):

        def __init__(self, key, mode, IV):
            AES.__init__(self, key, mode, IV, "openssl")
            self.key = key
            self.IV = IV

        def _createContext(self, encrypt):
            context = m2.cipher_ctx_new()
            if len(self.key) == 16:
                cipherType = m2.aes_128_cbc()
            if len(self.key) == 24:
                cipherType = m2.aes_192_cbc()
            if len(self.key) == 32:
                cipherType = m2.aes_256_cbc()
            m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
            return context

        def encrypt(self, plaintext):
            AES.encrypt(self, plaintext)
            context = self._createContext(1)
            ciphertext = m2.cipher_update(context, plaintext)
            m2.cipher_ctx_free(context)
            self.IV = ciphertext[-self.block_size:]
            return ciphertext

        def decrypt(self, ciphertext):
            AES.decrypt(self, ciphertext)
            context = self._createContext(0)
            # I think M2Crypto has a bug - it fails to decrypt and return the
            # last block passed in.  To work around this, we append sixteen
            # zeros to the string, below:
            plaintext = m2.cipher_update(context, ciphertext + ('\0' * 16))

            # If this bug is ever fixed, then plaintext will end up having a
            # garbage plaintext block on the end.  That's okay - the below
            # code will discard it.
            plaintext = plaintext[:len(ciphertext)]
            m2.cipher_ctx_free(context)
            self.IV = ciphertext[-self.block_size:]
            return plaintext
apache-2.0
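A round-trip sketch for the class above. Assumptions: M2Crypto is installed so m2cryptoLoaded is true, and mode 2 is tlslite's CBC mode constant accepted by the base AES class.

key = b'\x01' * 16  # 16/24/32-byte keys select AES-128/192/256 respectively
iv = b'\x00' * 16
ct = new(key, 2, iv).encrypt(b'sixteen byte msg')  # CBC needs 16-byte blocks
# Decrypt with a fresh context and the original IV, since instances chain IVs.
assert new(key, 2, iv).decrypt(ct) == b'sixteen byte msg'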
pleaseproject/python-for-android
python-build/python-libs/gdata/build/lib/atom/data.py
136
8060
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# This module is used for version 2 of the Google Data APIs.


__author__ = 'j.s@google.com (Jeff Scudder)'


import atom.core


ATOM_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
APP_TEMPLATE_V1 = '{http://purl.org/atom/app#}%s'
APP_TEMPLATE_V2 = '{http://www.w3.org/2007/app}%s'


class Name(atom.core.XmlElement):
  """The atom:name element."""
  _qname = ATOM_TEMPLATE % 'name'


class Email(atom.core.XmlElement):
  """The atom:email element."""
  _qname = ATOM_TEMPLATE % 'email'


class Uri(atom.core.XmlElement):
  """The atom:uri element."""
  _qname = ATOM_TEMPLATE % 'uri'


class Person(atom.core.XmlElement):
  """A foundation class which atom:author and atom:contributor extend.

  A person contains information like name, email address, and web page URI
  for an author or contributor to an Atom feed.
  """
  name = Name
  email = Email
  uri = Uri


class Author(Person):
  """The atom:author element.

  An author is a required element in Feed unless each Entry contains an
  Author.
  """
  _qname = ATOM_TEMPLATE % 'author'


class Contributor(Person):
  """The atom:contributor element."""
  _qname = ATOM_TEMPLATE % 'contributor'


class Link(atom.core.XmlElement):
  """The atom:link element."""
  _qname = ATOM_TEMPLATE % 'link'
  href = 'href'
  rel = 'rel'
  type = 'type'
  hreflang = 'hreflang'
  title = 'title'
  length = 'length'


class Generator(atom.core.XmlElement):
  """The atom:generator element."""
  _qname = ATOM_TEMPLATE % 'generator'
  uri = 'uri'
  version = 'version'


class Text(atom.core.XmlElement):
  """A foundation class from which atom:title, summary, etc. extend.

  This class should never be instantiated.
  """
  type = 'type'


class Title(Text):
  """The atom:title element."""
  _qname = ATOM_TEMPLATE % 'title'


class Subtitle(Text):
  """The atom:subtitle element."""
  _qname = ATOM_TEMPLATE % 'subtitle'


class Rights(Text):
  """The atom:rights element."""
  _qname = ATOM_TEMPLATE % 'rights'


class Summary(Text):
  """The atom:summary element."""
  _qname = ATOM_TEMPLATE % 'summary'


class Content(Text):
  """The atom:content element."""
  _qname = ATOM_TEMPLATE % 'content'
  src = 'src'


class Category(atom.core.XmlElement):
  """The atom:category element."""
  _qname = ATOM_TEMPLATE % 'category'
  term = 'term'
  scheme = 'scheme'
  label = 'label'


class Id(atom.core.XmlElement):
  """The atom:id element."""
  _qname = ATOM_TEMPLATE % 'id'


class Icon(atom.core.XmlElement):
  """The atom:icon element."""
  _qname = ATOM_TEMPLATE % 'icon'


class Logo(atom.core.XmlElement):
  """The atom:logo element."""
  _qname = ATOM_TEMPLATE % 'logo'


class Draft(atom.core.XmlElement):
  """The app:draft element which indicates if this entry should be public."""
  _qname = (APP_TEMPLATE_V1 % 'draft', APP_TEMPLATE_V2 % 'draft')


class Control(atom.core.XmlElement):
  """The app:control element indicating restrictions on publication.

  The APP control element may contain a draft element indicating whether or
  not this entry should be publicly available.
  """
  _qname = (APP_TEMPLATE_V1 % 'control', APP_TEMPLATE_V2 % 'control')
  draft = Draft


class Date(atom.core.XmlElement):
  """A parent class for atom:updated, published, etc."""


class Updated(Date):
  """The atom:updated element."""
  _qname = ATOM_TEMPLATE % 'updated'


class Published(Date):
  """The atom:published element."""
  _qname = ATOM_TEMPLATE % 'published'


class LinkFinder(object):
  """An "interface" providing methods to find link elements

  Entry elements often contain multiple links which differ in the rel
  attribute or content type. Often, developers are interested in a specific
  type of link so this class provides methods to find specific classes of
  links. This class is used as a mixin in Atom entries and feeds.
  """

  def find_url(self, rel):
    """Returns the URL in a link with the desired rel value."""
    for link in self.link:
      if link.rel == rel and link.href:
        return link.href
    return None

  FindUrl = find_url

  def get_link(self, rel):
    """Returns a link object which has the desired rel value.

    If you are interested in the URL instead of the link object, consider
    using find_url instead.
    """
    for link in self.link:
      if link.rel == rel and link.href:
        return link
    return None

  GetLink = get_link

  def find_self_link(self):
    """Find the first link with rel set to 'self'

    Returns:
      A str containing the link's href or None if none of the links had rel
      equal to 'self'
    """
    return self.find_url('self')

  FindSelfLink = find_self_link

  def get_self_link(self):
    return self.get_link('self')

  GetSelfLink = get_self_link

  def find_edit_link(self):
    return self.find_url('edit')

  FindEditLink = find_edit_link

  def get_edit_link(self):
    return self.get_link('edit')

  GetEditLink = get_edit_link

  def find_edit_media_link(self):
    link = self.find_url('edit-media')
    # Search for media-edit as well since Picasa API used media-edit instead.
    if link is None:
      return self.find_url('media-edit')
    return link

  FindEditMediaLink = find_edit_media_link

  def get_edit_media_link(self):
    link = self.get_link('edit-media')
    if link is None:
      return self.get_link('media-edit')
    return link

  GetEditMediaLink = get_edit_media_link

  def find_next_link(self):
    return self.find_url('next')

  FindNextLink = find_next_link

  def get_next_link(self):
    return self.get_link('next')

  GetNextLink = get_next_link

  def find_license_link(self):
    return self.find_url('license')

  FindLicenseLink = find_license_link

  def get_license_link(self):
    return self.get_link('license')

  GetLicenseLink = get_license_link

  def find_alternate_link(self):
    return self.find_url('alternate')

  FindAlternateLink = find_alternate_link

  def get_alternate_link(self):
    return self.get_link('alternate')

  GetAlternateLink = get_alternate_link


class FeedEntryParent(atom.core.XmlElement, LinkFinder):
  """A super class for atom:feed and entry, contains shared attributes"""
  author = [Author]
  category = [Category]
  contributor = [Contributor]
  id = Id
  link = [Link]
  rights = Rights
  title = Title
  updated = Updated

  def __init__(self, atom_id=None, text=None, *args, **kwargs):
    if atom_id is not None:
      self.id = atom_id
    atom.core.XmlElement.__init__(self, text=text, *args, **kwargs)


class Source(FeedEntryParent):
  """The atom:source element."""
  _qname = ATOM_TEMPLATE % 'source'
  generator = Generator
  icon = Icon
  logo = Logo
  subtitle = Subtitle


class Entry(FeedEntryParent):
  """The atom:entry element."""
  _qname = ATOM_TEMPLATE % 'entry'
  content = Content
  published = Published
  source = Source
  summary = Summary
  control = Control


class Feed(Source):
  _qname = ATOM_TEMPLATE % 'feed'
  entry = [Entry]


class ExtensionElement(atom.core.XmlElement):
  """Provided for backwards compatibility to the v1 atom.ExtensionElement."""

  def __init__(self, tag=None, namespace=None, attributes=None,
               children=None, text=None, *args, **kwargs):
    if namespace:
      self._qname = '{%s}%s' % (namespace, tag)
    else:
      self._qname = tag
    self.children = children or []
    self.attributes = attributes or {}
    self.text = text

  _BecomeChildElement = atom.core.XmlElement._become_child
apache-2.0
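A small sketch of these element classes in use, assuming atom.core from the same gdata distribution is on the path (there, XmlElement accepts declared members as keyword arguments and provides to_string()):

entry = Entry(title=Title(text='Hello world'),
              link=[Link(rel='self', href='http://example.com/feed/1')])
print(entry.find_self_link())  # -> http://example.com/feed/1 via LinkFinder
print(entry.to_string())       # serialize the element tree back to Atom XML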
qwefi/nova
nova/tests/api/openstack/compute/test_v3_extensions.py
2
7785
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg
import stevedore
import webob.exc

from nova.api import openstack
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack import extensions
from nova import exception
from nova import test

CONF = cfg.CONF


class fake_bad_extension(object):
    name = "fake_bad_extension"
    alias = "fake-bad"


class fake_stevedore_enabled_extensions(object):
    def __init__(self, namespace, check_func, invoke_on_load=False,
                 invoke_args=(), invoke_kwds={}):
        self.extensions = []

    def map(self, func, *args, **kwds):
        pass

    def __iter__(self):
        return iter(self.extensions)


class fake_loaded_extension_info(object):
    def __init__(self):
        self.extensions = {}

    def register_extension(self, ext):
        self.extensions[ext] = ext
        return True

    def get_extensions(self):
        return {'core1': None, 'core2': None, 'noncore1': None}


class ExtensionLoadingTestCase(test.TestCase):

    def _set_v3_core(self, core_extensions):
        openstack.API_V3_CORE_EXTENSIONS = core_extensions

    def test_extensions_loaded(self):
        app = compute.APIRouterV3()
        self.assertIn('servers', app._loaded_extension_info.extensions)

    def test_check_bad_extension(self):
        extension_info = plugins.LoadedExtensionInfo()
        self.assertFalse(extension_info._check_extension(fake_bad_extension))

    def test_extensions_blacklist(self):
        app = compute.APIRouterV3()
        self.assertIn('os-fixed-ips', app._loaded_extension_info.extensions)
        CONF.set_override('extensions_blacklist', ['os-fixed-ips'], 'osapi_v3')
        app = compute.APIRouterV3()
        self.assertNotIn('os-fixed-ips',
                         app._loaded_extension_info.extensions)

    def test_extensions_whitelist_accept(self):
        # NOTE(maurosr): just to avoid to get an exception raised for not
        # loading all core api.
        v3_core = openstack.API_V3_CORE_EXTENSIONS
        openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
        self.addCleanup(self._set_v3_core, v3_core)

        app = compute.APIRouterV3()
        self.assertIn('os-fixed-ips', app._loaded_extension_info.extensions)
        CONF.set_override('extensions_whitelist', ['servers', 'os-fixed-ips'],
                          'osapi_v3')
        app = compute.APIRouterV3()
        self.assertIn('os-fixed-ips', app._loaded_extension_info.extensions)

    def test_extensions_whitelist_block(self):
        # NOTE(maurosr): just to avoid to get an exception raised for not
        # loading all core api.
        v3_core = openstack.API_V3_CORE_EXTENSIONS
        openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
        self.addCleanup(self._set_v3_core, v3_core)

        app = compute.APIRouterV3()
        self.assertIn('os-fixed-ips', app._loaded_extension_info.extensions)
        CONF.set_override('extensions_whitelist', ['servers'], 'osapi_v3')
        app = compute.APIRouterV3()
        self.assertNotIn('os-fixed-ips',
                         app._loaded_extension_info.extensions)

    def test_blacklist_overrides_whitelist(self):
        # NOTE(maurosr): just to avoid to get an exception raised for not
        # loading all core api.
        v3_core = openstack.API_V3_CORE_EXTENSIONS
        openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
        self.addCleanup(self._set_v3_core, v3_core)

        app = compute.APIRouterV3()
        self.assertIn('os-fixed-ips', app._loaded_extension_info.extensions)
        CONF.set_override('extensions_whitelist', ['servers', 'os-fixed-ips'],
                          'osapi_v3')
        CONF.set_override('extensions_blacklist', ['os-fixed-ips'], 'osapi_v3')
        app = compute.APIRouterV3()
        self.assertNotIn('os-fixed-ips',
                         app._loaded_extension_info.extensions)
        self.assertIn('servers', app._loaded_extension_info.extensions)
        self.assertEqual(len(app._loaded_extension_info.extensions), 1)

    def test_get_missing_core_extensions(self):
        v3_core = openstack.API_V3_CORE_EXTENSIONS
        openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
        self.addCleanup(self._set_v3_core, v3_core)
        self.assertEqual(len(compute.APIRouterV3.get_missing_core_extensions(
            ['core1', 'core2', 'noncore1'])), 0)
        missing_core = compute.APIRouterV3.get_missing_core_extensions(
            ['core1'])
        self.assertEqual(len(missing_core), 1)
        self.assertIn('core2', missing_core)
        missing_core = compute.APIRouterV3.get_missing_core_extensions([])
        self.assertEqual(len(missing_core), 2)
        self.assertIn('core1', missing_core)
        self.assertIn('core2', missing_core)
        missing_core = compute.APIRouterV3.get_missing_core_extensions(
            ['noncore1'])
        self.assertEqual(len(missing_core), 2)
        self.assertIn('core1', missing_core)
        self.assertIn('core2', missing_core)

    def test_core_extensions_present(self):
        self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
                       fake_stevedore_enabled_extensions)
        self.stubs.Set(plugins, 'LoadedExtensionInfo',
                       fake_loaded_extension_info)
        v3_core = openstack.API_V3_CORE_EXTENSIONS
        openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
        self.addCleanup(self._set_v3_core, v3_core)
        # if no core API extensions are missing then an exception will
        # not be raised when creating an instance of compute.APIRouterV3
        _ = compute.APIRouterV3()

    def test_core_extensions_missing(self):
        self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
                       fake_stevedore_enabled_extensions)
        self.stubs.Set(plugins, 'LoadedExtensionInfo',
                       fake_loaded_extension_info)
        self.assertRaises(exception.CoreAPIMissing, compute.APIRouterV3)

    def test_extensions_expected_error(self):
        @extensions.expected_errors(404)
        def fake_func():
            raise webob.exc.HTTPNotFound()

        self.assertRaises(webob.exc.HTTPNotFound, fake_func)

    def test_extensions_expected_error_from_list(self):
        @extensions.expected_errors((404, 403))
        def fake_func():
            raise webob.exc.HTTPNotFound()

        self.assertRaises(webob.exc.HTTPNotFound, fake_func)

    def test_extensions_unexpected_error(self):
        @extensions.expected_errors(404)
        def fake_func():
            raise webob.exc.HTTPConflict()

        self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)

    def test_extensions_unexpected_error_from_list(self):
        @extensions.expected_errors((404, 413))
        def fake_func():
            raise webob.exc.HTTPConflict()

        self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)

    def test_extensions_unexpected_policy_not_authorized_error(self):
        @extensions.expected_errors(404)
        def fake_func():
            raise exception.PolicyNotAuthorized(action="foo")

        self.assertRaises(exception.PolicyNotAuthorized, fake_func)
apache-2.0
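The last five tests pin down the contract of extensions.expected_errors; here is a from-scratch sketch of a decorator with that contract (this is not nova's actual implementation, just one that would satisfy those tests):

import functools

import webob.exc

from nova import exception  # assumes nova is importable, as in the tests above


def expected_errors(errors):
    expected = errors if isinstance(errors, tuple) else (errors,)

    def decorator(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exception.PolicyNotAuthorized:
                raise  # authorization failures always propagate unchanged
            except webob.exc.WSGIHTTPException as exc:
                if exc.code in expected:
                    raise  # declared status codes pass through to the caller
                raise webob.exc.HTTPInternalServerError()  # everything else -> 500
        return wrapped
    return decorator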
nirmeshk/oh-mainline
mysite/profile/migrations/0047_remove_stale_from_dia.py
17
10681
# This file is part of OpenHatch. # Copyright (C) 2009 OpenHatch, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from south.db import db from django.db import models from mysite.profile.models import * class Migration: def forwards(self, orm): # Deleting field 'DataImportAttempt.stale' db.delete_column('profile_dataimportattempt', 'stale') def backwards(self, orm): # Adding field 'DataImportAttempt.stale' db.add_column('profile_dataimportattempt', 'stale', orm['profile.dataimportattempt:stale']) models = { 'auth.group': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)"}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': 
'100'}) }, 'profile.dataimportattempt': { 'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}), 'person_wants_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, 'profile.link_person_tag': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"}) }, 'profile.link_project_tag': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"}) }, 'profile.link_projectexp_tag': { 'Meta': {'unique_together': "[('tag', 'project_exp', 'source')]"}, 'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project_exp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.ProjectExp']"}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"}) }, 'profile.link_sf_proj_dude_fm': { 'Meta': {'unique_together': "[('person', 'project')]"}, 'date_collected': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgePerson']"}), 'position': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgeProject']"}) }, 'profile.person': { 'gotten_name_from_ohloh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interested_in_working_on': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}), 'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100'}), 'show_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'profile.projectexp': { 'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'man_months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'modified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']", 'null': 'True'}), 'person_role': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}), 'should_show_this': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}) }, 'profile.sourceforgeperson': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'profile.sourceforgeproject': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'unixname': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'profile.tag': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'tag_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.TagType']"}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'profile.tagtype': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'prefix': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, 'search.project': { 'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}) } } complete_apps = ['profile']
agpl-3.0
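The migration above is the standard South shape for dropping a column; the symmetric add-column case looks like the following sketch (the table and field names here are hypothetical, only the orm-key format is taken from the migration above):

from south.db import db


class Migration:

    def forwards(self, orm):
        # hypothetical column: table name, column name, then field definition
        db.add_column('profile_person', 'nickname',
                      orm['profile.person:nickname'])

    def backwards(self, orm):
        db.delete_column('profile_person', 'nickname')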
tgsd96/gargnotes
venv/lib/python2.7/site-packages/django/db/migrations/operations/models.py
22
16872
from __future__ import unicode_literals from django.db import models from django.db.models.options import normalize_together from django.db.migrations.state import ModelState from django.db.migrations.operations.base import Operation from django.utils import six class CreateModel(Operation): """ Create a model's table. """ serialization_expand_args = ['fields', 'options'] def __init__(self, name, fields, options=None, bases=None): self.name = name self.fields = fields self.options = options or {} self.bases = bases or (models.Model,) def state_forwards(self, app_label, state): state.models[app_label, self.name.lower()] = ModelState( app_label, self.name, list(self.fields), dict(self.options), tuple(self.bases), ) def database_forwards(self, app_label, schema_editor, from_state, to_state): apps = to_state.render() model = apps.get_model(app_label, self.name) if self.allowed_to_migrate(schema_editor.connection.alias, model): schema_editor.create_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): apps = from_state.render() model = apps.get_model(app_label, self.name) if self.allowed_to_migrate(schema_editor.connection.alias, model): schema_editor.delete_model(model) def describe(self): return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name) def references_model(self, name, app_label=None): strings_to_check = [self.name] # Check we didn't inherit from the model for base in self.bases: if isinstance(base, six.string_types): strings_to_check.append(base.split(".")[-1]) # Check we have no FKs/M2Ms with it for fname, field in self.fields: if field.rel: if isinstance(field.rel.to, six.string_types): strings_to_check.append(field.rel.to.split(".")[-1]) # Now go over all the strings and compare them for string in strings_to_check: if string.lower() == name.lower(): return True return False def __eq__(self, other): return ( (self.__class__ == other.__class__) and (self.name == other.name) and (self.options == other.options) and (self.bases == other.bases) and ([(k, f.deconstruct()[1:]) for k, f in self.fields] == [(k, f.deconstruct()[1:]) for k, f in other.fields]) ) class DeleteModel(Operation): """ Drops a model's table. """ def __init__(self, name): self.name = name def state_forwards(self, app_label, state): del state.models[app_label, self.name.lower()] def database_forwards(self, app_label, schema_editor, from_state, to_state): apps = from_state.render() model = apps.get_model(app_label, self.name) if self.allowed_to_migrate(schema_editor.connection.alias, model): schema_editor.delete_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): apps = to_state.render() model = apps.get_model(app_label, self.name) if self.allowed_to_migrate(schema_editor.connection.alias, model): schema_editor.create_model(model) def references_model(self, name, app_label=None): return name.lower() == self.name.lower() def describe(self): return "Delete model %s" % (self.name, ) class RenameModel(Operation): """ Renames a model. 
""" def __init__(self, old_name, new_name): self.old_name = old_name self.new_name = new_name def state_forwards(self, app_label, state): # Get all of the related objects we need to repoint apps = state.render(skip_cache=True) model = apps.get_model(app_label, self.old_name) related_objects = model._meta.get_all_related_objects() related_m2m_objects = model._meta.get_all_related_many_to_many_objects() # Rename the model state.models[app_label, self.new_name.lower()] = state.models[app_label, self.old_name.lower()] state.models[app_label, self.new_name.lower()].name = self.new_name del state.models[app_label, self.old_name.lower()] # Repoint the FKs and M2Ms pointing to us for related_object in (related_objects + related_m2m_objects): # Use the new related key for self referential related objects. if related_object.model == model: related_key = (app_label, self.new_name.lower()) else: related_key = ( related_object.model._meta.app_label, related_object.model._meta.object_name.lower(), ) new_fields = [] for name, field in state.models[related_key].fields: if name == related_object.field.name: field = field.clone() field.rel.to = "%s.%s" % (app_label, self.new_name) new_fields.append((name, field)) state.models[related_key].fields = new_fields def database_forwards(self, app_label, schema_editor, from_state, to_state): new_apps = to_state.render() new_model = new_apps.get_model(app_label, self.new_name) if self.allowed_to_migrate(schema_editor.connection.alias, new_model): old_apps = from_state.render() old_model = old_apps.get_model(app_label, self.old_name) # Move the main table schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Alter the fields pointing to us related_objects = old_model._meta.get_all_related_objects() related_m2m_objects = old_model._meta.get_all_related_many_to_many_objects() for related_object in (related_objects + related_m2m_objects): if related_object.model == old_model: model = new_model related_key = (app_label, self.new_name.lower()) else: model = related_object.model related_key = ( related_object.model._meta.app_label, related_object.model._meta.object_name.lower(), ) to_field = new_apps.get_model( *related_key )._meta.get_field_by_name(related_object.field.name)[0] schema_editor.alter_field( model, related_object.field, to_field, ) # Rename M2M fields whose name is based on this model's name. fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many) for (old_field, new_field) in fields: # Skip self-referential fields as these are renamed above. if new_field.model == new_field.related.parent_model or not new_field.rel.through._meta.auto_created: continue # Rename the M2M table that's based on this model's name. old_m2m_model = old_field.rel.through new_m2m_model = new_field.rel.through schema_editor.alter_db_table( new_m2m_model, old_m2m_model._meta.db_table, new_m2m_model._meta.db_table, ) # Rename the column in the M2M table that's based on this # model's name. 
schema_editor.alter_field( new_m2m_model, old_m2m_model._meta.get_field(old_model._meta.model_name), new_m2m_model._meta.get_field(new_model._meta.model_name), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name, self.old_name = self.old_name, self.new_name def references_model(self, name, app_label=None): return ( name.lower() == self.old_name.lower() or name.lower() == self.new_name.lower() ) def describe(self): return "Rename model %s to %s" % (self.old_name, self.new_name) class AlterModelTable(Operation): """ Renames a model's table """ def __init__(self, name, table): self.name = name self.table = table def state_forwards(self, app_label, state): state.models[app_label, self.name.lower()].options["db_table"] = self.table def database_forwards(self, app_label, schema_editor, from_state, to_state): new_apps = to_state.render() new_model = new_apps.get_model(app_label, self.name) if self.allowed_to_migrate(schema_editor.connection.alias, new_model): old_apps = from_state.render() old_model = old_apps.get_model(app_label, self.name) schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Rename M2M fields whose name is based on this model's db_table for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many): if new_field.rel.through._meta.auto_created: schema_editor.alter_db_table( new_field.rel.through, old_field.rel.through._meta.db_table, new_field.rel.through._meta.db_table, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_model(self, name, app_label=None): return name.lower() == self.name.lower() def describe(self): return "Rename table for %s to %s" % (self.name, self.table) class AlterUniqueTogether(Operation): """ Changes the value of unique_together to the target one. Input value of unique_together must be a set of tuples. """ option_name = "unique_together" def __init__(self, name, unique_together): self.name = name unique_together = normalize_together(unique_together) self.unique_together = set(tuple(cons) for cons in unique_together) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name.lower()] model_state.options[self.option_name] = self.unique_together def database_forwards(self, app_label, schema_editor, from_state, to_state): new_apps = to_state.render() new_model = new_apps.get_model(app_label, self.name) if self.allowed_to_migrate(schema_editor.connection.alias, new_model): old_apps = from_state.render() old_model = old_apps.get_model(app_label, self.name) schema_editor.alter_unique_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_model(self, name, app_label=None): return name.lower() == self.name.lower() def describe(self): return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or '')) class AlterIndexTogether(Operation): """ Changes the value of index_together to the target one. Input value of index_together must be a set of tuples. 
""" option_name = "index_together" def __init__(self, name, index_together): self.name = name index_together = normalize_together(index_together) self.index_together = set(tuple(cons) for cons in index_together) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name.lower()] model_state.options[self.option_name] = self.index_together def database_forwards(self, app_label, schema_editor, from_state, to_state): new_apps = to_state.render() new_model = new_apps.get_model(app_label, self.name) if self.allowed_to_migrate(schema_editor.connection.alias, new_model): old_apps = from_state.render() old_model = old_apps.get_model(app_label, self.name) schema_editor.alter_index_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_model(self, name, app_label=None): return name.lower() == self.name.lower() def describe(self): return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or '')) class AlterOrderWithRespectTo(Operation): """ Represents a change with the order_with_respect_to option. """ def __init__(self, name, order_with_respect_to): self.name = name self.order_with_respect_to = order_with_respect_to def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name.lower()] model_state.options['order_with_respect_to'] = self.order_with_respect_to def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.render().get_model(app_label, self.name) if self.allowed_to_migrate(schema_editor.connection.alias, to_model): from_model = from_state.render().get_model(app_label, self.name) # Remove a field if we need to if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to: schema_editor.remove_field(from_model, from_model._meta.get_field_by_name("_order")[0]) # Add a field if we need to (altering the column is untouched as # it's likely a rename) elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to: field = to_model._meta.get_field_by_name("_order")[0] if not field.has_default(): field.default = 0 schema_editor.add_field( from_model, field, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def references_model(self, name, app_label=None): return name.lower() == self.name.lower() def describe(self): return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to) class AlterModelOptions(Operation): """ Sets new model options that don't directly affect the database schema (like verbose_name, permissions, ordering). Python code in migrations may still need them. 
""" # Model options we want to compare and preserve in an AlterModelOptions op ALTER_OPTION_KEYS = [ "get_latest_by", "managed", "ordering", "permissions", "default_permissions", "select_on_save", "verbose_name", "verbose_name_plural", ] def __init__(self, name, options): self.name = name self.options = options def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name.lower()] model_state.options = dict(model_state.options) model_state.options.update(self.options) for key in self.ALTER_OPTION_KEYS: if key not in self.options and key in model_state.options: del model_state.options[key] def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def references_model(self, name, app_label=None): return name.lower() == self.name.lower() def describe(self): return "Change Meta options on %s" % (self.name, )
mit
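Operations like those defined above are consumed from a migration module's operations list; a minimal hand-written example follows (app, model and field names are hypothetical):

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [('shop', '0001_initial')]  # hypothetical app history

    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(primary_key=True)),
                ('email', models.EmailField(max_length=75)),
                ('region', models.CharField(max_length=32)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='customer',
            unique_together=set([('email', 'region')]),
        ),
    ]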
nzavagli/UnrealPy
UnrealPyEmbed/Source/Python/Lib/python27/test/test_pyclbr.py
36
7917
''' Test cases for pyclbr.py Nick Mathewson ''' from test.test_support import run_unittest, import_module import sys from types import ClassType, FunctionType, MethodType, BuiltinFunctionType import pyclbr from unittest import TestCase StaticMethodType = type(staticmethod(lambda: None)) ClassMethodType = type(classmethod(lambda c: None)) # Silence Py3k warning import_module('commands', deprecated=True) # This next line triggers an error on old versions of pyclbr. from commands import getstatus # Here we test the python class browser code. # # The main function in this suite, 'testModule', compares the output # of pyclbr with the introspected members of a module. Because pyclbr # is imperfect (as designed), testModule is called with a set of # members to ignore. class PyclbrTest(TestCase): def assertListEq(self, l1, l2, ignore): ''' succeed iff {l1} - {ignore} == {l2} - {ignore} ''' missing = (set(l1) ^ set(l2)) - set(ignore) if missing: print >>sys.stderr, "l1=%r\nl2=%r\nignore=%r" % (l1, l2, ignore) self.fail("%r missing" % missing.pop()) def assertHasattr(self, obj, attr, ignore): ''' succeed iff hasattr(obj,attr) or attr in ignore. ''' if attr in ignore: return if not hasattr(obj, attr): print "???", attr self.assertTrue(hasattr(obj, attr), 'expected hasattr(%r, %r)' % (obj, attr)) def assertHaskey(self, obj, key, ignore): ''' succeed iff key in obj or key in ignore. ''' if key in ignore: return if key not in obj: print >>sys.stderr, "***", key self.assertIn(key, obj) def assertEqualsOrIgnored(self, a, b, ignore): ''' succeed iff a == b or a in ignore or b in ignore ''' if a not in ignore and b not in ignore: self.assertEqual(a, b) def checkModule(self, moduleName, module=None, ignore=()): ''' succeed iff pyclbr.readmodule_ex(modulename) corresponds to the actual module object, module. Any identifiers in ignore are ignored. If no module is provided, the appropriate module is loaded with __import__.''' if module is None: # Import it. # ('<silly>' is to work around an API silliness in __import__) module = __import__(moduleName, globals(), {}, ['<silly>']) dict = pyclbr.readmodule_ex(moduleName) def ismethod(oclass, obj, name): classdict = oclass.__dict__ if isinstance(obj, FunctionType): if not isinstance(classdict[name], StaticMethodType): return False else: if not isinstance(obj, MethodType): return False if obj.im_self is not None: if (not isinstance(classdict[name], ClassMethodType) or obj.im_self is not oclass): return False else: if not isinstance(classdict[name], FunctionType): return False objname = obj.__name__ if objname.startswith("__") and not objname.endswith("__"): objname = "_%s%s" % (obj.im_class.__name__, objname) return objname == name # Make sure the toplevel functions and classes are the same. 
for name, value in dict.items(): if name in ignore: continue self.assertHasattr(module, name, ignore) py_item = getattr(module, name) if isinstance(value, pyclbr.Function): self.assertIsInstance(py_item, (FunctionType, BuiltinFunctionType)) if py_item.__module__ != moduleName: continue # skip functions that came from somewhere else self.assertEqual(py_item.__module__, value.module) else: self.assertIsInstance(py_item, (ClassType, type)) if py_item.__module__ != moduleName: continue # skip classes that came from somewhere else real_bases = [base.__name__ for base in py_item.__bases__] pyclbr_bases = [ getattr(base, 'name', base) for base in value.super ] try: self.assertListEq(real_bases, pyclbr_bases, ignore) except: print >>sys.stderr, "class=%s" % py_item raise actualMethods = [] for m in py_item.__dict__.keys(): if ismethod(py_item, getattr(py_item, m), m): actualMethods.append(m) foundMethods = [] for m in value.methods.keys(): if m[:2] == '__' and m[-2:] != '__': foundMethods.append('_'+name+m) else: foundMethods.append(m) try: self.assertListEq(foundMethods, actualMethods, ignore) self.assertEqual(py_item.__module__, value.module) self.assertEqualsOrIgnored(py_item.__name__, value.name, ignore) # can't check file or lineno except: print >>sys.stderr, "class=%s" % py_item raise # Now check for missing stuff. def defined_in(item, module): if isinstance(item, ClassType): return item.__module__ == module.__name__ if isinstance(item, FunctionType): return item.func_globals is module.__dict__ return False for name in dir(module): item = getattr(module, name) if isinstance(item, (ClassType, FunctionType)): if defined_in(item, module): self.assertHaskey(dict, name, ignore) def test_easy(self): self.checkModule('pyclbr') self.checkModule('doctest', ignore=("DocTestCase",)) # Silence Py3k warning rfc822 = import_module('rfc822', deprecated=True) self.checkModule('rfc822', rfc822) self.checkModule('difflib') def test_decorators(self): # XXX: See comment in pyclbr_input.py for a test that would fail # if it were not commented out. # self.checkModule('test.pyclbr_input') def test_others(self): cm = self.checkModule # These were once about the 10 longest modules cm('random', ignore=('Random',)) # from _random import Random as CoreGenerator cm('cgi', ignore=('log',)) # set with = in module cm('urllib', ignore=('_CFNumberToInt32', '_CStringFromCFString', '_CFSetup', 'getproxies_registry', 'proxy_bypass_registry', 'proxy_bypass_macosx_sysconf', 'open_https', 'getproxies_macosx_sysconf', 'getproxies_internetconfig',)) # not on all platforms cm('pickle') cm('aifc', ignore=('openfp',)) # set with = in module cm('Cookie') cm('sre_parse', ignore=('dump', 'groups')) # from sre_constants import *; property cm('pdb') cm('pydoc') # Tests for modules inside packages cm('email.parser') cm('test.test_pyclbr') def test_issue_14798(self): # test ImportError is raised when the first part of a dotted name is # not a package self.assertRaises(ImportError, pyclbr.readmodule_ex, 'asyncore.foo') def test_main(): run_unittest(PyclbrTest) if __name__ == "__main__": test_main()
mit
dcroc16/skunk_works
google_appengine/lib/django-1.5/tests/regressiontests/utils/dateformat.py
51
6241
from __future__ import unicode_literals

from datetime import datetime, date
import os
import time

from django.utils.dateformat import format
from django.utils import dateformat, translation, unittest
from django.utils.timezone import utc
from django.utils.tzinfo import FixedOffset, LocalTimezone


class DateFormatTests(unittest.TestCase):
    def setUp(self):
        self.old_TZ = os.environ.get('TZ')
        os.environ['TZ'] = 'Europe/Copenhagen'
        translation.activate('en-us')

        try:
            # Check if a timezone has been set
            time.tzset()
            self.tz_tests = True
        except AttributeError:
            # No timezone available. Don't run the tests that require a TZ
            self.tz_tests = False

    def tearDown(self):
        if self.old_TZ is None:
            del os.environ['TZ']
        else:
            os.environ['TZ'] = self.old_TZ

        # Cleanup - force re-evaluation of TZ environment variable.
        if self.tz_tests:
            time.tzset()

    def test_date(self):
        d = date(2009, 5, 16)
        self.assertEqual(date.fromtimestamp(int(format(d, 'U'))), d)

    def test_naive_datetime(self):
        dt = datetime(2009, 5, 16, 5, 30, 30)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt)

    def test_datetime_with_local_tzinfo(self):
        ltz = LocalTimezone(datetime.now())
        dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=ltz)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.replace(tzinfo=None))

    def test_datetime_with_tzinfo(self):
        tz = FixedOffset(-510)
        ltz = LocalTimezone(datetime.now())
        dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz), dt)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.astimezone(ltz).replace(tzinfo=None))
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz).utctimetuple(), dt.utctimetuple())
        self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz).utctimetuple(), dt.utctimetuple())

    def test_epoch(self):
        udt = datetime(1970, 1, 1, tzinfo=utc)
        self.assertEqual(format(udt, 'U'), '0')

    def test_empty_format(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)

        self.assertEqual(dateformat.format(my_birthday, ''), '')

    def test_am_pm(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)

        self.assertEqual(dateformat.format(my_birthday, 'a'), 'p.m.')

    def test_microsecond(self):
        # Regression test for #18951
        dt = datetime(2009, 5, 16, microsecond=123)
        self.assertEqual(dateformat.format(dt, 'u'), '000123')

    def test_date_formats(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)
        timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)

        self.assertEqual(dateformat.format(my_birthday, 'A'), 'PM')
        self.assertEqual(dateformat.format(timestamp, 'c'), '2008-05-19T11:45:23.123456')
        self.assertEqual(dateformat.format(my_birthday, 'd'), '08')
        self.assertEqual(dateformat.format(my_birthday, 'j'), '8')
        self.assertEqual(dateformat.format(my_birthday, 'l'), 'Sunday')
        self.assertEqual(dateformat.format(my_birthday, 'L'), 'False')
        self.assertEqual(dateformat.format(my_birthday, 'm'), '07')
        self.assertEqual(dateformat.format(my_birthday, 'M'), 'Jul')
        self.assertEqual(dateformat.format(my_birthday, 'b'), 'jul')
        self.assertEqual(dateformat.format(my_birthday, 'n'), '7')
        self.assertEqual(dateformat.format(my_birthday, 'N'), 'July')

    def test_time_formats(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)

        self.assertEqual(dateformat.format(my_birthday, 'P'), '10 p.m.')
        self.assertEqual(dateformat.format(my_birthday, 's'), '00')
        self.assertEqual(dateformat.format(my_birthday, 'S'), 'th')
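        # ('t' = days in the given month, 'w' = weekday digit (0 = Sunday),
        # 'W' = ISO-8601 week number, 'y'/'Y' = two-/four-digit year,
        # 'z' = day of the year.)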
        self.assertEqual(dateformat.format(my_birthday, 't'), '31')
        self.assertEqual(dateformat.format(my_birthday, 'w'), '0')
        self.assertEqual(dateformat.format(my_birthday, 'W'), '27')
        self.assertEqual(dateformat.format(my_birthday, 'y'), '79')
        self.assertEqual(dateformat.format(my_birthday, 'Y'), '1979')
        self.assertEqual(dateformat.format(my_birthday, 'z'), '189')

    def test_dateformat(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)

        self.assertEqual(dateformat.format(my_birthday, r'Y z \C\E\T'), '1979 189 CET')

        self.assertEqual(dateformat.format(my_birthday, r'jS \o\f F'), '8th of July')

    def test_futuredates(self):
        the_future = datetime(2100, 10, 25, 0, 00)
        self.assertEqual(dateformat.format(the_future, r'Y'), '2100')

    def test_timezones(self):
        my_birthday = datetime(1979, 7, 8, 22, 00)
        summertime = datetime(2005, 10, 30, 1, 00)
        wintertime = datetime(2005, 10, 30, 4, 00)
        timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)

        if self.tz_tests:
            self.assertEqual(dateformat.format(my_birthday, 'O'), '+0100')
            self.assertEqual(dateformat.format(my_birthday, 'r'), 'Sun, 8 Jul 1979 22:00:00 +0100')
            self.assertEqual(dateformat.format(my_birthday, 'T'), 'CET')
            self.assertEqual(dateformat.format(my_birthday, 'U'), '300315600')
            self.assertEqual(dateformat.format(timestamp, 'u'), '123456')
            self.assertEqual(dateformat.format(my_birthday, 'Z'), '3600')
            self.assertEqual(dateformat.format(summertime, 'I'), '1')
            self.assertEqual(dateformat.format(summertime, 'O'), '+0200')
            self.assertEqual(dateformat.format(wintertime, 'I'), '0')
            self.assertEqual(dateformat.format(wintertime, 'O'), '+0100')

        # Ticket #16924 -- We don't need timezone support to test this
        # 3h30m to the west of UTC
        tz = FixedOffset(-3*60 - 30)
        dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
        self.assertEqual(dateformat.format(dt, 'O'), '-0330')
mit