repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
simplegeo/eventlet | eventlet/green/_socket_nodns.py | 1 | 4979 | __socket = __import__('socket')
exec "\n".join(["%s = __socket.%s" % (var, var) for var in __socket.__all__])
os = __import__('os')
import sys
import warnings
from eventlet.hubs import get_hub
from eventlet.greenio import GreenSocket as socket
from eventlet.greenio import SSL as _SSL # for exceptions
from eventlet.greenio import _GLOBAL_DEFAULT_TIMEOUT
from eventlet.greenio import _fileobject
__all__ = __socket.__all__
__patched__ = ['fromfd', 'socketpair', 'create_connection', 'ssl', 'socket']
# Expose a green fromfd() only on platforms whose socket module provides one.
try:
    __original_fromfd__ = __socket.fromfd
except AttributeError:
    pass
else:
    def fromfd(*args):
        """Duplicate an existing descriptor into a cooperative GreenSocket."""
        return socket(__original_fromfd__(*args))
# Expose a green socketpair() only where the platform supports it.
try:
    __original_socketpair__ = __socket.socketpair
except AttributeError:
    pass
else:
    def socketpair(*args):
        """Create a connected pair of cooperative GreenSockets."""
        first, second = __original_socketpair__(*args)
        return socket(first), socket(second)
def create_connection(address,
                      timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is given, the socket is bound to it
    before connecting.
    """
    # Fallback error text, used only if getaddrinfo() yields no candidates
    # at all; otherwise `msg` is rebound to the last connect failure below.
    msg = "getaddrinfo returns an empty list"
    host, port = address
    # Try every candidate returned by getaddrinfo until one connects.
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except error, msg:  # Python 2 syntax; `msg` records the last failure
            if sock is not None:
                # Close the half-constructed socket before trying the next
                # candidate so file descriptors are not leaked.
                sock.close()
    # All candidates failed (or there were none): re-raise the last error.
    raise error, msg
def _convert_to_sslerror(ex):
    """Translate an SSL.SysCallError into the socket module's sslerror type."""
    code, text = ex.args[0], ex.args[1]
    return sslerror((code, text))
class GreenSSLObject(object):
    """ Wrapper object around the SSLObjects returned by socket.ssl, which have a
    slightly different interface from SSL.Connection objects. """
    def __init__(self, green_ssl_obj):
        """ Should only be called by a 'green' socket.ssl.

        If the underlying connection already has a peer, the TLS handshake is
        performed immediately; otherwise it is deferred to first use.
        """
        self.connection = green_ssl_obj
        try:
            # if it's already connected, do the handshake
            self.connection.getpeername()
        except:
            # Not connected yet; skip the eager handshake.
            pass
        else:
            try:
                self.connection.do_handshake()
            except _SSL.SysCallError, e:
                # Re-raise as the stdlib-compatible sslerror type.
                raise _convert_to_sslerror(e)

    def read(self, n=1024):
        """If n is provided, read n bytes from the SSL connection, otherwise read
        until EOF. The return value is a string of the bytes read."""
        try:
            return self.connection.read(n)
        except _SSL.ZeroReturnError:
            # Clean TLS shutdown from the peer is reported as EOF.
            return ''
        except _SSL.SysCallError, e:
            raise _convert_to_sslerror(e)

    def write(self, s):
        """Writes the string s to the on the object's SSL connection.
        The return value is the number of bytes written. """
        try:
            return self.connection.write(s)
        except _SSL.SysCallError, e:
            raise _convert_to_sslerror(e)

    def server(self):
        """ Returns a string describing the server's certificate. Useful for debugging
        purposes; do not parse the content of this string because its format can't be
        parsed unambiguously. """
        return str(self.connection.get_peer_certificate().get_subject())

    def issuer(self):
        """Returns a string describing the issuer of the server's certificate. Useful
        for debugging purposes; do not parse the content of this string because its
        format can't be parsed unambiguously."""
        return str(self.connection.get_peer_certificate().get_issuer())
# Emulate the long-deprecated socket.ssl()/socket.sslerror API on top of
# eventlet's green sockets, but only when the real socket module has it.
try:
    try:
        # >= Python 2.6
        from eventlet.green import ssl as ssl_module
        sslerror = __socket.sslerror
        __socket.ssl  # attribute probe: raises AttributeError when absent
        def ssl(sock, certificate=None, private_key=None):
            warnings.warn("socket.ssl() is deprecated. Use ssl.wrap_socket() instead.",
                          DeprecationWarning, stacklevel=2)
            return ssl_module.sslwrap_simple(sock, private_key, certificate)
    except ImportError:
        # <= Python 2.5 compatibility
        sslerror = __socket.sslerror
        __socket.ssl  # attribute probe, as above
        def ssl(sock, certificate=None, private_key=None):
            # Imported lazily to avoid a hard dependency at module load time.
            from eventlet import util
            wrapped = util.wrap_ssl(sock, certificate, private_key)
            return GreenSSLObject(wrapped)
except AttributeError:
    # if the real socket module doesn't have the ssl method or sslerror
    # exception, we can't emulate them
    pass
| mit |
AngryBork/apex-sigma-plugins | moderation/server_settings/roles/self_assignable_roles/addselfrole.py | 3 | 1597 | import discord
from sigma.core.utilities.role_processing import matching_role
async def addselfrole(cmd, message, args):
    """Marks a role as self-assignable on the current guild.

    Requires the invoker to hold Manage Roles in the channel; the target role
    must sit below the bot's own highest role.
    """
    if not message.author.permissions_in(message.channel).manage_roles:
        response = discord.Embed(title='⛔ Access Denied. Manage Roles needed.', color=0xBE1931)
    elif not args:
        response = discord.Embed(color=0xBE1931, title='❗ Nothing inputted.')
    else:
        lookup = ' '.join(args)
        target_role = matching_role(message.guild, lookup)
        if not target_role:
            response = discord.Embed(color=0x696969, title=f'🔍 I can\'t find {lookup} on this server.')
        elif not target_role.position < message.guild.me.top_role.position:
            # The bot cannot manage roles at or above its own top role.
            response = discord.Embed(color=0xBE1931, title='❗ This role is above my highest role.')
        else:
            selfroles = cmd.db.get_guild_settings(message.guild.id, 'SelfRoles')
            if selfroles is None:
                selfroles = []
            if target_role.id in selfroles:
                response = discord.Embed(color=0xBE1931, title='❗ This role is already self assignable.')
            else:
                selfroles.append(target_role.id)
                cmd.db.set_guild_settings(message.guild.id, 'SelfRoles', selfroles)
                response = discord.Embed(color=0x77B255, title=f'✅ {target_role.name} added.')
    await message.channel.send(embed=response)
| gpl-3.0 |
saintfrank/yard | apply/apply/settings.py | 1 | 2836 | """
Django settings for apply project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = '--v$_^*0r5(ok1^2sxdm4w_wwskvuv-z0tcop+yf1-m@+7p#5i'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

# An empty list is only acceptable while DEBUG is True.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'gunicorn',
    'bootstrapform',
    'yard',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'apply.urls'

WSGI_APPLICATION = 'apply.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# NOTE(review): credentials are hard-coded here; move them to environment
# variables or a non-committed local settings file.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'yard',  # Or path to database file if using sqlite3.
        'USER': 'frankie',  # Not used with sqlite3.
        'PASSWORD': 'frankie',  # Not used with sqlite3.
        'HOST': '',  # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',  # Set to empty string for default. Not used with sqlite3.
    }
}

# Alternate (RDS) connection settings kept for reference:
#'PASSWORD': 'frankie', # Not used with sqlite3.
#'HOST': 'archivione.caprj3rjkmkr.us-west-2.rds.amazonaws.com', # Set to empty string for localhost. Not used with sqlite3.
#'PORT': '3306', # Set to empty string for default. Not used with sqlite3.

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/

STATIC_URL = '/static/'
DOCS_URL = BASE_DIR + '/yard/static/docs/'

# Minimal logging config: version key only, Django defaults apply.
LOGGING = {
    'version': 1,
}

# Send users to the site root after login.
LOGIN_REDIRECT_URL="/"
| mit |
danielharbor/openerp | addons/purchase/__openerp__.py | 42 | 3903 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest for the Purchase Management module.
{
    'name': 'Purchase Management',
    'version': '1.1',
    'category': 'Purchase Management',
    'sequence': 19,
    'summary': 'Purchase Orders, Receipts, Supplier Invoices',
    'description': """
Manage goods requirement by Purchase Orders easily
==================================================
Purchase management enables you to track your suppliers' price quotations and convert them into purchase orders if necessary.
OpenERP has several methods of monitoring invoices and tracking the receipt of ordered goods. You can handle partial deliveries in OpenERP, so you can keep track of items that are still to be delivered in your orders, and you can issue reminders automatically.
OpenERP’s replenishment management rules enable the system to generate draft purchase orders automatically, or you can configure it to run a lean process driven entirely by current production needs.
Dashboard / Reports for Purchase Management will include:
---------------------------------------------------------
* Request for Quotations
* Purchase Orders Waiting Approval
* Monthly Purchases by Category
* Receipt Analysis
* Purchase Analysis
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/purchase',
    'images': ['images/purchase_order.jpeg', 'images/purchase_analysis.jpeg', 'images/request_for_quotation.jpeg'],
    'depends': ['stock_account', 'report'],
    # Data files loaded at install/update time (security, workflow, views).
    'data': [
        'security/purchase_security.xml',
        'security/ir.model.access.csv',
        'purchase_workflow.xml',
        'purchase_sequence.xml',
        'company_view.xml',
        'purchase_data.xml',
        'purchase_data.yml',
        'wizard/purchase_order_group_view.xml',
        'wizard/purchase_line_invoice_view.xml',
        'purchase_report.xml',
        'purchase_view.xml',
        'stock_view.xml',
        'partner_view.xml',
        'report/purchase_report_view.xml',
        'edi/purchase_order_action_data.xml',
        'res_config_view.xml',
        'views/report_purchaseorder.xml',
        'views/report_purchasequotation.xml',
    ],
    # YAML test scenarios run by the OpenERP test framework.
    'test': [
        'test/ui/purchase_users.yml',
        'test/process/run_scheduler.yml',
        'test/fifo_price.yml',
        'test/fifo_returns.yml',
        #'test/costmethodchange.yml',
        'test/process/cancel_order.yml',
        'test/process/rfq2order2done.yml',
        'test/process/generate_invoice_from_reception.yml',
        'test/process/merge_order.yml',
        'test/process/edi_purchase_order.yml',
        'test/process/invoice_on_poline.yml',
        'test/ui/duplicate_order.yml',
        'test/ui/delete_order.yml',
        'test/average_price.yml',
    ],
    'demo': [
        'purchase_order_demo.yml',
        'purchase_demo.xml',
        'purchase_stock_demo.yml',
    ],
    'installable': True,
    'auto_install': False,
    'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zuazo-forks/graphite-web | contrib/test_aggregator_rules.py | 65 | 1172 | import sys
from os.path import dirname, join, abspath
# Figure out where we're installed
ROOT_DIR = dirname(dirname(abspath(__file__)))
# Make sure that carbon's 'lib' dir is in the $PYTHONPATH if we're running from source.
LIB_DIR = join(ROOT_DIR, 'graphite', 'lib')
sys.path.insert(0, LIB_DIR)
from carbon.aggregator.rules import RuleManager
### Basic usage
# Require exactly two CLI arguments: an aggregation rule and a sample metric
# line; otherwise print usage and exit with a distinctive status code.
if len(sys.argv) != 3:
    print "Usage: %s 'aggregator rule' 'line item'" % (__file__)
    print "\nSample invocation: %s %s %s" % \
        (__file__, "'<prefix>.<env>.<key>.sum.all (10) = sum <prefix>.<env>.<<key>>.sum.<node>'", 'stats.prod.js.ktime_sum.sum.host2' )
    sys.exit(42)

### cli arguments
me, raw_rule, raw_metric = sys.argv

### XXX rather whitebox, by reading the source ;(
# Parse the rule text into a rule object with a compiled regex + template.
rm = RuleManager
rule = rm.parse_definition( raw_rule )

### rule/parsed rule
print "Raw rule: %s" % raw_rule
print "Parsed rule: %s" % rule.regex.pattern

print "\n======\n"

### run the parse
# Apply the rule's regex to the sample metric and show the aggregated name.
match = rule.regex.match( raw_metric )
print "Raw metric: %s" % raw_metric
if match:
    print "Match dict: %s" % match.groupdict()
    print "Result: %s" % rule.output_template % match.groupdict()
else:
    print "ERROR: NO MATCH"
| apache-2.0 |
WillStewart1994/NoSleep | requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# ISO-8859-7 (Latin-7) Greek model: pairs the char-to-order map above with the
# shared bigram precedence matrix used by the chardet probers.
Latin7GreekModel = {
    'charToOrderMap': Latin7_CharToOrderMap,
    'precedenceMatrix': GreekLangModel,
    'mTypicalPositiveRatio': 0.982851,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-7"
}
# windows-1253 Greek model: same bigram matrix, windows-1253 char-order map.
Win1253GreekModel = {
    'charToOrderMap': win1253_CharToOrderMap,
    'precedenceMatrix': GreekLangModel,
    'mTypicalPositiveRatio': 0.982851,
    'keepEnglishLetter': False,
    'charsetName': "windows-1253"
}
# flake8: noqa
| apache-2.0 |
ulope/django | django/test/client.py | 11 | 25023 | from __future__ import unicode_literals
import sys
import os
import re
import mimetypes
from copy import copy
from importlib import import_module
from io import BytesIO
from django.apps import apps
from django.conf import settings
from django.core import urlresolvers
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest, ISO_8859_1, UTF_8
from django.core.signals import (request_started, request_finished,
got_request_exception)
from django.db import close_old_connections
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry, SimpleLazyObject
from django.utils.encoding import force_bytes, force_str, uri_to_iri
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils import six
from django.utils.six.moves.urllib.parse import urlparse, urlsplit
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class FakePayload(object):
    """
    A wrapper around BytesIO that restricts what can be read since data from
    the network can't be seeked and cannot be read outside of its content
    length. This makes sure that views can't do anything under the test client
    that wouldn't work in Real Life.
    """
    def __init__(self, content=None):
        self.__content = BytesIO()
        self.__len = 0
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        # Remaining unread byte count, not the total written.
        return self.__len

    def read(self, num_bytes=None):
        """Read up to num_bytes (all remaining bytes when None)."""
        if not self.read_started:
            # Rewind once so reads start from the beginning of the payload.
            self.__content.seek(0)
            self.read_started = True
        if num_bytes is None:
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        content = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return content

    def write(self, content):
        """Append content (coerced to bytes); disallowed once reading began."""
        if self.read_started:
            # Message grammar fixed: "after he's been read" -> "it's".
            raise ValueError("Unable to write a payload after it's been read")
        content = force_bytes(content)
        self.__content.write(content)
        self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
    """
    Wrap a streaming response's content iterator so that *close* runs (and the
    request_finished signal it fires is emitted) only after the content is
    fully consumed.

    close_old_connections is disconnected around the close() call so the test
    database connection is not torn down by the signal.
    """
    try:
        for item in iterable:
            yield item
    finally:
        request_finished.disconnect(close_old_connections)
        close()  # will fire request_finished
        request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
    """
    A HTTP Handler that can be used for testing purposes. Uses the WSGI
    interface to compose requests, but returns the raw HttpResponse object with
    the originating WSGIRequest attached to its ``wsgi_request`` attribute.
    """
    def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
        # Tests usually want CSRF enforcement off; the test Client passes
        # False here unless explicitly asked otherwise.
        self.enforce_csrf_checks = enforce_csrf_checks
        super(ClientHandler, self).__init__(*args, **kwargs)

    def __call__(self, environ):
        """Process a WSGI environ dict and return the raw HttpResponse."""
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.load_middleware()

        # Emit request_started ourselves (there is no real WSGI server),
        # keeping DB-connection cleanup disconnected while we do so.
        request_started.disconnect(close_old_connections)
        request_started.send(sender=self.__class__, environ=environ)
        request_started.connect(close_old_connections)
        request = WSGIRequest(environ)
        # sneaky little hack so that we can easily get round
        # CsrfViewMiddleware.  This makes life easier, and is probably
        # required for backwards compatibility with external tests against
        # admin views.
        request._dont_enforce_csrf_checks = not self.enforce_csrf_checks

        # Request goes through middleware.
        response = self.get_response(request)
        # Attach the originating request to the response so that it could be
        # later retrieved.
        response.wsgi_request = request

        # We're emulating a WSGI server; we must call the close method
        # on completion.
        if response.streaming:
            # Defer close() until the streaming content is fully consumed.
            response.streaming_content = closing_iterator_wrapper(
                response.streaming_content, response.close)
        else:
            request_finished.disconnect(close_old_connections)
            response.close()  # will fire request_finished
            request_finished.connect(close_old_connections)

        return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
    """
    Signal receiver recording every rendered template and a snapshot of its
    context into *store*.

    The context is copied so that it is an accurate representation at the time
    of rendering.
    """
    if 'templates' not in store:
        store['templates'] = []
    store['templates'].append(template)
    if 'context' not in store:
        store['context'] = ContextList()
    store['context'].append(copy(context))
def encode_multipart(boundary, data):
    """
    Encodes multipart POST data from a dictionary of form values.

    The key will be used as the form data name; the value will be transmitted
    as content. If the value is a file, the contents of the file will be sent
    as an application/octet-stream; otherwise, str(value) will be sent.
    Returns the encoded body as bytes.
    """
    lines = []

    # PEP 8: named inner functions instead of lambdas assigned to names.
    def to_bytes(s):
        # Encode each piece using the project-wide default charset.
        return force_bytes(s, settings.DEFAULT_CHARSET)

    def is_file(thing):
        # Duck-typed file check: anything with a callable read() method.
        return hasattr(thing, "read") and callable(thing.read)

    # Each bit of the multipart form data could be either a form value or a
    # file, or a *list* of form values and/or files. Remember that HTTP field
    # names can be duplicated!
    for (key, value) in data.items():
        if is_file(value):
            lines.extend(encode_file(boundary, key, value))
        elif not isinstance(value, six.string_types) and is_iterable(value):
            for item in value:
                if is_file(item):
                    lines.extend(encode_file(boundary, key, item))
                else:
                    lines.extend([to_bytes(val) for val in [
                        '--%s' % boundary,
                        'Content-Disposition: form-data; name="%s"' % key,
                        '',
                        item
                    ]])
        else:
            lines.extend([to_bytes(val) for val in [
                '--%s' % boundary,
                'Content-Disposition: form-data; name="%s"' % key,
                '',
                value
            ]])

    lines.extend([
        to_bytes('--%s--' % boundary),
        b'',
    ])
    return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
    """
    Returns the multipart lines (a list of bytes) encoding *file* as one
    form-data part named *key*, delimited by *boundary*.
    """
    # PEP 8: a named inner function instead of a lambda assigned to a name.
    def to_bytes(s):
        return force_bytes(s, settings.DEFAULT_CHARSET)

    # Prefer an explicit content_type attribute (e.g. Django UploadedFile);
    # fall back to guessing from the filename, then to a generic binary type.
    if hasattr(file, 'content_type'):
        content_type = file.content_type
    else:
        content_type = mimetypes.guess_type(file.name)[0]

    if content_type is None:
        content_type = 'application/octet-stream'
    return [
        to_bytes('--%s' % boundary),
        to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
                 % (key, os.path.basename(file.name))),
        to_bytes('Content-Type: %s' % content_type),
        b'',
        file.read()
    ]
class RequestFactory(object):
    """
    Class that lets you create mock Request objects for use in testing.
    Usage:
    rf = RequestFactory()
    get_request = rf.get('/hello/')
    post_request = rf.post('/submit/', {'foo': 'bar'})
    Once you have a request object you can pass it to any view function,
    just as if that view had been hooked up using a URLconf.
    """
    def __init__(self, **defaults):
        # ``defaults`` are extra WSGI environ entries applied to every
        # request built by this factory (see _base_environ()).
        self.defaults = defaults
        self.cookies = SimpleCookie()
        self.errors = BytesIO()
    def _base_environ(self, **request):
        """
        The base environment for a request.
        """
        # This is a minimal valid WSGI environ dictionary, plus:
        # - HTTP_COOKIE: for cookie support,
        # - REMOTE_ADDR: often useful, see #8551.
        # See http://www.python.org/dev/peps/pep-3333/#environ-variables
        # The str() wrappers force native strings on both Python 2 and 3,
        # which is what WSGI requires for environ keys/values.
        environ = {
            'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
            'PATH_INFO': str('/'),
            'REMOTE_ADDR': str('127.0.0.1'),
            'REQUEST_METHOD': str('GET'),
            'SCRIPT_NAME': str(''),
            'SERVER_NAME': str('testserver'),
            'SERVER_PORT': str('80'),
            'SERVER_PROTOCOL': str('HTTP/1.1'),
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': str('http'),
            'wsgi.input': FakePayload(b''),
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
        }
        # Factory-wide defaults first, then per-request overrides.
        environ.update(self.defaults)
        environ.update(request)
        return environ
    def request(self, **request):
        "Construct a generic request object."
        return WSGIRequest(self._base_environ(**request))
    def _encode_data(self, data, content_type):
        # Return the request body as bytes for the given content type.
        if content_type is MULTIPART_CONTENT:
            return encode_multipart(BOUNDARY, data)
        else:
            # Encode the content so that the byte representation is correct.
            match = CONTENT_TYPE_RE.match(content_type)
            if match:
                charset = match.group(1)
            else:
                charset = settings.DEFAULT_CHARSET
            return force_bytes(data, encoding=charset)
    def _get_path(self, parsed):
        # Build PATH_INFO from a urlparse() result, replicating how a real
        # WSGI server would hand the path to Django.
        path = force_str(parsed[2])
        # If there are parameters, add them
        if parsed[3]:
            path += str(";") + force_str(parsed[3])
        path = uri_to_iri(path).encode(UTF_8)
        # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
        # decoded with ISO-8859-1. We replicate this behavior here.
        # Refs comment in `get_bytes_from_wsgi()`.
        return path.decode(ISO_8859_1) if six.PY3 else path
    def get(self, path, data=None, secure=False, **extra):
        "Construct a GET request."
        data = {} if data is None else data
        r = {
            'QUERY_STRING': urlencode(data, doseq=True),
        }
        r.update(extra)
        return self.generic('GET', path, secure=secure, **r)
    def post(self, path, data=None, content_type=MULTIPART_CONTENT,
             secure=False, **extra):
        "Construct a POST request."
        data = {} if data is None else data
        post_data = self._encode_data(data, content_type)
        return self.generic('POST', path, post_data, content_type,
                            secure=secure, **extra)
    def head(self, path, data=None, secure=False, **extra):
        "Construct a HEAD request."
        data = {} if data is None else data
        r = {
            'QUERY_STRING': urlencode(data, doseq=True),
        }
        r.update(extra)
        return self.generic('HEAD', path, secure=secure, **r)
    def trace(self, path, secure=False, **extra):
        "Construct a TRACE request."
        return self.generic('TRACE', path, secure=secure, **extra)
    def options(self, path, data='', content_type='application/octet-stream',
                secure=False, **extra):
        "Construct an OPTIONS request."
        return self.generic('OPTIONS', path, data, content_type,
                            secure=secure, **extra)
    def put(self, path, data='', content_type='application/octet-stream',
            secure=False, **extra):
        "Construct a PUT request."
        return self.generic('PUT', path, data, content_type,
                            secure=secure, **extra)
    def patch(self, path, data='', content_type='application/octet-stream',
              secure=False, **extra):
        "Construct a PATCH request."
        return self.generic('PATCH', path, data, content_type,
                            secure=secure, **extra)
    def delete(self, path, data='', content_type='application/octet-stream',
               secure=False, **extra):
        "Construct a DELETE request."
        return self.generic('DELETE', path, data, content_type,
                            secure=secure, **extra)
    def generic(self, method, path, data='',
                content_type='application/octet-stream', secure=False,
                **extra):
        """Constructs an arbitrary HTTP request."""
        parsed = urlparse(path)
        data = force_bytes(data, settings.DEFAULT_CHARSET)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'REQUEST_METHOD': str(method),
            'SERVER_PORT': str('443') if secure else str('80'),
            'wsgi.url_scheme': str('https') if secure else str('http'),
        }
        if data:
            # Only set a body (and its metadata) when there is one.
            r.update({
                'CONTENT_LENGTH': len(data),
                'CONTENT_TYPE': str(content_type),
                'wsgi.input': FakePayload(data),
            })
        r.update(extra)
        # If QUERY_STRING is absent or empty, we want to extract it from the URL.
        if not r.get('QUERY_STRING'):
            query_string = force_bytes(parsed[4])
            # WSGI requires latin-1 encoded strings. See get_path_info().
            if six.PY3:
                query_string = query_string.decode('iso-8859-1')
            r['QUERY_STRING'] = query_string
        return self.request(**r)
class Client(RequestFactory):
    """
    A class that can act as a client for testing purposes.
    It allows the user to compose GET and POST requests, and
    obtain the response that the server gave to those requests.
    The server Response objects are annotated with the details
    of the contexts and templates that were rendered during the
    process of serving the request.
    Client objects are stateful - they will retain cookie (and
    thus session) details for the lifetime of the Client instance.
    This is not intended as a replacement for Twill/Selenium or
    the like - it is here to allow testing against the
    contexts and templates produced by a view, rather than the
    HTML rendered to the end-user.
    """
    def __init__(self, enforce_csrf_checks=False, **defaults):
        super(Client, self).__init__(**defaults)
        # The handler runs the full request/response cycle in-process,
        # without a real HTTP server.
        self.handler = ClientHandler(enforce_csrf_checks)
        # Holds sys.exc_info() captured from a view via the
        # got_request_exception signal (see store_exc_info / request).
        self.exc_info = None
    def store_exc_info(self, **kwargs):
        """
        Stores exceptions when they are generated by a view.
        """
        self.exc_info = sys.exc_info()
    def _session(self):
        """
        Obtains the current session variables.
        """
        if apps.is_installed('django.contrib.sessions'):
            engine = import_module(settings.SESSION_ENGINE)
            cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
            if cookie:
                return engine.SessionStore(cookie.value)
            else:
                # No session cookie yet: create and persist a new session so
                # subsequent requests share it.
                s = engine.SessionStore()
                s.save()
                self.cookies[settings.SESSION_COOKIE_NAME] = s.session_key
                return s
        # Sessions framework not installed: return an empty mapping.
        return {}
    session = property(_session)
    def request(self, **request):
        """
        The master request method. Composes the environment dictionary
        and passes to the handler, returning the result of the handler.
        Assumes defaults for the query environment, which can be overridden
        using the arguments to the request.
        """
        environ = self._base_environ(**request)
        # Curry a data dictionary into an instance of the template renderer
        # callback function.
        data = {}
        on_template_render = curry(store_rendered_templates, data)
        # Unique dispatch_uid so concurrent/nested requests don't clobber
        # each other's signal connections.
        signal_uid = "template-render-%s" % id(request)
        signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
        # Capture exceptions created by the handler.
        got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
        try:
            try:
                response = self.handler(environ)
            except TemplateDoesNotExist as e:
                # If the view raises an exception, Django will attempt to show
                # the 500.html template. If that template is not available,
                # we should ignore the error in favor of re-raising the
                # underlying exception that caused the 500 error. Any other
                # template found to be missing during view error handling
                # should be reported as-is.
                if e.args != ('500.html',):
                    raise
            # Look for a signalled exception, clear the current context
            # exception data, then re-raise the signalled exception.
            # Also make sure that the signalled exception is cleared from
            # the local cache!
            if self.exc_info:
                exc_info = self.exc_info
                self.exc_info = None
                six.reraise(*exc_info)
            # Save the client and request that stimulated the response.
            # Note: ``request`` here is the kwargs dict, not an HttpRequest.
            response.client = self
            response.request = request
            # Add any rendered template detail to the response.
            response.templates = data.get("templates", [])
            response.context = data.get("context")
            # Attach the ResolverMatch instance to the response
            response.resolver_match = SimpleLazyObject(
                lambda: urlresolvers.resolve(request['PATH_INFO']))
            # Flatten a single context. Not really necessary anymore thanks to
            # the __getattr__ flattening in ContextList, but has some edge-case
            # backwards-compatibility implications.
            if response.context and len(response.context) == 1:
                response.context = response.context[0]
            # Update persistent cookie data.
            if response.cookies:
                self.cookies.update(response.cookies)
            return response
        finally:
            # Always detach the signal handlers, even when re-raising.
            signals.template_rendered.disconnect(dispatch_uid=signal_uid)
            got_request_exception.disconnect(dispatch_uid="request-exception")
    def get(self, path, data=None, follow=False, secure=False, **extra):
        """
        Requests a response from the server using GET.
        """
        response = super(Client, self).get(path, data=data, secure=secure,
                                           **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def post(self, path, data=None, content_type=MULTIPART_CONTENT,
             follow=False, secure=False, **extra):
        """
        Requests a response from the server using POST.
        """
        response = super(Client, self).post(path, data=data,
                                            content_type=content_type,
                                            secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def head(self, path, data=None, follow=False, secure=False, **extra):
        """
        Request a response from the server using HEAD.
        """
        response = super(Client, self).head(path, data=data, secure=secure,
                                            **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def options(self, path, data='', content_type='application/octet-stream',
                follow=False, secure=False, **extra):
        """
        Request a response from the server using OPTIONS.
        """
        response = super(Client, self).options(path, data=data,
                                               content_type=content_type,
                                               secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def put(self, path, data='', content_type='application/octet-stream',
            follow=False, secure=False, **extra):
        """
        Send a resource to the server using PUT.
        """
        response = super(Client, self).put(path, data=data,
                                           content_type=content_type,
                                           secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def patch(self, path, data='', content_type='application/octet-stream',
              follow=False, secure=False, **extra):
        """
        Send a resource to the server using PATCH.
        """
        response = super(Client, self).patch(path, data=data,
                                             content_type=content_type,
                                             secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def delete(self, path, data='', content_type='application/octet-stream',
               follow=False, secure=False, **extra):
        """
        Send a DELETE request to the server.
        """
        response = super(Client, self).delete(path, data=data,
                                              content_type=content_type,
                                              secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def trace(self, path, data='', follow=False, secure=False, **extra):
        """
        Send a TRACE request to the server.
        """
        response = super(Client, self).trace(path, data=data, secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response
    def login(self, **credentials):
        """
        Sets the Factory to appear as if it has successfully logged into a site.
        Returns True if login is possible; False if the provided credentials
        are incorrect, or the user is inactive, or if the sessions framework is
        not available.
        """
        from django.contrib.auth import authenticate, login
        user = authenticate(**credentials)
        if (user and user.is_active and
                apps.is_installed('django.contrib.sessions')):
            engine = import_module(settings.SESSION_ENGINE)
            # Create a fake request to store login details.
            request = HttpRequest()
            if self.session:
                request.session = self.session
            else:
                request.session = engine.SessionStore()
            login(request, user)
            # Save the session values.
            request.session.save()
            # Set the cookie to represent the session.
            session_cookie = settings.SESSION_COOKIE_NAME
            self.cookies[session_cookie] = request.session.session_key
            cookie_data = {
                'max-age': None,
                'path': '/',
                'domain': settings.SESSION_COOKIE_DOMAIN,
                'secure': settings.SESSION_COOKIE_SECURE or None,
                'expires': None,
            }
            self.cookies[session_cookie].update(cookie_data)
            return True
        else:
            return False
    def logout(self):
        """
        Removes the authenticated user's cookies and session object.
        Causes the authenticated user to be logged out.
        """
        from django.contrib.auth import get_user, logout
        request = HttpRequest()
        engine = import_module(settings.SESSION_ENGINE)
        if self.session:
            request.session = self.session
            request.user = get_user(request)
        else:
            request.session = engine.SessionStore()
        logout(request)
        # Drop all client-side state, not just the session cookie.
        self.cookies = SimpleCookie()
    def _handle_redirects(self, response, **extra):
        "Follows any redirects by requesting responses from the server using GET."
        response.redirect_chain = []
        while response.status_code in (301, 302, 303, 307):
            url = response.url
            redirect_chain = response.redirect_chain
            redirect_chain.append((url, response.status_code))
            url = urlsplit(url)
            # Carry scheme/host/port from an absolute redirect target into
            # the next request's environ.
            if url.scheme:
                extra['wsgi.url_scheme'] = url.scheme
            if url.hostname:
                extra['SERVER_NAME'] = url.hostname
            if url.port:
                extra['SERVER_PORT'] = str(url.port)
            response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
            response.redirect_chain = redirect_chain
            # Prevent loops
            if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
                break
        return response
| bsd-3-clause |
fafaman/django | tests/select_related_regress/models.py | 282 | 3677 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Non-tree relation graph for select_related() regression tests:
# Building -> Device -> Port, with Connection joining two ports.
@python_2_unicode_compatible
class Building(models.Model):
    name = models.CharField(max_length=10)
    def __str__(self):
        return "Building: %s" % self.name
@python_2_unicode_compatible
class Device(models.Model):
    building = models.ForeignKey('Building', models.CASCADE)
    name = models.CharField(max_length=10)
    def __str__(self):
        return "device '%s' in building %s" % (self.name, self.building)
@python_2_unicode_compatible
class Port(models.Model):
    device = models.ForeignKey('Device', models.CASCADE)
    port_number = models.CharField(max_length=10)
    def __str__(self):
        return "%s/%s" % (self.device.name, self.port_number)
@python_2_unicode_compatible
class Connection(models.Model):
    # NOTE(review): ForeignKey(unique=True) is effectively a OneToOneField;
    # kept as-is since these tests exercise exactly that configuration.
    start = models.ForeignKey(
        Port,
        models.CASCADE,
        related_name='connection_start',
        unique=True,
    )
    end = models.ForeignKey(
        Port,
        models.CASCADE,
        related_name='connection_end',
        unique=True,
    )
    def __str__(self):
        return "%s to %s" % (self.start, self.end)
# Another non-tree hierarchy that exercises code paths similar to the above
# example, but in a slightly different configuration.
class TUser(models.Model):
    name = models.CharField(max_length=200)
class Person(models.Model):
    # One Person per TUser (unique FK).
    user = models.ForeignKey(TUser, models.CASCADE, unique=True)
class Organizer(models.Model):
    person = models.ForeignKey(Person, models.CASCADE)
class Student(models.Model):
    person = models.ForeignKey(Person, models.CASCADE)
class Class(models.Model):
    org = models.ForeignKey(Organizer, models.CASCADE)
class Enrollment(models.Model):
    # Join model between Student and Class.
    std = models.ForeignKey(Student, models.CASCADE)
    cls = models.ForeignKey(Class, models.CASCADE)
# Models for testing bug #8036.
class Country(models.Model):
    name = models.CharField(max_length=50)
class State(models.Model):
    name = models.CharField(max_length=50)
    country = models.ForeignKey(Country, models.CASCADE)
class ClientStatus(models.Model):
    name = models.CharField(max_length=50)
class Client(models.Model):
    name = models.CharField(max_length=50)
    # Nullable FK: exercises select_related() across NULLable relations.
    state = models.ForeignKey(State, models.SET_NULL, null=True)
    status = models.ForeignKey(ClientStatus, models.CASCADE)
class SpecialClient(Client):
    # Multi-table inheritance child of Client.
    value = models.IntegerField()
# Some model inheritance exercises
@python_2_unicode_compatible
class Parent(models.Model):
    name = models.CharField(max_length=10)
    def __str__(self):
        return self.name
class Child(Parent):
    # Multi-table inheritance: Child has its own table linked to Parent.
    value = models.IntegerField()
@python_2_unicode_compatible
class Item(models.Model):
    name = models.CharField(max_length=10)
    child = models.ForeignKey(Child, models.SET_NULL, null=True)
    def __str__(self):
        return self.name
# Models for testing bug #19870.
@python_2_unicode_compatible
class Fowl(models.Model):
    name = models.CharField(max_length=10)
    def __str__(self):
        return self.name
class Hen(Fowl):
    pass
class Chick(Fowl):
    # FK to a sibling subclass of the same concrete base.
    mother = models.ForeignKey(Hen, models.CASCADE)
class Base(models.Model):
    # Abstract base shared by A, B and C below; ``lots_of_text`` makes
    # deferred-field behavior observable in queries.
    name = models.CharField(max_length=10)
    lots_of_text = models.TextField()
    class Meta:
        abstract = True
class A(Base):
    a_field = models.CharField(max_length=10)
class B(Base):
    b_field = models.CharField(max_length=10)
class C(Base):
    c_a = models.ForeignKey(A, models.CASCADE)
    c_b = models.ForeignKey(B, models.CASCADE)
    is_published = models.BooleanField(default=False)
| bsd-3-clause |
chrrrles/ansible | lib/ansible/plugins/cache/memcached.py | 193 | 6097 | # (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import sys
import threading
import time

from itertools import chain

try:
    # MutableSet moved to collections.abc in Python 3.3 and the old
    # ``collections.MutableSet`` alias was removed in Python 3.10.
    from collections.abc import MutableSet
except ImportError:  # Python 2
    from collections import MutableSet

from ansible import constants as C
from ansible.plugins.cache.base import BaseCacheModule
try:
import memcache
except ImportError:
print('python-memcached is required for the memcached fact cache')
sys.exit(1)
class ProxyClientPool(object):
    """
    Memcached connection pooling for thread/fork safety. Inspired by py-redis
    connection pool.
    Available connections are maintained in a deque and released in a FIFO manner.
    """
    def __init__(self, *args, **kwargs):
        # All args/kwargs except ``max_connections`` are forwarded verbatim
        # to memcache.Client when a new connection is created.
        self.max_connections = kwargs.pop('max_connections', 1024)
        self.connection_args = args
        self.connection_kwargs = kwargs
        self.reset()
    def reset(self):
        # Remember the creating process so _check_safe() can detect forks.
        self.pid = os.getpid()
        self._num_connections = 0
        self._available_connections = collections.deque(maxlen=self.max_connections)
        self._locked_connections = set()
        self._lock = threading.Lock()
    def _check_safe(self):
        # After a fork the child must not share the parent's sockets:
        # drop every connection and rebuild the pool. Double-checked
        # locking: re-test the pid under the lock in case another thread
        # already performed the reset.
        if self.pid != os.getpid():
            with self._lock:
                if self.pid == os.getpid():
                    # bail out - another thread already acquired the lock
                    return
                self.disconnect_all()
                self.reset()
    def get_connection(self):
        # Reuse an idle connection if one exists, otherwise create one.
        self._check_safe()
        try:
            connection = self._available_connections.popleft()
        except IndexError:
            connection = self.create_connection()
        self._locked_connections.add(connection)
        return connection
    def create_connection(self):
        if self._num_connections >= self.max_connections:
            raise RuntimeError("Too many memcached connections")
        self._num_connections += 1
        return memcache.Client(*self.connection_args, **self.connection_kwargs)
    def release_connection(self, connection):
        # Move the connection back from the in-use set to the idle deque.
        self._check_safe()
        self._locked_connections.remove(connection)
        self._available_connections.append(connection)
    def disconnect_all(self):
        for conn in chain(self._available_connections, self._locked_connections):
            conn.disconnect_all()
    def __getattr__(self, name):
        # Any unknown attribute becomes a proxied memcache.Client call that
        # transparently borrows and returns a pooled connection.
        def wrapped(*args, **kwargs):
            return self._proxy_client(name, *args, **kwargs)
        return wrapped
    def _proxy_client(self, name, *args, **kwargs):
        conn = self.get_connection()
        try:
            return getattr(conn, name)(*args, **kwargs)
        finally:
            self.release_connection(conn)
class CacheModuleKeys(MutableSet):
    """
    A set subclass that keeps track of insertion time and persists
    the set in memcached.

    The backing store is a dict mapping key -> insertion timestamp; every
    mutation is written back to ``cache`` under ``PREFIX``.
    """
    PREFIX = 'ansible_cache_keys'
    def __init__(self, cache, *args, **kwargs):
        # ``cache`` only needs a ``set(key, value)`` method.
        self._cache = cache
        self._keyset = dict(*args, **kwargs)
    def __contains__(self, key):
        return key in self._keyset
    def __iter__(self):
        return iter(self._keyset)
    def __len__(self):
        return len(self._keyset)
    def add(self, key):
        self._keyset[key] = time.time()
        self._cache.set(self.PREFIX, self._keyset)
    def discard(self, key):
        # pop() with a default instead of ``del``: the MutableSet.discard
        # contract requires silence for missing keys, and CacheModule.delete()
        # calls this unconditionally.
        self._keyset.pop(key, None)
        self._cache.set(self.PREFIX, self._keyset)
    def remove_by_timerange(self, s_min, s_max):
        """Drop every key whose insertion time lies strictly in (s_min, s_max)."""
        # Iterate over a snapshot of the keys: deleting from the dict while
        # iterating its live keys() view raises RuntimeError on Python 3.
        for k in list(self._keyset):
            t = self._keyset[k]
            if s_min < t < s_max:
                del self._keyset[k]
        self._cache.set(self.PREFIX, self._keyset)
class CacheModule(BaseCacheModule):
    """Memcached-backed fact cache with prefix namespacing and TTL expiry."""
    def __init__(self, *args, **kwargs):
        if C.CACHE_PLUGIN_CONNECTION:
            connection = C.CACHE_PLUGIN_CONNECTION.split(',')
        else:
            connection = ['127.0.0.1:11211']
        self._timeout = C.CACHE_PLUGIN_TIMEOUT
        self._prefix = C.CACHE_PLUGIN_PREFIX
        self._cache = ProxyClientPool(connection, debug=0)
        # Restore the persisted key/timestamp index, if any.
        self._keys = CacheModuleKeys(self._cache, self._cache.get(CacheModuleKeys.PREFIX) or [])
    def _make_key(self, key):
        return "{0}{1}".format(self._prefix, key)
    def _expire_keys(self):
        # A timeout of 0 (or less) means "never expire".
        if self._timeout > 0:
            expiry_age = time.time() - self._timeout
            self._keys.remove_by_timerange(0, expiry_age)
    def get(self, key):
        value = self._cache.get(self._make_key(key))
        # guard against the key not being removed from the keyset;
        # this could happen in cases where the timeout value is changed
        # between invocations
        if value is None:
            self.delete(key)
            raise KeyError
        return value
    def set(self, key, value):
        self._cache.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
        self._keys.add(key)
    def keys(self):
        self._expire_keys()
        return list(iter(self._keys))
    def contains(self, key):
        self._expire_keys()
        return key in self._keys
    def delete(self, key):
        self._cache.delete(self._make_key(key))
        self._keys.discard(key)
    def flush(self):
        for key in self.keys():
            self.delete(key)
    def copy(self):
        return self._keys.copy()
    def __getstate__(self):
        # The connection pool (sockets, locks) is not picklable; pickle
        # nothing and rebuild everything on restore.
        return dict()
    def __setstate__(self, data):
        self.__init__()
| gpl-3.0 |
qk4l/Flexget | flexget/plugins/filter/pending_approval.py | 5 | 4957 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from datetime import datetime, timedelta
from flexget import db_schema, plugin
from flexget.event import event
from flexget.manager import Session
from flexget.utils.database import entry_synonym
from sqlalchemy import Column, String, Unicode, Boolean, Integer, DateTime
log = logging.getLogger('pending_approval')
Base = db_schema.versioned_base('pending_approval', 0)
class PendingEntry(Base):
    """ORM row for an entry that is waiting for (or has received) approval."""
    __tablename__ = 'pending_entries'
    id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
    task_name = Column(Unicode)
    title = Column(Unicode)
    url = Column(String)
    approved = Column(Boolean)
    # Full entry serialized as JSON; ``entry`` exposes it as an Entry object.
    _json = Column('json', Unicode)
    entry = entry_synonym('_json')
    added = Column(DateTime, default=datetime.now)
    def __init__(self, task_name, entry):
        self.task_name = task_name
        self.title = entry['title']
        self.url = entry['url']
        # New rows always start out unapproved.
        self.approved = False
        self.entry = entry
    def __repr__(self):
        return '<PendingEntry(task_name={},title={},url={},approved={})>' \
            .format(self.task_name, self.title, self.url, self.approved)
    def to_dict(self):
        # Plain-dict form used by the API layer (excludes the raw JSON blob).
        return {
            'id': self.id,
            'task_name': self.task_name,
            'title': self.title,
            'url': self.url,
            'approved': self.approved,
            'added': self.added
        }
class PendingApproval(object):
    """Plugin that holds new entries until they are manually approved.

    Acts as an input (re-injecting approved entries), a filter (rejecting
    and caching unseen entries) and a learner (purging consumed approvals).
    """
    schema = {'type': 'boolean',
              'deprecated': 'pending_approval is deprecated, switch to using pending_list'}
    @staticmethod
    def _item_query(entry, task, session):
        # An entry is identified by (task, title, url).
        return session.query(PendingEntry) \
            .filter(PendingEntry.task_name == task.name) \
            .filter(PendingEntry.title == entry['title']) \
            .filter(PendingEntry.url == entry['url']) \
            .first()
    def on_task_input(self, task, config):
        # Re-inject previously approved entries into the task.
        if not config:
            return
        approved_entries = []
        with Session() as session:
            # NOTE: ``== True`` is required here - SQLAlchemy column
            # comparison, not a Python truthiness test.
            for approved_entry in session.query(PendingEntry) \
                    .filter(PendingEntry.task_name == task.name) \
                    .filter(PendingEntry.approved == True) \
                    .all():
                e = approved_entry.entry
                e['approved'] = True
                # immortal: downstream filters may not reject this entry.
                e['immortal'] = True
                approved_entries.append(e)
        return approved_entries
    # Run after all other filters
    @plugin.priority(-255)
    def on_task_filter(self, task, config):
        if not config:
            return
        with Session() as session:
            for entry in task.entries:
                # Cache all new task entries
                if entry.get('approved'):
                    entry.accept('entry is marked as approved')
                elif not self._item_query(entry, task, session):
                    log.verbose('creating new pending entry %s', entry)
                    session.add(PendingEntry(task_name=task.name, entry=entry))
                    entry.reject('new unapproved entry, caching and waiting for approval')
    def on_task_learn(self, task, config):
        if not config:
            return
        with Session() as session:
            # Delete all accepted entries that have passed the pending phase
            for entry in task.accepted:
                if entry.get('approved'):
                    db_entry = self._item_query(entry, task, session)
                    if db_entry and db_entry.approved:
                        log.debug('deleting approved entry %s', db_entry)
                        session.delete(db_entry)
@event('manager.db_cleanup')
def db_cleanup(manager, session):
    """Periodic maintenance hook: drop stale, never-approved entries."""
    # Clean unapproved entries older than 1 year
    deleted = session.query(PendingEntry).filter(PendingEntry.added < datetime.now() - timedelta(days=365)).delete()
    if deleted:
        log.info('Purged %i pending entries older than 1 year', deleted)
def list_pending_entries(session, task_name=None, approved=None, start=None, stop=None, sort_by='added',
                         descending=True):
    """Return a slice of pending entries, optionally filtered by task and
    approval state, ordered by ``sort_by`` (descending by default)."""
    log.debug('querying pending entries')
    query = session.query(PendingEntry)
    if task_name:
        query = query.filter(PendingEntry.task_name == task_name)
    if approved is not None:
        query = query.filter(PendingEntry.approved == approved)
    sort_column = getattr(PendingEntry, sort_by)
    query = query.order_by(sort_column.desc() if descending else sort_column)
    return query.slice(start, stop).all()
def get_entry_by_id(session, entry_id):
    """Fetch one pending entry by primary key (raises if not exactly one)."""
    query = session.query(PendingEntry).filter(PendingEntry.id == entry_id)
    return query.one()
@event('plugin.register')
def register_plugin():
    """Register this module with FlexGet's plugin system (API version 2)."""
    plugin.register(PendingApproval, 'pending_approval', api_ver=2)
| mit |
40223137/2015abc | static/Brython3.1.1-20150328-091302/Lib/unittest/util.py | 794 | 4157 | """Various utility functions."""
from collections import namedtuple, OrderedDict
__unittest = True
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
    """Return repr(obj), never raising.

    A broken __repr__ must not break error reporting, so fall back to the
    default object representation on any exception.  When *short* is true,
    results of _MAX_LENGTH characters or more are truncated and marked.
    """
    try:
        result = repr(obj)
    except Exception:
        result = object.__repr__(obj)
    if short and len(result) >= _MAX_LENGTH:
        return result[:_MAX_LENGTH] + ' [truncated]...'
    return result
def strclass(cls):
    """Return the module-qualified dotted name of *cls*."""
    return '{0.__module__}.{0.__name__}'.format(cls)
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.
    Returns a two-element tuple of lists. The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list. Duplicate elements in either input list are ignored.
    """
    # Classic merge-style walk over two sorted lists.  Termination is
    # deliberately exception-driven: running either index off the end
    # raises IndexError, at which point the other list's tail is flushed.
    i = j = 0
    missing = []
    unexpected = []
    while True:
        try:
            e = expected[i]
            a = actual[j]
            if e < a:
                # e only in expected; record it and skip its duplicates.
                missing.append(e)
                i += 1
                while expected[i] == e:
                    i += 1
            elif e > a:
                # a only in actual; record it and skip its duplicates.
                unexpected.append(a)
                j += 1
                while actual[j] == a:
                    j += 1
            else:
                # Present in both: skip duplicates on both sides.  The
                # finally clause guarantees j advances past a's duplicates
                # even when the expected-side scan raises IndexError.
                i += 1
                try:
                    while expected[i] == e:
                        i += 1
                finally:
                    j += 1
                    while actual[j] == a:
                        j += 1
        except IndexError:
            # One list exhausted: everything left in the other is a diff.
            missing.extend(expected[i:])
            unexpected.extend(actual[j:])
            break
    return missing, unexpected
def unorderable_list_difference(expected, actual):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).
    As it does a linear search per item (remove) it
    has O(n*n) performance."""
    missing = []
    # Drain ``expected`` from the back; each matched item is removed from
    # ``actual`` so that whatever remains there is, by definition, unexpected.
    while expected:
        candidate = expected.pop()
        if candidate in actual:
            actual.remove(candidate)
        else:
            missing.append(candidate)
    # anything left in actual is unexpected
    return missing, actual
def three_way_cmp(x, y):
    """Return -1 if x < y, 0 if x == y and 1 if x > y"""
    if x < y:
        return -1
    return 1 if x > y else 0
_Mismatch = namedtuple('Mismatch', 'actual expected value')
def _count_diff_all_purpose(actual, expected):
    'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
    # elements need not be hashable, so all counting is done with == scans
    # (quadratic, but the only option without hashing).
    left, right = list(actual), list(expected)
    diffs = []
    processed = []
    # First pass: every distinct element of ``actual`` whose count differs.
    for elem in left:
        if any(elem == prev for prev in processed):
            continue  # duplicate of an element already handled
        processed.append(elem)
        left_count = sum(1 for x in left if x == elem)
        right_count = sum(1 for x in right if x == elem)
        if left_count != right_count:
            diffs.append(_Mismatch(left_count, right_count, elem))
    # Second pass: elements that never appear in ``actual`` at all.
    processed = []
    for elem in right:
        if any(elem == prev for prev in left):
            continue  # already accounted for in the first pass
        if any(elem == prev for prev in processed):
            continue  # duplicate of an expected-only element already handled
        processed.append(elem)
        right_count = sum(1 for x in right if x == elem)
        diffs.append(_Mismatch(0, right_count, elem))
    return diffs
def _ordered_count(iterable):
    'Return dict of element counts, in the order they were first seen'
    counts = OrderedDict()
    for item in iterable:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
    return counts
def _count_diff_hashable(actual, expected):
    'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'
    # elements must be hashable
    actual_counts = _ordered_count(actual)
    expected_counts = _ordered_count(expected)
    diffs = []
    # First pass: elements seen in ``actual`` whose count disagrees.
    for elem, actual_cnt in actual_counts.items():
        expected_cnt = expected_counts.get(elem, 0)
        if actual_cnt != expected_cnt:
            diffs.append(_Mismatch(actual_cnt, expected_cnt, elem))
    # Second pass: elements that only ever appear in ``expected``.
    diffs.extend(_Mismatch(0, cnt, elem)
                 for elem, cnt in expected_counts.items()
                 if elem not in actual_counts)
    return diffs
| gpl-3.0 |
karimbahgat/GeoVis | geovis/colour.py | 4 | 26953 | # -*- coding: utf-8 -*-
"""Color Library
.. :doctest:
This module defines several color formats that can be converted to one or
another.
Formats
-------
HSL:
3-uple of Hue, Saturation, Value all between 0.0 and 1.0
RGB:
3-uple of Red, Green, Blue all between 0.0 and 1.0
HEX:
string object beginning with '#' and with red, green, blue value.
This format accept color in 3 or 6 value ex: '#fff' or '#ffffff'
WEB:
string object that defaults to HEX representation or human if possible
Usage
-----
Several function exists to convert from one format to another. But all
function are not written. So the best way is to use the object Color.
Please see the documentation of this object for more information.
.. note:: Some constants are defined for convenience in HSL, RGB, HEX
"""
from __future__ import with_statement, print_function
import hashlib
import re
##
## Some Constants
##
## Soften inequalities and some rounding issue based on float
FLOAT_ERROR = 0.0000005
RGB_TO_COLOR_NAMES = {
(0, 0, 0): ['Black'],
(0, 0, 128): ['Navy', 'NavyBlue'],
(0, 0, 139): ['DarkBlue'],
(0, 0, 205): ['MediumBlue'],
(0, 0, 255): ['Blue'],
(0, 100, 0): ['DarkGreen'],
(0, 128, 0): ['Green'],
(0, 139, 139): ['DarkCyan'],
(0, 191, 255): ['DeepSkyBlue'],
(0, 206, 209): ['DarkTurquoise'],
(0, 250, 154): ['MediumSpringGreen'],
(0, 255, 0): ['Lime'],
(0, 255, 127): ['SpringGreen'],
(0, 255, 255): ['Cyan', 'Aqua'],
(25, 25, 112): ['MidnightBlue'],
(30, 144, 255): ['DodgerBlue'],
(32, 178, 170): ['LightSeaGreen'],
(34, 139, 34): ['ForestGreen'],
(46, 139, 87): ['SeaGreen'],
(47, 79, 79): ['DarkSlateGray', 'DarkSlateGrey'],
(50, 205, 50): ['LimeGreen'],
(60, 179, 113): ['MediumSeaGreen'],
(64, 224, 208): ['Turquoise'],
(65, 105, 225): ['RoyalBlue'],
(70, 130, 180): ['SteelBlue'],
(72, 61, 139): ['DarkSlateBlue'],
(72, 209, 204): ['MediumTurquoise'],
(75, 0, 130): ['Indigo'],
(85, 107, 47): ['DarkOliveGreen'],
(95, 158, 160): ['CadetBlue'],
(100, 149, 237): ['CornflowerBlue'],
(102, 205, 170): ['MediumAquamarine'],
(105, 105, 105): ['DimGray', 'DimGrey'],
(106, 90, 205): ['SlateBlue'],
(107, 142, 35): ['OliveDrab'],
(112, 128, 144): ['SlateGray', 'SlateGrey'],
(119, 136, 153): ['LightSlateGray', 'LightSlateGrey'],
(123, 104, 238): ['MediumSlateBlue'],
(124, 252, 0): ['LawnGreen'],
(127, 255, 0): ['Chartreuse'],
(127, 255, 212): ['Aquamarine'],
(128, 0, 0): ['Maroon'],
(128, 0, 128): ['Purple'],
(128, 128, 0): ['Olive'],
(128, 128, 128): ['Gray', 'Grey'],
(132, 112, 255): ['LightSlateBlue'],
(135, 206, 235): ['SkyBlue'],
(135, 206, 250): ['LightSkyBlue'],
(138, 43, 226): ['BlueViolet'],
(139, 0, 0): ['DarkRed'],
(139, 0, 139): ['DarkMagenta'],
(139, 69, 19): ['SaddleBrown'],
(143, 188, 143): ['DarkSeaGreen'],
(144, 238, 144): ['LightGreen'],
(147, 112, 219): ['MediumPurple'],
(148, 0, 211): ['DarkViolet'],
(152, 251, 152): ['PaleGreen'],
(153, 50, 204): ['DarkOrchid'],
(154, 205, 50): ['YellowGreen'],
(160, 82, 45): ['Sienna'],
(165, 42, 42): ['Brown'],
(169, 169, 169): ['DarkGray', 'DarkGrey'],
(173, 216, 230): ['LightBlue'],
(173, 255, 47): ['GreenYellow'],
(175, 238, 238): ['PaleTurquoise'],
(176, 196, 222): ['LightSteelBlue'],
(176, 224, 230): ['PowderBlue'],
(178, 34, 34): ['Firebrick'],
(184, 134, 11): ['DarkGoldenrod'],
(186, 85, 211): ['MediumOrchid'],
(188, 143, 143): ['RosyBrown'],
(189, 183, 107): ['DarkKhaki'],
(192, 192, 192): ['Silver'],
(199, 21, 133): ['MediumVioletRed'],
(205, 92, 92): ['IndianRed'],
(205, 133, 63): ['Peru'],
(208, 32, 144): ['VioletRed'],
(210, 105, 30): ['Chocolate'],
(210, 180, 140): ['Tan'],
(211, 211, 211): ['LightGray', 'LightGrey'],
(216, 191, 216): ['Thistle'],
(218, 112, 214): ['Orchid'],
(218, 165, 32): ['Goldenrod'],
(219, 112, 147): ['PaleVioletRed'],
(220, 20, 60): ['Crimson'],
(220, 220, 220): ['Gainsboro'],
(221, 160, 221): ['Plum'],
(222, 184, 135): ['Burlywood'],
(224, 255, 255): ['LightCyan'],
(230, 230, 250): ['Lavender'],
(233, 150, 122): ['DarkSalmon'],
(238, 130, 238): ['Violet'],
(238, 221, 130): ['LightGoldenrod'],
(238, 232, 170): ['PaleGoldenrod'],
(240, 128, 128): ['LightCoral'],
(240, 230, 140): ['Khaki'],
(240, 248, 255): ['AliceBlue'],
(240, 255, 240): ['Honeydew'],
(240, 255, 255): ['Azure'],
(244, 164, 96): ['SandyBrown'],
(245, 222, 179): ['Wheat'],
(245, 245, 220): ['Beige'],
(245, 245, 245): ['WhiteSmoke'],
(245, 255, 250): ['MintCream'],
(248, 248, 255): ['GhostWhite'],
(250, 128, 114): ['Salmon'],
(250, 235, 215): ['AntiqueWhite'],
(250, 240, 230): ['Linen'],
(250, 250, 210): ['LightGoldenrodYellow'],
(253, 245, 230): ['OldLace'],
(255, 0, 0): ['Red'],
(255, 0, 255): ['Magenta', 'Fuchsia'],
(255, 20, 147): ['DeepPink'],
(255, 69, 0): ['OrangeRed'],
(255, 99, 71): ['Tomato'],
(255, 105, 180): ['HotPink'],
(255, 127, 80): ['Coral'],
(255, 140, 0): ['DarkOrange'],
(255, 160, 122): ['LightSalmon'],
(255, 165, 0): ['Orange'],
(255, 182, 193): ['LightPink'],
(255, 192, 203): ['Pink'],
(255, 215, 0): ['Gold'],
(255, 218, 185): ['PeachPuff'],
(255, 222, 173): ['NavajoWhite'],
(255, 228, 181): ['Moccasin'],
(255, 228, 196): ['Bisque'],
(255, 228, 225): ['MistyRose'],
(255, 235, 205): ['BlanchedAlmond'],
(255, 239, 213): ['PapayaWhip'],
(255, 240, 245): ['LavenderBlush'],
(255, 245, 238): ['Seashell'],
(255, 248, 220): ['Cornsilk'],
(255, 250, 205): ['LemonChiffon'],
(255, 250, 240): ['FloralWhite'],
(255, 250, 250): ['Snow'],
(255, 255, 0): ['Yellow'],
(255, 255, 224): ['LightYellow'],
(255, 255, 240): ['Ivory'],
(255, 255, 255): ['White']
}
## Building inverse relation: lowercase english color name -> decimal
## RGB 3-uple (first-listed name wins are irrelevant here since every
## alias maps to the same RGB triple).
COLOR_NAME_TO_RGB = dict(
    (name.lower(), rgb)
    for rgb, names in RGB_TO_COLOR_NAMES.items()
    for name in names)
## Validators for the two accepted hex color spellings: '#rrggbb' and '#rgb'.
LONG_HEX_COLOR = re.compile(r'^#[0-9a-fA-F]{6}$')
SHORT_HEX_COLOR = re.compile(r'^#[0-9a-fA-F]{3}$')
class HSL:
    """Predefined color constants as HSL 3-uples.

    Components are (hue, saturation, lightness), each in the 0.0..1.0
    range (see :func:`hsl2rgb` for the meaning of each component).
    """
    BLACK = (0.0 , 0.0, 0.0)
    WHITE = (0.0 , 0.0, 1.0)
    RED = (0.0 , 1.0, 0.5)
    GREEN = (1.0/3, 1.0, 0.5)
    BLUE = (2.0/3, 1.0, 0.5)
    GRAY = (0.0 , 0.0, 0.5)
class C_RGB:
    """RGB colors container

    Provides a quick color access.

    >>> from colour import RGB

    >>> RGB.WHITE
    (1.0, 1.0, 1.0)
    >>> RGB.BLUE
    (0.0, 0.0, 1.0)

    >>> RGB.DONOTEXISTS  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    AttributeError: ... has no attribute 'DONOTEXISTS'

    """
    def __getattr__(self, value):
        ## Look the requested name up on the HSL constants class and
        ## convert it to RGB on the fly; an unknown name lets getattr()
        ## raise AttributeError naturally.
        return hsl2rgb(getattr(HSL, value))
class C_HEX:
    """HEX colors container

    Provides a quick color access.

    >>> from colour import HEX

    >>> HEX.WHITE
    '#fff'
    >>> HEX.BLUE
    '#00f'

    >>> HEX.DONOTEXISTS  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    AttributeError: ... has no attribute 'DONOTEXISTS'

    """
    def __getattr__(self, value):
        ## Chain through the RGB container so HEX constants stay
        ## consistent with RGB ones; unknown names raise AttributeError.
        return rgb2hex(getattr(RGB, value))
## Module-level singletons: use as RGB.WHITE, HEX.BLUE, etc.
RGB = C_RGB()
HEX = C_HEX()
##
## Convertion function
##
def hsl2rgb(hsl):
    """Convert HSL representation towards RGB

    :param hsl: 3-uple of Hue, Saturation, Lightness (floats)
    :rtype: 3-uple of RGB values in float between 0 and 1

    Hue is a rotation around the chromatic circle, so any value is
    accepted (h=0 is equivalent to h=1); Saturation and Lightness must
    lie between 0 and 1.

    This algorithm came from:
    http://www.easyrgb.com/index.php?X=MATH&H=19#text19

    >>> from colour import hsl2rgb

    With a lightness put at 0, RGB is always rgbblack

    >>> hsl2rgb((0.5, 0.5, 0.0))
    (0.0, 0.0, 0.0)

    Same for lightness put at 1, RGB is always rgbwhite

    >>> hsl2rgb((0.5, 0.5, 1.0))
    (1.0, 1.0, 1.0)

    With saturation put at 0, the RGB should be equal to Lightness:

    >>> hsl2rgb((0.5, 0.0, 0.75))
    (0.75, 0.75, 0.75)

    With saturation put at 1, and lightness put to 0.5, we can find
    normal full red, green, blue colors:

    >>> hsl2rgb((0 , 1.0, 0.5))
    (1.0, 0.0, 0.0)
    >>> hsl2rgb((1.0/3 , 1.0, 0.5))
    (0.0, 1.0, 0.0)
    >>> hsl2rgb((2.0/3 , 1.0, 0.5))
    (0.0, 0.0, 1.0)

    Out-of-range saturation or lightness is rejected:

    >>> hsl2rgb((0.0, 2.0, 0.5)) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Saturation must be between 0 and 1.

    >>> hsl2rgb((0.0, 0.0, 1.5)) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Lightness must be between 0 and 1.

    """
    h, s, l = map(float, hsl)
    if not (0.0 - FLOAT_ERROR <= s <= 1.0 + FLOAT_ERROR):
        raise ValueError("Saturation must be between 0 and 1.")
    if not (0.0 - FLOAT_ERROR <= l <= 1.0 + FLOAT_ERROR):
        raise ValueError("Lightness must be between 0 and 1.")
    if s == 0:
        ## Achromatic: every channel equals the lightness.
        return l, l, l
    ## Intermediate values from the easyrgb.com formulation.
    v2 = l * (1.0 + s) if l < 0.5 else (l + s) - (s * l)
    v1 = 2.0 * l - v2
    ## Each channel is the same hue function sampled a third of a turn apart.
    return (_hue2rgb(v1, v2, h + (1.0 / 3)),
            _hue2rgb(v1, v2, h),
            _hue2rgb(v1, v2, h - (1.0 / 3)))
def rgb2hsl(rgb):
    """Convert RGB representation towards HSL

    :param rgb: 3-uple of Red, Green, Blue amounts (floats between 0 and 1)
    :rtype: 3-uple of HSL values in float between 0 and 1

    This algorithm came from:
    http://www.easyrgb.com/index.php?X=MATH&H=19#text19

    >>> from colour import rgb2hsl

    Having only one value set, you can check that:

    >>> rgb2hsl((1.0, 0.0, 0.0))
    (0.0, 1.0, 0.5)
    >>> rgb2hsl((0.0, 1.0, 0.0)) # doctest: +ELLIPSIS
    (0.33..., 1.0, 0.5)
    >>> rgb2hsl((0.0, 0.0, 1.0)) # doctest: +ELLIPSIS
    (0.66..., 1.0, 0.5)

    Equal channels give a gray (hue and saturation are zero):

    >>> rgb2hsl((0.5, 0.5, 0.5)) # doctest: +ELLIPSIS
    (..., 0.0, 0.5)

    Out-of-range channels are rejected:

    >>> rgb2hsl((0.0, 2.0, 0.5)) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Green must be between 0 and 1. You provided 2.0.

    >>> rgb2hsl((0.0, 0.0, 1.5)) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Blue must be between 0 and 1. You provided 1.5.

    """
    r, g, b = map(float, rgb)
    for channel_name, channel in (('Red', r), ('Green', g), ('Blue', b)):
        if not (0 - FLOAT_ERROR <= channel <= 1 + FLOAT_ERROR):
            raise ValueError("%s must be between 0 and 1. You provided %r."
                             % (channel_name, channel))
    vmin = min(r, g, b)      ## Min. value of RGB
    vmax = max(r, g, b)      ## Max. value of RGB
    diff = vmax - vmin       ## Delta RGB value
    vsum = vmin + vmax
    l = vsum / 2
    if diff == 0.0:
        ## This is a gray, no chroma: hue and saturation are 0 by convention.
        return (0.0, 0.0, l)
    ## Chromatic data: saturation depends on which half of the lightness
    ## range we are in.
    s = diff / vsum if l < 0.5 else diff / (2.0 - vsum)
    ## Per-channel distance terms from the easyrgb.com formulation.
    dr, dg, db = [(((vmax - channel) / 6) + (diff / 2)) / diff
                  for channel in (r, g, b)]
    if r == vmax:
        h = db - dg
    elif g == vmax:
        h = (1.0 / 3) + dr - db
    else:
        h = (2.0 / 3) + dg - dr
    ## Wrap hue back into [0, 1].
    if h < 0:
        h += 1
    if h > 1:
        h -= 1
    return (h, s, l)
def _hue2rgb(v1, v2, vH):
"""Private helper function (Do not call directly)
:param vH: rotation around the chromatic circle (between 0..1)
"""
while vH < 0: vH += 1
while vH > 1: vH -= 1
if 6 * vH < 1: return v1 + (v2 - v1) * 6 * vH
if 2 * vH < 1: return v2
if 3 * vH < 2: return v1 + (v2 - v1) * ((2.0 / 3) - vH) * 6
return v1
def rgb2hex(rgb, force_long=False):
    """Transform RGB tuple to hex RGB representation

    :param rgb: RGB 3-uple of float between 0 and 1
    :param force_long: when true, never collapse to the short 3-digit form
    :rtype: 3 hex char or 6 hex char string representation

    Usage
    -----

    >>> from colour import rgb2hex

    >>> rgb2hex((0.0,1.0,0.0))
    '#0f0'

    Rounding try to be as natural as possible:

    >>> rgb2hex((0.0,0.999999,1.0))
    '#0ff'

    And if not possible, the 6 hex char representation is used:

    >>> rgb2hex((0.23,1.0,1.0))
    '#3bffff'

    >>> rgb2hex((0.0,0.999999,1.0), force_long=True)
    '#00ffff'
    """
    ## Subtract FLOAT_ERROR so values a hair below a channel boundary
    ## (e.g. 0.999999) still round up "naturally" to the boundary value.
    hx = '#' + ''.join("%02x" % int(c * 255 + 0.5 - FLOAT_ERROR) for c in rgb)
    ## Collapse '#rrggbb' to '#rgb' when each channel's two digits match.
    if not force_long and hx[1] == hx[2] and hx[3] == hx[4] and hx[5] == hx[6]:
        return '#' + hx[1] + hx[3] + hx[5]
    return hx
def hex2rgb(str_rgb):
    """Transform hex RGB representation to RGB tuple

    :param str_rgb: 3 hex char or 6 hex char string representation
    :rtype: RGB 3-uple of float between 0 and 1
    :raises ValueError: if the value is not a valid 3 or 6 digit hex color

    >>> from colour import hex2rgb

    >>> hex2rgb('#00ff00')
    (0.0, 1.0, 0.0)

    >>> hex2rgb('#0f0')
    (0.0, 1.0, 0.0)

    >>> hex2rgb('#aaa') # doctest: +ELLIPSIS
    (0.66..., 0.66..., 0.66...)

    >>> hex2rgb('#aa') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Invalid value '#aa' provided for rgb color.
    """
    try:
        rgb = str_rgb[1:]
        if len(rgb) == 6:
            channels = rgb[0:2], rgb[2:4], rgb[4:6]
        elif len(rgb) == 3:
            ## Short form: each digit stands for itself doubled ('a' -> 'aa').
            channels = rgb[0] * 2, rgb[1] * 2, rgb[2] * 2
        else:
            raise ValueError()
        ## Parse inside the try block so non-hex digits (e.g. '#ggg') get
        ## the same friendly error as a malformed length.
        return tuple(float(int(v, 16)) / 255 for v in channels)
    except (TypeError, ValueError):
        ## Narrowed from a bare ``except:`` which also swallowed
        ## KeyboardInterrupt/SystemExit; TypeError covers non-string input.
        raise ValueError("Invalid value %r provided for rgb color."
                         % str_rgb)
def hex2web(hex):
    """Converts HEX representation to WEB

    :param hex: 3 hex char or 6 hex char string representation
    :rtype: web string representation (human readable if possible)

    WEB representation uses X11 rgb.txt to define convertion
    between RGB and english color names.

    Usage
    =====

    >>> from colour import hex2web

    >>> hex2web('#ff0000')
    'red'

    >>> hex2web('#aaaaaa')
    '#aaa'

    >>> hex2web('#abc')
    '#abc'

    >>> hex2web('#acacac')
    '#acacac'

    """
    rgb_dec = tuple(int(channel * 255) for channel in hex2rgb(hex))
    names = RGB_TO_COLOR_NAMES.get(rgb_dec)
    if names is not None:
        ## First listed name is the canonical one.  Multi-word
        ## (CamelCase) names keep their capitals, single-word names are
        ## lowercased.
        name = names[0]
        if len(re.sub(r"[^A-Z]", "", name)) > 1:
            return name
        return name.lower()
    ## No english name (hex format was already verified by hex2rgb):
    ## collapse '#rrggbb' to '#rgb' when possible.
    if len(hex) == 7 and \
            hex[1] == hex[2] and hex[3] == hex[4] and hex[5] == hex[6]:
        return '#' + hex[1] + hex[3] + hex[5]
    return hex
def web2hex(web, force_long=False):
    """Converts WEB representation to HEX

    :param web: web string representation (human readable if possible)
    :rtype: 3 hex char or 6 hex char string representation

    WEB representation uses X11 rgb.txt to define convertion
    between RGB and english color names.

    Usage
    =====

    >>> from colour import web2hex

    >>> web2hex('red')
    '#f00'

    >>> web2hex('#aaa')
    '#aaa'

    >>> web2hex('#foo') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    AttributeError: '#foo' is not in web format. Need 3 or 6 hex digit.

    >>> web2hex('#aaa', force_long=True)
    '#aaaaaa'

    >>> web2hex('#aaaaaa')
    '#aaaaaa'

    >>> web2hex('#aaaa') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    AttributeError: '#aaaa' is not in web format. Need 3 or 6 hex digit.

    >>> web2hex('pinky') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: 'pinky' is not a recognized color.

    And color names are case insensitive:

    >>> Color('RED')
    <Color red>

    """
    if web.startswith('#'):
        if LONG_HEX_COLOR.match(web):
            return web.lower()
        if SHORT_HEX_COLOR.match(web):
            if force_long:
                ## Expand each digit: '#abc' -> '#aabbcc'
                return '#' + ''.join(digit * 2 for digit in web[1:])
            return web.lower()
        raise AttributeError(
            "%r is not in web format. Need 3 or 6 hex digit." % web)
    ## Not a hex string: look up the (case-insensitive) english name.
    name = web.lower()
    if name not in COLOR_NAME_TO_RGB:
        raise ValueError("%r is not a recognized color." % name)
    ## Convert the decimal RGB triple to hex.
    return rgb2hex([float(int(v)) / 255 for v in COLOR_NAME_TO_RGB[name]],
                   force_long)
def color_scale(begin_hsl, end_hsl, nb):
    """Returns a list of nb color HSL tuples between begin_hsl and end_hsl

    The result has ``nb + 1`` entries: both endpoints are included.

    >>> from colour import color_scale

    >>> [rgb2hex(hsl2rgb(hsl)) for hsl in color_scale((0, 1, 0.5),
    ...                                               (1, 1, 0.5), 3)]
    ['#f00', '#0f0', '#00f', '#f00']

    >>> [rgb2hex(hsl2rgb(hsl))
    ...  for hsl in color_scale((0, 0, 0),
    ...                         (0, 0, 1),
    ...                         15)]  # doctest: +ELLIPSIS
    ['#000', '#111', '#222', ..., '#ccc', '#ddd', '#eee', '#fff']

    """
    ## Per-component increment for one step along the scale.
    deltas = [float(end - begin) / nb
              for begin, end in zip(begin_hsl, end_hsl)]
    return [tuple(begin + delta * step
                  for begin, delta in zip(begin_hsl, deltas))
            for step in range(nb + 1)]
##
## Color Pickers
##
def RGB_color_picker(obj):
    """Build a color representation from the string representation of an object

    This allows to quickly get a color from some data, with the
    additional benefit that the color will be the same as long as the
    (string representation of the) data is the same::

        >>> from colour import RGB_color_picker, Color

    Same inputs produce the same result::

        >>> RGB_color_picker("Something") == RGB_color_picker("Something")
        True

    ... but different inputs produce different colors::

        >>> RGB_color_picker("Something") != RGB_color_picker("Something else")
        True

    In any case, we still get a ``Color`` object::

        >>> isinstance(RGB_color_picker("Something"), Color)
        True

    """
    ## SHA-384 produces a 96-char hex digest which splits evenly into
    ## three 32-char chunks -- one per RGB channel.
    digest = hashlib.sha384(str(obj).encode('utf-8')).hexdigest()
    chunk_len = int(len(digest) / 3)
    ## Largest value a chunk can take, used to scale down to [0.0, 1.0].
    max_value = float(int("f" * chunk_len, 16))
    channels = [int(digest[i * chunk_len:(i + 1) * chunk_len], 16) / max_value
                for i in range(3)]
    return Color(rgb2hex(channels))
def hash_or_str(obj):
    """Return a hash of *obj*, or a string key when *obj* is unhashable.

    The type name is mixed in so two objects of different types with the
    same value or string representation stay distinguished.
    """
    key = (type(obj).__name__, obj)
    try:
        return hash(key)
    except TypeError:
        ## Unhashable value (e.g. list, dict): fall back to a string key.
        return type(obj).__name__ + str(obj)
##
## All purpose object
##
class Color(object):
    """Abstraction of a color object

    Color object keeps information of a color. It can input/output to different
    format (HSL, RGB, HEX, WEB) and their partial representation.

    >>> from colour import Color, HSL

    >>> b = Color()
    >>> b.hsl = HSL.BLUE

    Access values
    -------------

    >>> b.hue # doctest: +ELLIPSIS
    0.66...
    >>> b.saturation
    1.0
    >>> b.luminance
    0.5
    >>> b.red
    0.0
    >>> b.blue
    1.0
    >>> b.green
    0.0
    >>> b.rgb
    (0.0, 0.0, 1.0)
    >>> b.hsl # doctest: +ELLIPSIS
    (0.66..., 1.0, 0.5)
    >>> b.hex
    '#00f'

    Change values
    -------------

    Let's change Hue toward red tint:

    >>> b.hue = 0.0
    >>> b.hex
    '#f00'

    >>> b.hue = 2.0/3
    >>> b.hex
    '#00f'

    In the other way round:

    >>> b.hex = '#f00'
    >>> b.hsl
    (0.0, 1.0, 0.5)

    Long hex can be accessed directly:

    >>> b.hex_l = '#123456'
    >>> b.hex_l
    '#123456'
    >>> b.hex
    '#123456'

    >>> b.hex_l = '#ff0000'
    >>> b.hex_l
    '#ff0000'
    >>> b.hex
    '#f00'

    Convenience
    -----------

    >>> c = Color('blue')
    >>> c
    <Color blue>
    >>> c.hue = 0
    >>> c
    <Color red>

    >>> c.saturation = 0.0
    >>> c.hsl # doctest: +ELLIPSIS
    (..., 0.0, 0.5)
    >>> c.rgb
    (0.5, 0.5, 0.5)
    >>> c.hex
    '#7f7f7f'
    >>> c
    <Color #7f7f7f>

    >>> c.luminance = 0.0
    >>> c
    <Color black>

    >>> c.hex
    '#000'

    >>> c.green = 1.0
    >>> c.blue = 1.0
    >>> c.hex
    '#0ff'
    >>> c
    <Color cyan>

    >>> c = Color('blue', luminance=0.75)
    >>> c
    <Color #7f7fff>

    >>> c = Color('red', red=0.5)
    >>> c
    <Color #7f0000>

    >>> print(c)
    #7f0000

    You can try to query unexisting attributes:

    >>> c.lightness # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    AttributeError: 'lightness' not found

    TODO: could add HSV, CMYK, YUV conversion.

    # >>> b.hsv
    # >>> b.value
    # >>> b.cyan
    # >>> b.magenta
    # >>> b.yellow
    # >>> b.key
    # >>> b.cmyk

    Recursive init
    --------------

    To support blind convertion of web strings (or already converted object),
    the Color object supports instantiation with another Color object.

    >>> Color(Color(Color('red')))
    <Color red>

    Equality support
    ----------------

    Default equality is RGB hex comparison:

    >>> Color('red') == Color('blue')
    False

    But this can be changed:

    >>> saturation_equality = lambda c1, c2: c1.luminance == c2.luminance
    >>> Color('red', equality=saturation_equality) == Color('blue')
    True

    """
    _hsl = None ## internal representation
    # All attribute reads/writes (other than ``_hsl`` and ``equality``)
    # are routed through get_*/set_* accessors by __getattr__/__setattr__
    # below; the canonical state is the ``_hsl`` list.
    def __init__(self, color=None,
                 pick_for=None, picker=RGB_color_picker, pick_key=hash_or_str,
                 **kwargs):
        # ``color`` may be a web string, another Color (copied), or None
        # (defaults to black).  When ``pick_for`` is given, a color is
        # derived deterministically from that object via ``picker``.
        if pick_key is None:
            pick_key = lambda x: x
        if pick_for is not None:
            color = picker(pick_key(pick_for))
        if isinstance(color, Color):
            self.web = color.web
        else:
            self.web = color if color else 'black'
        self.equality = RGB_equivalence
        # Remaining keyword arguments are applied as attribute writes,
        # so e.g. Color('red', red=0.5) goes through set_red().
        for k, v in kwargs.items():
            setattr(self, k, v)
    def __getattr__(self, label):
        # Fall back to get_<label>() accessors, so ``c.hue`` dispatches
        # to ``c.get_hue()``.
        if ('get_' + label) in self.__class__.__dict__:
            return getattr(self, 'get_' + label)()
        raise AttributeError("'%s' not found" % label)
    def __setattr__(self, label, value):
        # Dispatch writes through set_<label>(); ``_hsl`` and
        # ``equality`` are stored directly to avoid infinite recursion.
        if label not in ["_hsl", "equality"]:
            fc = getattr(self, 'set_' + label)
            fc(value)
        else:
            self.__dict__[label] = value
    ##
    ## Get
    ##
    def get_hsl(self):
        return tuple(self._hsl)
    def get_hex(self):
        return rgb2hex(self.rgb)
    def get_hex_l(self):
        # Long form: always '#rrggbb', never collapsed to '#rgb'.
        return rgb2hex(self.rgb, force_long=True)
    def get_rgb(self):
        return hsl2rgb(self.hsl)
    def get_hue(self):
        return self.hsl[0]
    def get_saturation(self):
        return self.hsl[1]
    def get_luminance(self):
        return self.hsl[2]
    def get_red(self):
        return self.rgb[0]
    def get_green(self):
        return self.rgb[1]
    def get_blue(self):
        return self.rgb[2]
    def get_web(self):
        return hex2web(self.hex)
    ##
    ## Set
    ##
    def set_hsl(self, value):
        # Stored as a list so individual components can be mutated in
        # place by set_hue/set_saturation/set_luminance.
        self._hsl = list(value)
    def set_rgb(self, value):
        self.hsl = rgb2hsl(value)
    def set_hue(self, value):
        self._hsl[0] = value
    def set_saturation(self, value):
        self._hsl[1] = value
    def set_luminance(self, value):
        self._hsl[2] = value
    def set_red(self, value):
        r, g, b = self.rgb
        r = value
        self.rgb = (r, g, b)
    def set_green(self, value):
        r, g, b = self.rgb
        g = value
        self.rgb = (r, g, b)
    def set_blue(self, value):
        r, g, b = self.rgb
        b = value
        self.rgb = (r, g, b)
    def set_hex(self, value):
        self.rgb = hex2rgb(value)
    set_hex_l = set_hex
    def set_web(self, value):
        self.hex = web2hex(value)
    ## range of color generation
    def range_to(self, value, steps):
        # Yield ``steps`` colors going from this color to ``value``,
        # both endpoints included.
        for hsl in color_scale(self._hsl, Color(value).hsl, steps - 1):
            yield Color(hsl=hsl)
    ##
    ## Convenience
    ##
    def __str__(self):
        return "%s" % self.web
    def __repr__(self):
        return "<Color %s>" % self.web
    def __eq__(self, other):
        # Delegates to the pluggable ``equality`` predicate (defaults to
        # RGB_equivalence; see class docstring).
        return self.equality(self, other)
def RGB_equivalence(c1, c2):
    """Default Color equality: compare long hex RGB representations."""
    return c1.hex_l == c2.hex_l
def HSL_equivalence(c1, c2):
    """Alternative Color equality: compare raw internal HSL values."""
    return c1._hsl == c2._hsl
def make_color_factory(**kwargs_defaults):
    """Return a Color constructor with preset keyword defaults.

    Keyword arguments given at call time override the defaults
    configured here.
    """
    def ColorFactory(*args, **kwargs):
        merged = dict(kwargs_defaults, **kwargs)
        return Color(*args, **merged)
    return ColorFactory
| mit |
ArthurGarnier/SickRage | lib/hachoir_parser/misc/bplist.py | 84 | 11670 | """
Apple/NeXT Binary Property List (BPLIST) parser.
Also includes a .createXML() function which produces an XML representation of the object.
Note that it will discard unknown objects, nulls and fill values, but should work for most files.
Documents:
- CFBinaryPList.c
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Parsing.subproj/CFBinaryPList.c
- ForFoundationOnly.h (for structure formats)
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Base.subproj/ForFoundationOnly.h
- XML <-> BPList converter
http://scw.us/iPhone/plutil/plutil.pl
Author: Robert Xiao
Created: 2008-09-21
"""
from hachoir_parser import HachoirParser
from hachoir_core.field import (RootSeekableFieldSet, FieldSet, Enum,
Bits, GenericInteger, Float32, Float64, UInt8, UInt64, Bytes, NullBytes, RawBytes, String)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import displayHandler
from hachoir_core.tools import humanDatetime
from datetime import datetime, timedelta
class BPListTrailer(FieldSet):
    """Fixed 32-byte trailer at the end of a binary plist.

    Describes the element widths and file positions needed to locate the
    offset table and the top-level object.
    """
    def createFields(self):
        yield NullBytes(self, "unused", 6)
        yield UInt8(self, "offsetIntSize", "Size (in bytes) of offsets in the offset table")
        yield UInt8(self, "objectRefSize", "Size (in bytes) of object numbers in object references")
        yield UInt64(self, "numObjects", "Number of objects in this file")
        yield UInt64(self, "topObject", "Top-level object reference")
        yield UInt64(self, "offsetTableOffset", "File offset to the offset table")
    def createDescription(self):
        return "Binary PList trailer"
class BPListOffsetTable(FieldSet):
    """Table of per-object file offsets.

    One entry per object; entry width and count come from the trailer.
    """
    def createFields(self):
        # Offsets are stored as offsetIntSize-byte big-endian integers.
        size = self["../trailer/offsetIntSize"].value*8
        for i in range(self["../trailer/numObjects"].value):
            yield Bits(self, "offset[]", size)
class BPListSize(FieldSet):
    """Size nibble of a marker byte.

    The escape value 0xF means the real size did not fit in 4 bits and
    follows as a full (integer) BPListObject.
    """
    def createFields(self):
        yield Bits(self, "size", 4)
        if self['size'].value == 0xF:
            # Size overflow: actual length stored as a following Int object.
            yield BPListObject(self, "fullsize")
    def createValue(self):
        if 'fullsize' in self:
            return self['fullsize'].value
        else:
            return self['size'].value
class BPListObjectRef(GenericInteger):
    """Reference to another plist object, stored as an unsigned integer
    index of objectRefSize bytes (see trailer)."""
    def __init__(self, parent, name, description=None):
        size = parent['/trailer/objectRefSize'].value*8
        GenericInteger.__init__(self, parent, name, False, size, description)
    def getRef(self):
        # Resolve the index to the actual object field in the root.
        return self.parent['/object[' + str(self.value) + ']']
    def createDisplay(self):
        return self.getRef().display
    def createXML(self, prefix=''):
        # Delegate XML rendering to the referenced object.
        return self.getRef().createXML(prefix)
class BPListArray(FieldSet):
    """Array object: a sequence of ``size`` object references."""
    def __init__(self, parent, name, size, description=None):
        FieldSet.__init__(self, parent, name, description=description)
        # Element count, decoded from the marker's size field by the caller.
        self.numels = size
    def createFields(self):
        for i in range(self.numels):
            yield BPListObjectRef(self, "ref[]")
    def createValue(self):
        return self.array('ref')
    def createDisplay(self):
        return '[' + ', '.join([x.display for x in self.value]) + ']'
    def createXML(self,prefix=''):
        return prefix + '<array>\n' + ''.join([x.createXML(prefix + '\t' ) + '\n' for x in self.value]) + prefix + '</array>'
class BPListDict(FieldSet):
    """Dict object: ``size`` key references followed by ``size`` value
    references (all keys first, then all values)."""
    def __init__(self, parent, name, size, description=None):
        FieldSet.__init__(self, parent, name, description=description)
        # Entry count, decoded from the marker's size field by the caller.
        self.numels = size
    def createFields(self):
        for i in range(self.numels):
            yield BPListObjectRef(self, "keyref[]")
        for i in range(self.numels):
            yield BPListObjectRef(self, "valref[]")
    def createValue(self):
        # Pair each key reference with its value reference.
        return zip(self.array('keyref'),self.array('valref'))
    def createDisplay(self):
        return '{' + ', '.join(['%s: %s'%(k.display,v.display) for k,v in self.value]) + '}'
    def createXML(self, prefix=''):
        return prefix + '<dict>\n' + ''.join(['%s\t<key>%s</key>\n%s\n'%(prefix,k.getRef().value.encode('utf-8'),v.createXML(prefix + '\t')) for k,v in self.value]) + prefix + '</dict>'
class BPListObject(FieldSet):
    """A single plist object: marker byte (type nibble + size nibble)
    followed by a type-specific payload.

    Each branch of createFields() also sets ``self.xml``, a callable
    used by createXML() to emit the object's XML plist representation.

    NOTE(review): this code targets Python 2 only -- it relies on
    ``str.encode('base64')`` and ``unicode()``.
    """
    def createFields(self):
        yield Enum(Bits(self, "marker_type", 4),
                    {0: "Simple",
                    1: "Int",
                    2: "Real",
                    3: "Date",
                    4: "Data",
                    5: "ASCII String",
                    6: "UTF-16-BE String",
                    8: "UID",
                    10: "Array",
                    13: "Dict",})
        markertype = self['marker_type'].value
        if markertype == 0:
            # Simple (Null)
            yield Enum(Bits(self, "value", 4),
                        {0: "Null",
                        8: "False",
                        9: "True",
                        15: "Fill Byte",})
            if self['value'].display == "False":
                self.xml=lambda prefix:prefix + "<false/>"
            elif self['value'].display == "True":
                self.xml=lambda prefix:prefix + "<true/>"
            else:
                # Null and fill bytes have no XML representation.
                self.xml=lambda prefix:prefix + ""
        elif markertype == 1:
            # Int
            yield Bits(self, "size", 4, "log2 of number of bytes")
            size=self['size'].value
            # 8-bit (size=0), 16-bit (size=1) and 32-bit (size=2) numbers are unsigned
            # 64-bit (size=3) numbers are signed
            yield GenericInteger(self, "value", (size>=3), (2**size)*8)
            self.xml=lambda prefix:prefix + "<integer>%s</integer>"%self['value'].value
        elif markertype == 2:
            # Real
            yield Bits(self, "size", 4, "log2 of number of bytes")
            if self['size'].value == 2: # 2**2 = 4 byte float
                yield Float32(self, "value")
            elif self['size'].value == 3: # 2**3 = 8 byte float
                yield Float64(self, "value")
            else:
                # FIXME: What is the format of the real?
                yield Bits(self, "value", (2**self['size'].value)*8)
            self.xml=lambda prefix:prefix + "<real>%s</real>"%self['value'].value
        elif markertype == 3:
            # Date
            yield Bits(self, "extra", 4, "Extra value, should be 3")
            # Use a heuristic to determine which epoch to use
            def cvt_time(v):
                v=timedelta(seconds=v)
                epoch2001 = datetime(2001,1,1)
                epoch1970 = datetime(1970,1,1)
                # Timestamps far in the future under the 2001 epoch are
                # assumed to actually use the 1970 (Unix) epoch.
                if (epoch2001 + v - datetime.today()).days > 5*365:
                    return epoch1970 + v
                return epoch2001 + v
            yield displayHandler(Float64(self, "value"),lambda x:humanDatetime(cvt_time(x)))
            self.xml=lambda prefix:prefix + "<date>%sZ</date>"%(cvt_time(self['value'].value).isoformat())
        elif markertype == 4:
            # Data
            yield BPListSize(self, "size")
            if self['size'].value:
                yield Bytes(self, "value", self['size'].value)
                self.xml=lambda prefix:prefix + "<data>\n%s\n%s</data>"%(self['value'].value.encode('base64').strip(),prefix)
            else:
                self.xml=lambda prefix:prefix + '<data></data>'
        elif markertype == 5:
            # ASCII String
            yield BPListSize(self, "size")
            if self['size'].value:
                yield String(self, "value", self['size'].value, charset="ASCII")
                self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.replace('&','&amp;').encode('iso-8859-1'))
            else:
                self.xml=lambda prefix:prefix + '<string></string>'
        elif markertype == 6:
            # UTF-16-BE String
            yield BPListSize(self, "size")
            if self['size'].value:
                # Size is in UTF-16 code units (2 bytes each).
                yield String(self, "value", self['size'].value*2, charset="UTF-16-BE")
                self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.replace('&','&amp;').encode('utf-8'))
            else:
                self.xml=lambda prefix:prefix + '<string></string>'
        elif markertype == 8:
            # UID
            yield Bits(self, "size", 4, "Number of bytes minus 1")
            yield GenericInteger(self, "value", False, (self['size'].value + 1)*8)
            self.xml=lambda prefix:prefix + "" # no equivalent?
        elif markertype == 10:
            # Array
            yield BPListSize(self, "size")
            size = self['size'].value
            if size:
                yield BPListArray(self, "value", size)
                self.xml=lambda prefix:self['value'].createXML(prefix)
        elif markertype == 13:
            # Dict
            yield BPListSize(self, "size")
            yield BPListDict(self, "value", self['size'].value)
            self.xml=lambda prefix:self['value'].createXML(prefix)
        else:
            # Unknown marker type: consume the size nibble and move on.
            yield Bits(self, "value", 4)
            self.xml=lambda prefix:''
    def createValue(self):
        if 'value' in self:
            return self['value'].value
        elif self['marker_type'].value in [4,5,6]:
            # Empty data/string objects carry no 'value' field.
            return u''
        else:
            return None
    def createDisplay(self):
        if 'value' in self:
            return unicode(self['value'].display)
        elif self['marker_type'].value in [4,5,6]:
            return u''
        else:
            return None
    def createXML(self, prefix=''):
        if 'value' in self:
            try:
                return self.xml(prefix)
            except AttributeError:
                return ''
        return ''
    def getFieldType(self):
        return '%s<%s>'%(FieldSet.getFieldType(self), self['marker_type'].display)
class BPList(HachoirParser, RootSeekableFieldSet):
    """Parser for Apple/NeXT binary property list (bplist00) files.

    File layout: 'bplist00' magic, serialized objects, an offset table,
    and a 32-byte trailer at the very end of the file that describes
    where and how to read the offset table.
    """
    endian = BIG_ENDIAN
    MAGIC = "bplist00"
    PARSER_TAGS = {
        "id": "bplist",
        "category": "misc",
        "file_ext": ("plist",),
        "magic": ((MAGIC, 0),),
        "min_size": 8 + 32, # bplist00 + 32-byte trailer
        "description": "Apple/NeXT Binary Property List",
    }
    def __init__(self, stream, **args):
        RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
        HachoirParser.__init__(self, stream, **args)
    def validate(self):
        if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
            return "Invalid magic"
        return True
    def createFields(self):
        yield Bytes(self, "magic", 8, "File magic (bplist00)")
        # The trailer lives in the last 32 bytes of the file; seek there
        # first so we know how to read everything else.
        if self.size:
            self.seekByte(self.size//8-32, True)
        else:
            # FIXME: UNTESTED
            while True:
                try:
                    self.seekByte(1024)
                except:
                    break
            self.seekByte(self.size//8-32)
        yield BPListTrailer(self, "trailer")
        self.seekByte(self['trailer/offsetTableOffset'].value)
        yield BPListOffsetTable(self, "offset_table")
        # Parse each object at the position given by the offset table.
        for i in self.array("offset_table/offset"):
            if self.current_size > i.value*8:
                self.seekByte(i.value)
            elif self.current_size < i.value*8:
                # try to detect files with gaps or unparsed content
                yield RawBytes(self, "padding[]", i.value-self.current_size//8)
            yield BPListObject(self, "object[]")
    def createXML(self, prefix=''):
        return '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
''' + self['/object[' + str(self['/trailer/topObject'].value) + ']'].createXML(prefix) + '''
</plist>'''
| gpl-3.0 |
ujjvala-addsol/addsol_hr | openerp/addons/edi/models/__init__.py | 442 | 1116 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import edi
import res_partner
import res_company
import res_currency
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ufosky-server/zulip | zerver/management/commands/add_users_to_streams.py | 113 | 2394 | from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.lib.actions import create_stream_if_needed, do_add_subscription
from zerver.models import Realm, UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Add some or all users in a realm to a set of streams."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='The name of the realm in which you are adding people to streams.'),
make_option('-s', '--streams',
dest='streams',
type='str',
help='A comma-separated list of stream names.'),
make_option('-u', '--users',
dest='users',
type='str',
help='A comma-separated list of email addresses.'),
make_option('-a', '--all-users',
dest='all_users',
action="store_true",
default=False,
help='Add all users in this realm to these streams.'),
)
def handle(self, **options):
if options["domain"] is None or options["streams"] is None or \
(options["users"] is None and options["all_users"] is None):
self.print_help("python manage.py", "add_users_to_streams")
exit(1)
stream_names = set([stream.strip() for stream in options["streams"].split(",")])
realm = Realm.objects.get(domain=options["domain"])
if options["all_users"]:
user_profiles = UserProfile.objects.filter(realm=realm)
else:
emails = set([email.strip() for email in options["users"].split(",")])
user_profiles = []
for email in emails:
user_profiles.append(get_user_profile_by_email(email))
for stream_name in set(stream_names):
for user_profile in user_profiles:
stream, _ = create_stream_if_needed(user_profile.realm, stream_name)
did_subscribe = do_add_subscription(user_profile, stream)
print "%s %s to %s" % (
"Subscribed" if did_subscribe else "Already subscribed",
user_profile.email, stream_name)
| apache-2.0 |
eviljeff/olympia | src/olympia/users/backends.py | 5 | 1197 | from django.db.models import Q
from .models import UserProfile
class TestUserBackend(object):
    """Authentication backend that makes logging in a user during tests trivial."""

    def authenticate(self, request=None, username=None, email=None,
                     password=None):
        # This needs to explicitly throw when there is a password since django
        # will skip this backend if a user passes a password.
        # http://bit.ly/2duYr93
        if password is not None:
            raise TypeError('password is not allowed')
        lookup = Q(email=email) | Q(username=username)
        try:
            return UserProfile.objects.get(lookup)
        except UserProfile.DoesNotExist:
            return None

    def get_user(self, user_id):
        # Resolve a session user id back to its profile; None if it vanished.
        try:
            return UserProfile.objects.get(pk=user_id)
        except UserProfile.DoesNotExist:
            return None
class NoAuthForYou(object):
    """Backend used while the site runs in read-only mode: never authenticates."""

    supports_anonymous_user = False
    supports_inactive_user = False
    supports_object_permissions = False

    def authenticate(self, *args, **kw):
        """Refuse every credential set unconditionally."""
        return None

    def get_user(self, *args, **kw):
        """Never resolve a user id."""
        return None
| bsd-3-clause |
coreos/depot_tools | third_party/boto/ses/connection.py | 56 | 21582 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Harry Marr http://hmarr.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import re
import urllib
import base64
from boto.connection import AWSAuthConnection
from boto.exception import BotoServerError
from boto.regioninfo import RegionInfo
import boto
import boto.jsonresponse
from boto.ses import exceptions as ses_exceptions
class SESConnection(AWSAuthConnection):
ResponseError = BotoServerError
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'email.us-east-1.amazonaws.com'
APIVersion = '2010-12-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
             is_secure=True, port=None, proxy=None, proxy_port=None,
             proxy_user=None, proxy_pass=None, debug=0,
             https_connection_factory=None, region=None, path='/',
             security_token=None, validate_certs=True):
    """Create a connection to the SES endpoint for *region*.

    Falls back to the default region (us-east-1) when *region* is not
    given; all remaining arguments are forwarded unchanged to
    AWSAuthConnection.
    """
    if not region:
        region = RegionInfo(self, self.DefaultRegionName,
                            self.DefaultRegionEndpoint)
    self.region = region
    # NOTE: arguments are passed positionally, so their order must match
    # AWSAuthConnection.__init__ exactly.
    AWSAuthConnection.__init__(self, self.region.endpoint,
                               aws_access_key_id, aws_secret_access_key,
                               is_secure, port, proxy, proxy_port,
                               proxy_user, proxy_pass, debug,
                               https_connection_factory, path,
                               security_token=security_token,
                               validate_certs=validate_certs)
def _required_auth_capability(self):
    # Tell boto's auth machinery to sign requests with the SES scheme.
    return ['ses']
def _build_list_params(self, params, items, label):
    """Flatten *items* into AWS list-style keys on *params*.

    Each element becomes a ``"<label>.<n>"`` entry with 1-based *n*; a
    bare string is promoted to a single-element list first.

    :type params: dict
    :param params: The parameter dictionary to populate (mutated in place)

    :type items: list
    :param items: Items to be included in the list

    :type label: string
    :param label: The parameter list's name
    """
    if isinstance(items, basestring):
        items = [items]
    for index, item in enumerate(items, 1):
        params['%s.%d' % (label, index)] = item
def _make_request(self, action, params=None):
    """Make a call to the SES API.

    :type action: string
    :param action: The API method to use (e.g. SendRawEmail)

    :type params: dict
    :param params: Parameters that will be sent as POST data with the API
        call.

    :returns: Parsed response Element on HTTP 200; otherwise delegates to
        :meth:`_handle_error`, which raises.
    """
    ct = 'application/x-www-form-urlencoded; charset=UTF-8'
    headers = {'Content-Type': ct}
    params = params or {}
    params['Action'] = action

    # SES wants UTF-8 byte strings in the POST body; encode only unicode
    # values (Python 2 ``unicode``), leaving byte strings untouched.
    for k, v in params.items():
        if isinstance(v, unicode):  # UTF-8 encode only if it's Unicode
            params[k] = v.encode('utf-8')

    response = super(SESConnection, self).make_request(
        'POST',
        '/',
        headers=headers,
        data=urllib.urlencode(params)
    )
    body = response.read()
    if response.status == 200:
        # Success responses are XML; flatten into a dict-like Element,
        # treating these known repeated tags as lists.
        list_markers = ('VerifiedEmailAddresses', 'Identities',
                        'VerificationAttributes', 'SendDataPoints')
        item_markers = ('member', 'item', 'entry')

        e = boto.jsonresponse.Element(list_marker=list_markers,
                                      item_marker=item_markers)
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    else:
        # HTTP codes other than 200 are considered errors. Go through
        # some error handling to determine which exception gets raised,
        self._handle_error(response, body)
def _handle_error(self, response, body):
    """
    Handle raising the correct exception, depending on the error. Many
    errors share the same HTTP response code, meaning we have to get really
    kludgey and do string searches to figure out what went wrong.

    :param response: The HTTP response object (non-200 status).
    :param body: The already-read response body text searched for known
        SES error phrases.
    :raises: A specific ses_exceptions subclass when the body matches a
        known phrase, otherwise ``self.ResponseError``. Always raises.
    """
    boto.log.error('%s %s' % (response.status, response.reason))
    boto.log.error('%s' % body)

    # NOTE: the literal phrases below must match SES responses exactly --
    # do not reword them.
    if "Address blacklisted." in body:
        # Delivery failures happened frequently enough with the recipient's
        # email address for Amazon to blacklist it. After a day or three,
        # they'll be automatically removed, and delivery can be attempted
        # again (if you write the code to do so in your application).
        ExceptionToRaise = ses_exceptions.SESAddressBlacklistedError
        exc_reason = "Address blacklisted."
    elif "Email address is not verified." in body:
        # This error happens when the "Reply-To" value passed to
        # send_email() hasn't been verified yet.
        ExceptionToRaise = ses_exceptions.SESAddressNotVerifiedError
        exc_reason = "Email address is not verified."
    elif "Daily message quota exceeded." in body:
        # Encountered when your account exceeds the maximum total number
        # of emails per 24 hours.
        ExceptionToRaise = ses_exceptions.SESDailyQuotaExceededError
        exc_reason = "Daily message quota exceeded."
    elif "Maximum sending rate exceeded." in body:
        # Your account has sent above its allowed requests a second rate.
        ExceptionToRaise = ses_exceptions.SESMaxSendingRateExceededError
        exc_reason = "Maximum sending rate exceeded."
    elif "Domain ends with dot." in body:
        # Recipient address ends with a dot/period. This is invalid.
        ExceptionToRaise = ses_exceptions.SESDomainEndsWithDotError
        exc_reason = "Domain ends with dot."
    elif "Local address contains control or whitespace" in body:
        # I think this pertains to the recipient address.
        ExceptionToRaise = ses_exceptions.SESLocalAddressCharacterError
        exc_reason = "Local address contains control or whitespace."
    elif "Illegal address" in body:
        # A clearly mal-formed address.
        ExceptionToRaise = ses_exceptions.SESIllegalAddressError
        exc_reason = "Illegal address"
    # The re.search is to distinguish from the
    # SESAddressNotVerifiedError error above.
    elif re.search('Identity.*is not verified', body):
        ExceptionToRaise = ses_exceptions.SESIdentityNotVerifiedError
        exc_reason = "Identity is not verified."
    elif "ownership not confirmed" in body:
        ExceptionToRaise = ses_exceptions.SESDomainNotConfirmedError
        exc_reason = "Domain ownership is not confirmed."
    else:
        # This is either a common AWS error, or one that we don't devote
        # its own exception to.
        ExceptionToRaise = self.ResponseError
        exc_reason = response.reason

    raise ExceptionToRaise(response.status, exc_reason, body)
def send_email(self, source, subject, body, to_addresses,
               cc_addresses=None, bcc_addresses=None,
               format='text', reply_addresses=None,
               return_path=None, text_body=None, html_body=None):
    """Composes an email message based on input data, and then immediately
    queues the message for sending.

    :type source: string
    :param source: The sender's email address.

    :type subject: string
    :param subject: The subject of the message: A short summary of the
                    content, which will appear in the recipient's inbox.

    :type body: string
    :param body: The message body.

    :type to_addresses: list of strings or string
    :param to_addresses: The To: field(s) of the message.

    :type cc_addresses: list of strings or string
    :param cc_addresses: The CC: field(s) of the message.

    :type bcc_addresses: list of strings or string
    :param bcc_addresses: The BCC: field(s) of the message.

    :type format: string
    :param format: The format of the message's body, must be either "text"
                   or "html".

    :type reply_addresses: list of strings or string
    :param reply_addresses: The reply-to email address(es) for the
                            message. If the recipient replies to the
                            message, each reply-to address will
                            receive the reply.

    :type return_path: string
    :param return_path: The email address to which bounce notifications are
                        to be forwarded. If the message cannot be delivered
                        to the recipient, then an error message will be
                        returned from the recipient's ISP; this message
                        will then be forwarded to the email address
                        specified by the ReturnPath parameter.

    :type text_body: string
    :param text_body: The text body to send with this email.

    :type html_body: string
    :param html_body: The html body to send with this email.
    """
    format = format.lower().strip()
    # ``body`` is shorthand for exactly one of text_body/html_body,
    # routed by ``format``; supplying both forms is ambiguous and raises.
    if body is not None:
        if format == "text":
            if text_body is not None:
                raise Warning("You've passed in both a body and a "
                              "text_body; please choose one or the other.")
            text_body = body
        else:
            if html_body is not None:
                raise Warning("You've passed in both a body and an "
                              "html_body; please choose one or the other.")
            html_body = body

    params = {
        'Source': source,
        'Message.Subject.Data': subject,
    }
    if return_path:
        params['ReturnPath'] = return_path
    if html_body is not None:
        params['Message.Body.Html.Data'] = html_body
    if text_body is not None:
        params['Message.Body.Text.Data'] = text_body
    # NOTE(review): ``format`` is validated only after it was already used
    # above to route ``body`` (an invalid format lands the body in the
    # html slot before this raises) -- confirm before reordering.
    if(format not in ("text", "html")):
        raise ValueError("'format' argument must be 'text' or 'html'")
    if(not (html_body or text_body)):
        raise ValueError("No text or html body found for mail")

    self._build_list_params(params, to_addresses,
                            'Destination.ToAddresses.member')
    if cc_addresses:
        self._build_list_params(params, cc_addresses,
                                'Destination.CcAddresses.member')

    if bcc_addresses:
        self._build_list_params(params, bcc_addresses,
                                'Destination.BccAddresses.member')

    if reply_addresses:
        self._build_list_params(params, reply_addresses,
                                'ReplyToAddresses.member')

    return self._make_request('SendEmail', params)
def send_raw_email(self, raw_message, source=None, destinations=None):
    """Sends an email message, with header and content specified by the
    client. The SendRawEmail action is useful for sending multipart MIME
    emails, with attachments or inline content. The raw text of the message
    must comply with Internet email standards; otherwise, the message
    cannot be sent.

    :type source: string
    :param source: The sender's email address. Amazon's docs say:

      If you specify the Source parameter, then bounce notifications and
      complaints will be sent to this email address. This takes precedence
      over any Return-Path header that you might include in the raw text of
      the message.

    :type raw_message: string
    :param raw_message: The raw text of the message. The client is
      responsible for ensuring the following:

      - Message must contain a header and a body, separated by a blank line.
      - All required header fields must be present.
      - Each part of a multipart MIME message must be formatted properly.
      - MIME content types must be among those supported by Amazon SES.
        Refer to the Amazon SES Developer Guide for more details.
      - Content must be base64-encoded, if MIME requires it.

    :type destinations: list of strings or string
    :param destinations: A list of destinations for the message.
    """
    # The API transports the message base64-encoded; encode unicode
    # payloads as UTF-8 bytes first.
    if isinstance(raw_message, unicode):
        raw_message = raw_message.encode('utf-8')

    params = {
        'RawMessage.Data': base64.b64encode(raw_message),
    }

    if source:
        params['Source'] = source

    if destinations:
        self._build_list_params(params, destinations,
                                'Destinations.member')

    return self._make_request('SendRawEmail', params)
def list_verified_email_addresses(self):
    """Fetch a list of the email addresses that have been verified.

    :rtype: dict
    :returns: A ListVerifiedEmailAddressesResponse structure. Note that
              keys must be unicode strings.
    """
    # Read-only call: no parameters beyond the action name.
    return self._make_request('ListVerifiedEmailAddresses')
def get_send_quota(self):
    """Fetches the user's current activity limits.

    :rtype: dict
    :returns: A GetSendQuotaResponse structure. Note that keys must be
              unicode strings.
    """
    # Read-only call: no parameters beyond the action name.
    return self._make_request('GetSendQuota')
def get_send_statistics(self):
    """Fetches the user's sending statistics. The result is a list of data
    points, representing the last two weeks of sending activity.

    Each data point in the list contains statistics for a 15-minute
    interval.

    :rtype: dict
    :returns: A GetSendStatisticsResponse structure. Note that keys must be
              unicode strings.
    """
    # Read-only call: no parameters beyond the action name.
    return self._make_request('GetSendStatistics')
def delete_verified_email_address(self, email_address):
    """Remove *email_address* from the account's verified-address list.

    :type email_address: string
    :param email_address: The email address to be removed from the list of
                          verified addresses.

    :rtype: dict
    :returns: A DeleteVerifiedEmailAddressResponse structure. Note that
              keys must be unicode strings.
    """
    params = {'EmailAddress': email_address}
    return self._make_request('DeleteVerifiedEmailAddress', params)
def verify_email_address(self, email_address):
    """Start verification of *email_address*.

    SES will send a confirmation message to the given address.

    :type email_address: string
    :param email_address: The email address to be verified.

    :rtype: dict
    :returns: A VerifyEmailAddressResponse structure. Note that keys must
              be unicode strings.
    """
    params = {'EmailAddress': email_address}
    return self._make_request('VerifyEmailAddress', params)
def verify_domain_dkim(self, domain):
    """Begin Easy DKIM setup for *domain*.

    Returns the DNS tokens -- ``CNAME`` records pointing at DKIM public
    keys hosted by Amazon SES -- that must be published (and remain
    published) in the domain's DNS for DKIM signing to work.  Once they
    are in place, SES can DKIM-sign mail from the domain; toggle signing
    with the ``SetIdentityDkimEnabled`` action.  See the `Amazon SES
    Developer Guide
    <http://docs.amazonwebservices.com/ses/latest/DeveloperGuide>`_ for
    more about Easy DKIM.

    :type domain: string
    :param domain: The domain name.
    """
    params = {'Domain': domain}
    return self._make_request('VerifyDomainDkim', params)
def set_identity_dkim_enabled(self, identity, dkim_enabled):
    """Turn Easy DKIM signing on or off for *identity*.

    * Enabling a domain identity (e.g. ``example.com``) makes SES
      DKIM-sign all mail sent from addresses under that domain
      (e.g. ``user@example.com``).
    * Enabling an email-address identity affects only that address, and
      is only possible once its domain has completed Easy DKIM setup via
      the console or the ``VerifyDomainDkim`` action.

    :type identity: string
    :param identity: An email address or domain name.

    :type dkim_enabled: bool
    :param dkim_enabled: Specifies whether or not to enable DKIM signing.
    """
    params = {
        'Identity': identity,
        'DkimEnabled': 'true' if dkim_enabled else 'false',
    }
    return self._make_request('SetIdentityDkimEnabled', params)
def get_identity_dkim_attributes(self, identities):
    """Get attributes associated with a list of verified identities.

    Given a list of verified identities (email addresses and/or domains),
    returns a structure describing identity notification attributes.

    :type identities: list
    :param identities: A list of verified identities (email addresses
        and/or domains).
    """
    # Identities are serialized as Identities.member.1, .2, ... per the
    # AWS query-parameter list convention.
    params = {}
    self._build_list_params(params, identities, 'Identities.member')
    return self._make_request('GetIdentityDkimAttributes', params)
def list_identities(self):
    """Returns a list containing all of the identities (email addresses
    and domains) for a specific AWS Account, regardless of
    verification status.

    :rtype: dict
    :returns: A ListIdentitiesResponse structure. Note that
              keys must be unicode strings.
    """
    # Read-only call: no parameters beyond the action name.
    return self._make_request('ListIdentities')
def get_identity_verification_attributes(self, identities):
    """Given a list of identities (email addresses and/or domains),
    returns the verification status and (for domain identities)
    the verification token for each identity.

    :type identities: list of strings or string
    :param identities: List of identities.

    :rtype: dict
    :returns: A GetIdentityVerificationAttributesResponse structure.
              Note that keys must be unicode strings.
    """
    # Identities are serialized as Identities.member.1, .2, ... per the
    # AWS query-parameter list convention.
    params = {}
    self._build_list_params(params, identities,
                            'Identities.member')
    return self._make_request('GetIdentityVerificationAttributes', params)
def verify_domain_identity(self, domain):
    """Start verification of a domain identity.

    :type domain: string
    :param domain: The domain to be verified.

    :rtype: dict
    :returns: A VerifyDomainIdentityResponse structure. Note that keys
              must be unicode strings.
    """
    params = {'Domain': domain}
    return self._make_request('VerifyDomainIdentity', params)
def verify_email_identity(self, email_address):
    """Start verification of an email-address identity.

    SES will send a confirmation message to the given address.

    :type email_address: string
    :param email_address: The email address to be verified.

    :rtype: dict
    :returns: A VerifyEmailIdentityResponse structure. Note that keys must
              be unicode strings.
    """
    params = {'EmailAddress': email_address}
    return self._make_request('VerifyEmailIdentity', params)
def delete_identity(self, identity):
    """Remove an identity (email address or domain) from the verified list.

    :type identity: string
    :param identity: The identity to be deleted.

    :rtype: dict
    :returns: A DeleteIdentityResponse structure. Note that keys must
              be unicode strings.
    """
    params = {'Identity': identity}
    return self._make_request('DeleteIdentity', params)
| bsd-3-clause |
tbeadle/docker-py | docker/utils/decorators.py | 11 | 1521 | import functools
from .. import errors
from . import utils
def check_resource(f):
    """Decorator that normalizes the first resource argument of *f*.

    The resource may arrive as a positional id, as a ``container`` or
    ``image`` keyword, or as a dict carrying an ``Id`` key; it is reduced
    to a plain id string before *f* is invoked.  Raises
    ``errors.NullResource`` when nothing usable was supplied.
    """
    @functools.wraps(f)
    def wrapped(self, resource_id=None, *args, **kwargs):
        if resource_id is None:
            # Only consume a keyword when it holds a truthy value.
            for key in ('container', 'image'):
                if kwargs.get(key):
                    resource_id = kwargs.pop(key)
                    break
        if isinstance(resource_id, dict):
            resource_id = resource_id.get('Id')
        if not resource_id:
            raise errors.NullResource(
                'image or container param is undefined'
            )
        return f(self, resource_id, *args, **kwargs)
    return wrapped
def minimum_version(version):
    """Decorator factory gating a client method behind an API version.

    The produced wrapper raises ``errors.InvalidVersion`` when the
    client's negotiated API version (``self._version``) is lower than
    *version*; otherwise it calls through unchanged.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            if utils.version_lt(self._version, version):
                message = '{0} is not available for version < {1}'.format(
                    f.__name__, version
                )
                raise errors.InvalidVersion(message)
            return f(self, *args, **kwargs)
        return wrapper
    return decorator
def update_headers(f):
    """Decorator merging the client's configured ``HttpHeaders`` into a call.

    If ``self._auth_configs`` carries an ``HttpHeaders`` mapping, it is
    injected into the wrapped call's ``headers`` keyword (caller-supplied
    headers are kept and win only for keys set by the config update order:
    config values overwrite duplicates, other caller keys survive).

    CONSISTENCY FIX: apply ``functools.wraps`` like the sibling decorators
    in this module (``check_resource``, ``minimum_version``), so the
    wrapped function keeps its name and docstring.
    """
    @functools.wraps(f)
    def inner(self, *args, **kwargs):
        if 'HttpHeaders' in self._auth_configs:
            if 'headers' not in kwargs:
                kwargs['headers'] = self._auth_configs['HttpHeaders']
            else:
                kwargs['headers'].update(self._auth_configs['HttpHeaders'])
        return f(self, *args, **kwargs)
    return inner
| apache-2.0 |
samhoo/askbot-realworld | askbot/urls.py | 1 | 9313 | """
askbot askbot url configuraion file
"""
import os.path
from django.conf import settings
from django.conf.urls.defaults import url, patterns, include
from django.conf.urls.defaults import handler500, handler404
from django.contrib import admin
from askbot import views
from askbot.feed import RssLastestQuestionsFeed, RssIndividualQuestionFeed
from askbot.sitemap import QuestionsSitemap
from askbot.skins.utils import update_media_revision
admin.autodiscover()
update_media_revision()#needs to be run once, so put it here

# When ASKBOT_TRANSLATE_URL is enabled, url path fragments below are run
# through ugettext so they can be localized; otherwise ``_`` is a no-op.
if hasattr(settings, "ASKBOT_TRANSLATE_URL") and settings.ASKBOT_TRANSLATE_URL:
    from django.utils.translation import ugettext as _
else:
    _ = lambda s:s

# RSS feeds exposed under the ``feeds`` url entry below.
feeds = {
    'rss': RssLastestQuestionsFeed,
    'question':RssIndividualQuestionFeed
}
# Sitemap sections served by django.contrib.sitemaps.
sitemaps = {
    'questions': QuestionsSitemap
}

# Filesystem location of the askbot package (used by the doc/ static view).
APP_PATH = os.path.dirname(__file__)
# Main url table.  Path fragments wrapped in _() are translatable when
# ASKBOT_TRANSLATE_URL is on; entries marked "ajax only" are not meant to
# be visited directly.  The regex strings are load-bearing -- edit with care.
urlpatterns = patterns('',
    url(r'^$', views.readers.index, name='index'),
    url(
        r'^sitemap.xml$',
        'django.contrib.sitemaps.views.sitemap',
        {'sitemaps': sitemaps},
        name='sitemap'
    ),
    url(
        r'^m/(?P<skin>[^/]+)/media/(?P<resource>.*)$',
        views.meta.media,
        name='askbot_media',
    ),
    #no translation for this url!!
    url(r'import-data/$', views.writers.import_data, name='import_data'),
    url(r'^%s$' % _('about/'), views.meta.about, name='about'),
    url(r'^%s$' % _('faq/'), views.meta.faq, name='faq'),
    url(r'^%s$' % _('privacy/'), views.meta.privacy, name='privacy'),
    # --- answers ---
    url(
        r'^%s(?P<id>\d+)/%s$' % (_('answers/'), _('edit/')),
        views.writers.edit_answer,
        name='edit_answer'
    ),
    url(
        r'^%s(?P<id>\d+)/%s$' % (_('answers/'), _('revisions/')),
        views.readers.revisions,
        kwargs = {'object_name': 'Answer'},
        name='answer_revisions'
    ),
    # --- questions ---
    url(#this url works both normally and through ajax
        r'^%s$' % _('questions/'),
        views.readers.questions,
        name='questions'
    ),
    url(
        r'^api/get_questions/',
        views.commands.api_get_questions,
        name = 'api_get_questions'
    ),
    url(
        r'^%s%s$' % (_('questions/'), _('ask/')),
        views.writers.ask,
        name='ask'
    ),
    url(
        r'^%s(?P<id>\d+)/%s$' % (_('questions/'), _('edit/')),
        views.writers.edit_question,
        name='edit_question'
    ),
    url(#this url is both regular and ajax
        r'^%s(?P<id>\d+)/%s$' % (_('questions/'), _('retag/')),
        views.writers.retag_question,
        name='retag_question'
    ),
    url(
        r'^%s(?P<id>\d+)/%s$' % (_('questions/'), _('close/')),
        views.commands.close,
        name='close'
    ),
    url(
        r'^%s(?P<id>\d+)/%s$' % (_('questions/'), _('reopen/')),
        views.commands.reopen,
        name='reopen'
    ),
    url(
        r'^%s(?P<id>\d+)/%s$' % (_('questions/'), _('answer/')),
        views.writers.answer,
        name='answer'
    ),
    url(#ajax only
        r'^%s(?P<id>\d+)/%s$' % (_('questions/'), _('vote/')),
        views.commands.vote,
        name='vote'
    ),
    url(
        r'^%s(?P<id>\d+)/%s$' % (_('questions/'), _('revisions/')),
        views.readers.revisions,
        kwargs = {'object_name': 'Question'},
        name='question_revisions'
    ),
    url(
        r'^%s%s$' % (_('widgets/'), _('questions/')),
        views.readers.widget_questions,
        name='widget_questions'
    ),
    # --- comments (ajax) ---
    url(#ajax only
        r'^comment/upvote/$',
        views.commands.upvote_comment,
        name = 'upvote_comment'
    ),
    url(#ajax only
        r'^post_comments/$',
        views.writers.post_comments,
        name='post_comments'
    ),
    url(#ajax only
        r'^edit_comment/$',
        views.writers.edit_comment,
        name='edit_comment'
    ),
    url(#ajax only
        r'^comment/delete/$',
        views.writers.delete_comment,
        name='delete_comment'
    ),
    url(#ajax only
        r'^comment/get_text/$',
        views.readers.get_comment,
        name='get_comment'
    ),
    url(#ajax only
        r'^question/get_body/$',
        views.readers.get_question_body,
        name='get_question_body'
    ),
    # --- tags ---
    url(
        r'^%s$' % _('tags/'),
        views.readers.tags,
        name='tags'
    ),
    url(#ajax only
        r'^%s%s$' % ('mark-tag/', 'interesting/'),
        views.commands.mark_tag,
        kwargs={'reason':'good','action':'add'},
        name='mark_interesting_tag'
    ),
    url(#ajax only
        r'^%s%s$' % ('mark-tag/', 'ignored/'),
        views.commands.mark_tag,
        kwargs={'reason':'bad','action':'add'},
        name='mark_ignored_tag'
    ),
    url(#ajax only
        r'^unmark-tag/',
        views.commands.mark_tag,
        kwargs={'action':'remove'},
        name='unmark_tag'
    ),
    url(#ajax only
        r'^set-tag-filter-strategy/',
        views.commands.set_tag_filter_strategy,
        name = 'set_tag_filter_strategy'
    ),
    url(
        r'^get-tags-by-wildcard/',
        views.commands.get_tags_by_wildcard,
        name = 'get_tags_by_wildcard'
    ),
    url(
        r'^get-tag-list/',
        views.commands.get_tag_list,
        name = 'get_tag_list'
    ),
    url(
        r'^swap-question-with-answer/',
        views.commands.swap_question_with_answer,
        name = 'swap_question_with_answer'
    ),
    url(
        r'^%s$' % _('subscribe-for-tags/'),
        views.commands.subscribe_for_tags,
        name = 'subscribe_for_tags'
    ),
    # --- users ---
    url(
        r'^%s$' % _('users/'),
        views.users.users,
        name='users'
    ),
    #todo: rename as user_edit, b/c that's how template is named
    url(
        r'^%s(?P<id>\d+)/%s$' % (_('users/'), _('edit/')),
        views.users.edit_user,
        name='edit_user'
    ),
    url(
        r'^%s(?P<id>\d+)/(?P<slug>.+)/%s$' % (
            _('users/'),
            _('subscriptions/'),
        ),
        views.users.user,
        kwargs = {'tab_name': 'email_subscriptions'},
        name = 'user_subscriptions'
    ),
    url(
        r'^%s(?P<id>\d+)/(?P<slug>.+)/$' % _('users/'),
        views.users.user,
        name='user_profile'
    ),
    url(
        r'^%s$' % _('users/update_has_custom_avatar/'),
        views.users.update_has_custom_avatar,
        name='user_update_has_custom_avatar'
    ),
    # --- badges ---
    url(
        r'^%s$' % _('badges/'),
        views.meta.badges,
        name='badges'
    ),
    url(
        r'^%s(?P<id>\d+)//*' % _('badges/'),
        views.meta.badge,
        name='badge'
    ),
    url(#ajax only
        r'^%s%s$' % (_('messages/'), _('markread/')),
        views.commands.read_message,
        name='read_message'
    ),
    url(#ajax only
        r'^manage_inbox/$',
        views.commands.manage_inbox,
        name='manage_inbox'
    ),
    url(
        r'^feeds/(?P<url>.*)/$',
        'django.contrib.syndication.views.feed',
        {'feed_dict': feeds},
        name='feeds'
    ),
    #upload url is ajax only
    url( r'^%s$' % _('upload/'), views.writers.upload, name='upload'),
    url(r'^%s$' % _('feedback/'), views.meta.feedback, name='feedback'),
    #url(r'^feeds/rss/$', RssLastestQuestionsFeed, name="latest_questions_feed"),
    url(
        r'^doc/(?P<path>.*)$',
        'django.views.static.serve',
        {'document_root': os.path.join(APP_PATH,'doc','build','html').replace('\\','/')},
        name='askbot_docs',
    ),
    # --- admin-configurable css/js served from settings ---
    url(
        '^custom\.css$',
        views.meta.config_variable,
        kwargs = {
            'variable_name': 'CUSTOM_CSS',
            'mimetype': 'text/css'
        },
        name = 'custom_css'
    ),
    url(
        '^custom\.js$',
        views.meta.config_variable,
        kwargs = {
            'variable_name': 'CUSTOM_JS',
            'mimetype': 'text/javascript'
        },
        name = 'custom_js'
    ),
    url(
        r'^jsi18n/$',
        'django.views.i18n.javascript_catalog',
        {'domain': 'djangojs','packages': ('askbot',)},
        name = 'askbot_jsi18n'
    ),
)
# Question-detail url: StackExchange-compatible plural prefix
# ('questions/<id>') vs askbot's default singular ('question/<id>').
if getattr(settings, 'ASKBOT_USE_STACKEXCHANGE_URLS', False):
    urlpatterns += (url(
        r'^%s(?P<id>\d+)/' % _('questions/'),
        views.readers.question,
        name='question'
    ),)
else:
    urlpatterns += (url(
        r'^%s(?P<id>\d+)/' % _('question/'),
        views.readers.question,
        name='question'
    ),)

# Mount the bundled openid auth app only when it is installed.
if 'askbot.deps.django_authopenid' in settings.INSTALLED_APPS:
    urlpatterns += (
        url(r'^%s' % _('account/'), include('askbot.deps.django_authopenid.urls')),
    )

if 'avatar' in settings.INSTALLED_APPS:
    #unfortunately we have to wire avatar urls here,
    #because views add and change are adapted to
    #use jinja2 templates
    urlpatterns += (
        url('^avatar/add/$', views.avatar_views.add, name='avatar_add'),
        url(
            '^avatar/change/$',
            views.avatar_views.change,
            name='avatar_change'
        ),
        url(
            '^avatar/delete/$',
            views.avatar_views.delete,
            name='avatar_delete'
        ),
        url(#this url we inherit from the original avatar app
            '^avatar/render_primary/(?P<user_id>[\+\d]+)/(?P<size>[\d]+)/$',
            views.avatar_views.render_primary,
            name='avatar_render_primary'
        ),
    )
| gpl-3.0 |
praw-dev/praw | praw/util/token_manager.py | 1 | 6726 | """Token Manager classes.
There should be a 1-to-1 mapping between an instance of a subclass of
:class:`.BaseTokenManager` and a :class:`.Reddit` instance.
A few proof of concept token manager classes are provided here, but it is expected that
PRAW users will create their own token manager classes suitable for their needs.
See :ref:`using_refresh_tokens` for examples on how to leverage these classes.
"""
import sqlite3
from abc import ABC, abstractmethod
class BaseTokenManager(ABC):
    """Abstract base class shared by every token manager.

    A token manager instance is bound 1-to-1 to a single :class:`.Reddit`
    instance via the :attr:`.reddit` property.
    """

    def __init__(self):
        """Initialize state needed by all token manager subclasses."""
        self._reddit = None

    @property
    def reddit(self):
        """Return the :class:`.Reddit` instance bound to the token manager."""
        return self._reddit

    @reddit.setter
    def reddit(self, value):
        # The binding is established automatically and must never be
        # rebound afterwards -- guard against accidental reassignment.
        if self._reddit is not None:
            raise RuntimeError(
                "``reddit`` can only be set once and is done automatically"
            )
        self._reddit = value

    @abstractmethod
    def post_refresh_callback(self, authorizer):
        """Handle callback that is invoked after a refresh token is used.

        :param authorizer: The ``prawcore.Authorizer`` instance used containing
            ``access_token`` and ``refresh_token`` attributes.

        Called after the access and refresh tokens are refreshed; useful for
        persisting the updated ``refresh_token``.

        """

    @abstractmethod
    def pre_refresh_callback(self, authorizer):
        """Handle callback that is invoked before refreshing PRAW's authorization.

        :param authorizer: The ``prawcore.Authorizer`` instance used containing
            ``access_token`` and ``refresh_token`` attributes.

        Useful for inspecting or modifying the authorizer's attributes, such
        as setting its ``refresh_token``.

        """
class FileTokenManager(BaseTokenManager):
    """Token manager that persists a single refresh token in a plain file.

    The file containing the initial ``refresh_token`` must be created before
    first use.

    .. warning::

        Do not share one ``file`` between concurrently running instances of
        this class -- doing so may corrupt the token. Use
        :class:`.SQLiteTokenManager` when more than one PRAW instance must
        manage the same ``refresh_token`` chain.

    """

    def __init__(self, filename):
        """Remember the path of the file that holds the refresh token.

        :param filename: The file the contains the refresh token.

        """
        super().__init__()
        self._filename = filename

    def post_refresh_callback(self, authorizer):
        """Persist the newly issued refresh token to the file."""
        with open(self._filename, "w") as handle:
            handle.write(authorizer.refresh_token)

    def pre_refresh_callback(self, authorizer):
        """Populate the authorizer's refresh token from the file when unset."""
        if authorizer.refresh_token is None:
            with open(self._filename) as handle:
                authorizer.refresh_token = handle.read().strip()
class SQLiteTokenManager(BaseTokenManager):
    """Token manager that persists refresh tokens in a SQLite database.

    Unlike :class:`.FileTokenManager`, the database need not exist beforehand;
    it is created automatically on first use. However, an initial
    ``refresh_token`` must be stored via :meth:`.register` before the manager
    can operate. See :ref:`sqlite_token_manager` for an example of use.

    .. warning::
        This class is untested on Windows because we encountered file locking
        issues in the test environment.
    """

    def __init__(self, database, key):
        """Open the database and remember the lookup key.

        :param database: The path to the SQLite database.
        :param key: The key used to locate the ``refresh_token``. This can be
            anything, e.g. the ``client_id`` when each client has its own
            token, or a Redditor's ``username`` when managing several users.
        """
        super().__init__()
        self._connection = sqlite3.connect(database)
        self._connection.execute(
            "CREATE TABLE IF NOT EXISTS tokens (id, refresh_token, updated_at)"
        )
        self._connection.execute(
            "CREATE UNIQUE INDEX IF NOT EXISTS ux_tokens_id on tokens(id)"
        )
        self._connection.commit()
        self.key = key

    def _get(self):
        # Mirror dict semantics: raise KeyError when no token is stored.
        row = self._connection.execute(
            "SELECT refresh_token FROM tokens WHERE id=?", (self.key,)
        ).fetchone()
        if row is None:
            raise KeyError
        return row[0]

    def _set(self, refresh_token):
        """Store ``refresh_token``, overwriting any value stored for ``key``."""
        self._connection.execute(
            "REPLACE INTO tokens VALUES (?, ?, datetime('now'))",
            (self.key, refresh_token),
        )
        self._connection.commit()

    def is_registered(self):
        """Return whether or not ``key`` already has a ``refresh_token``."""
        row = self._connection.execute(
            "SELECT refresh_token FROM tokens WHERE id=?", (self.key,)
        ).fetchone()
        return row is not None

    def post_refresh_callback(self, authorizer):
        """Persist the new refresh token after a refresh."""
        self._set(authorizer.refresh_token)
        # Clearing the in-memory token keeps the database authoritative and
        # forces pre_refresh_callback to always reload the latest value.
        authorizer.refresh_token = None

    def pre_refresh_callback(self, authorizer):
        """Load the refresh token from the database before a refresh."""
        assert authorizer.refresh_token is None
        authorizer.refresh_token = self._get()

    def register(self, refresh_token):
        """Store the initial refresh token unless one already exists.

        :returns: ``True`` if ``refresh_token`` is saved to the database,
            otherwise ``False`` if there is already a ``refresh_token`` for
            the associated ``key``.
        """
        cursor = self._connection.execute(
            "INSERT OR IGNORE INTO tokens VALUES (?, ?, datetime('now'))",
            (self.key, refresh_token),
        )
        self._connection.commit()
        return cursor.rowcount == 1
| bsd-2-clause |
empyrean-project/Empyrean | share/seeds/generate-seeds.py | 1 | 4186 | #!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6-mapped representation.

    Accepted forms: ``<name>.onion`` (OnionCat mapping), dotted IPv4, IPv6,
    and the legacy little-endian ``0xDDBBCCAA`` IPv4 form.

    :raises ValueError: if the address cannot be parsed.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # BUG FIX: this previously raised "ValueError('Invalid onion %s' % s)"
            # where 's' was undefined, producing a NameError instead.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Parse a host spec into a ``(16-byte address, port)`` tuple.

    ``s`` may be ``<host>``, ``<host>:<port>``, ``[<ipv6>]`` or
    ``[<ipv6>]:<port>``; ``defaultport`` is used when no port is given.
    """
    # Raw string avoids the invalid "\[" escape (DeprecationWarning on
    # modern Python); the pattern itself is unchanged.
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # ipv6
        host = match.group(1)
        port = match.group(2)
    else:
        (host,_,port) = s.partition(':')
    # Normalize the port for both branches: fall back to the default when
    # missing, otherwise convert the captured string to an int.
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Write one C ``SeedSpec6`` array named ``structname`` to ``g``.

    Reads one address spec per line from ``f``; '#' starts a comment and
    blank lines are skipped. ``defaultport`` applies when a spec has no port.
    """
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    entries = []
    for raw in f:
        # Drop trailing comments, then surrounding whitespace.
        spec = raw.split('#', 1)[0].strip()
        if not spec:
            continue
        (host, port) = parse_spec(spec, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        entries.append('    {{%s}, %i}' % (hoststr, port))
    # Comma-separate the entries exactly as the incremental writer did.
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    # Usage: generate-seeds.py <dir containing nodes_main.txt / nodes_test.txt>
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    # The generated header is written to stdout; redirect to capture it.
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef H_CHAINPARAMSSEEDS\n')
    g.write('#define H_CHAINPARAMSSEEDS\n')
    g.write('// List of fixed seed nodes for the bitcoin network\n')
    g.write('// AUTOGENERATED by contrib/devtools/generate-seeds.py\n\n')
    g.write('// Each line contains a 16-byte IPv6 address and a port.\n')
    g.write('// IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    # Main-net and test-net arrays, each with its own default port.
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 4243)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 14243)
    g.write('#endif\n')
if __name__ == '__main__':
main()
| mit |
AdmiralenOla/Scoary | scoary/classes.py | 1 | 22436 | import sys
import logging
import os
import errno
# Note: The Matrix and QuadTree implementations are heavily based on
# original implementations by Christian Storm Pedersen.
# Python 2/3 annoyances
try:
xrange
except NameError:
xrange = range
class PimpedFileHandler(logging.FileHandler):
    """
    A filehandler that can create directories if needed.
    """
    def __init__(self, filename, mode='w', encoding=None, delay=0):
        """
        Initiates the filehandler and create dir if it does not exist.
        """
        self.makedir(filename)
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)

    def makedir(self, filename):
        """Ensure the directory for filename exists and is writable.

        Exits the process with a CRITICAL message (matching the original
        behaviour) when the directory cannot be created or written to.
        """
        path = os.path.dirname(filename)
        # EAFP: attempt creation and tolerate an already-existing directory.
        # This removes the check-then-create race of the original
        # os.path.isdir()/os.makedirs() sequence.
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                sys.exit("CRITICAL: Need write permission to outdir")
        # Dir exists (or was just created) — verify we can write into it.
        if not (os.access(path, os.W_OK) and
                os.access(path, os.X_OK)):
            sys.exit("CRITICAL: Need write permission to outdir")
def counted(fn):
    """Decorator that counts calls to ``fn`` via the ``called`` attribute."""
    def _invoke(*args, **kwargs):
        _invoke.called += 1
        return fn(*args, **kwargs)
    _invoke.called = 0
    _invoke.__name__ = fn.__name__
    return _invoke
class ScoaryLogger(logging.Logger):
    """Logger whose severity methods count their own invocations.

    Each method is wrapped by ``counted``, so callers can read e.g.
    ``logger.info.called`` to learn how many INFO messages were emitted.
    """
    def __init__(self, name, level=logging.DEBUG):
        super(ScoaryLogger, self).__init__(name, level)

    @counted
    def info(self, *args, **kwargs):
        super(ScoaryLogger, self).info(*args, **kwargs)

    @counted
    def warning(self, *args, **kwargs):
        super(ScoaryLogger, self).warning(*args, **kwargs)

    @counted
    def critical(self, *args, **kwargs):
        # BUG FIX: previously delegated to super().warning(), so CRITICAL
        # messages were recorded at WARNING level.
        super(ScoaryLogger, self).critical(*args, **kwargs)

    @counted
    def error(self, *args, **kwargs):
        # BUG FIX: previously delegated to super().warning(), so ERROR
        # messages were recorded at WARNING level.
        super(ScoaryLogger, self).error(*args, **kwargs)
class Matrix:
    """
    A square matrix stored as a list of row lists.
    """
    def __init__(self, dim, elm=sys.maxsize):
        """Build a dim x dim matrix with every cell initialised to elm."""
        self.undef = sys.maxsize
        self.dim = int(dim)
        # One fresh list per row; cells within a row share the elm reference,
        # exactly as repeated list-multiplication did.
        self.data = [[elm] * self.dim for _ in range(self.dim)]

    def __getitem__(self, i):
        """Return row i of the matrix (a mutable list)."""
        return self.data[i]

    def __str__(self):
        # Each row renders as its cells followed by single spaces; rows are
        # newline-separated with no trailing newline.
        rendered_rows = []
        for row in self.data:
            rendered_rows.append("".join(str(cell) + " " for cell in row))
        return "\n".join(rendered_rows)
class QuadTree:
    """
    A basic QuadTree with names
    """
    def __init__(self, dim, names=None):
        """
        Constructs a quad tree of dimension dim and fills it with 0's
        """
        self.undef = sys.maxsize
        self.dim = dim
        # Pad to an even size so the matrix tiles cleanly into 2x2 quads.
        n = self.dim + self.dim % 2
        if names is None:
            names = [str(x) for x in xrange(dim)]
        self.names = names
        # level[0] holds the raw values; each higher level stores, per cell,
        # the minimum of the corresponding 2x2 quad one level below.
        self.level = []
        while n > 1:
            n += (n % 2)
            self.level.append(Matrix(n))
            n = (n+1) // 2
    def get_elm(self, i, j):
        """
        Returns the element at position (i,j) in the quad tree
        """
        return self.level[0][i][j]
    def insert_row(self, i, row):
        """
        Inserts row (of dim elements) as row number i
        """
        curr_row = row
        for l in self.level:
            # Pad odd-length rows with the sentinel so quads stay complete.
            if len(curr_row) % 2 == 1:
                curr_row.append(self.undef)
            next_row = []
            for j in xrange(len(curr_row)):
                l[i][j] = curr_row[j]
                # At every second column, propagate the quad minimum upward.
                if j % 2 == 1:
                    next_row.append(self.quad_min(i, j, l))
            i //= 2
            curr_row = next_row
    def insert_col(self, j, col):
        """
        Inserts col (of dim elements) as col number j
        """
        curr_col = col
        for l in self.level:
            # Pad odd-length columns with the sentinel so quads stay complete.
            if len(curr_col) % 2 == 1:
                curr_col.append(self.undef)
            next_col = []
            for i in xrange(len(curr_col)):
                l[i][j] = curr_col[i]
                # At every second row, propagate the quad minimum upward.
                if i % 2 == 1:
                    next_col.append(self.quad_min(i, j, l))
            j //= 2
            curr_col = next_col
    def min(self):
        """
        Returns minimum element stored in tree
        """
        return self.quad_min(0, 0, self.level[-1])
    def argmin(self):
        """
        Returns coordinates of minimum element in tree
        """
        # Descend from the coarsest level, doubling coordinates per level,
        # always following the quad that holds the minimum.
        i = j = 0
        for l in reversed(self.level[1:]):
            i, j = self.quad_argmin(i, j, l)
            i *= 2
            j *= 2
        return self.quad_argmin(i, j, self.level[0])
    def quad_min_all(self, i, j, l):
        """
        Returns the minimum element stored in the quad (i,j) and its
        coordinates
        """
        # Need even numbers
        i = (i//2) * 2
        j = (j//2) * 2
        return min((l[i][j], i, j),
                   (l[i+1][j], i+1, j),
                   (l[i][j+1], i, j+1),
                   (l[i+1][j+1], i+1, j+1))
    def quad_min(self, i, j, l):
        """
        Returns the minimum element stored in the quad containing (i,j)
        """
        return self.quad_min_all(i, j, l)[0]
    def quad_argmin(self, i, j, l):
        """
        Returns the coordinates of the minimum element in the quad
        containing (i,j)
        """
        return self.quad_min_all(i, j, l)[1:]
class PhyloTree:
    """
    A class that represents a binary tree. Phylotrees can be nested.
    They can also contain tips at left, right or both nodes. Stores the
    max number of paths under each of the following 5 conditions:
    1. Free path to AB.
    2. Free path to Ab.
    3. Free path to aB.
    4. Free path to ab.
    5. No free path.
    """
    # The four gene/trait states; "0" denotes "no free path".
    _STATES = ("AB", "Ab", "aB", "ab")

    def __init__(self, leftnode, rightnode, GTC):
        """
        Constructs a phylotree and links it to its left and right nodes.

        :param leftnode: a taxon name (str), a PhyloTree, or a nested
            [left, right] pair; same for rightnode.
        :param GTC: mapping from taxon name to its gene-trait combination.
        """
        self.leftnode = self._build_child(leftnode, GTC)
        self.rightnode = self._build_child(rightnode, GTC)
        # Initialize the max number of paths. -1 means "unreachable".
        self.maxvalues = \
            {"AB": -1, "Ab": -1, "aB": -1, "ab": -1, "0": -1}
        self.max_propairs = \
            {"AB": -1, "Ab": -1, "aB": -1, "ab": -1, "0": -1}
        self.max_antipairs = \
            {"AB": -1, "Ab": -1, "aB": -1, "ab": -1, "0": -1}
        self.calculate_max()
        self.max_contrasting_pairs = max(self.maxvalues.values())
        self.max_contrasting_propairs = max(self.max_propairs.values())
        self.max_contrasting_antipairs = max(
            self.max_antipairs.values())

    @staticmethod
    def _build_child(node, GTC):
        """Turn a str into a Tip, a pair into a subtree; pass trees through."""
        if type(node) is str:
            return Tip(GTC[node])
        if isinstance(node, PhyloTree):
            return node
        return PhyloTree(leftnode=node[0], rightnode=node[1], GTC=GTC)

    def calculate_max(self):
        """
        A method for calculating the max number of pairings under the 5
        conditions
        """
        for condition in self._STATES:
            pairings = self.calculate_max_condition(condition)
            self.maxvalues[condition] = pairings["Total"]
            self.max_propairs[condition] = pairings["Pro"]
            self.max_antipairs[condition] = pairings["Anti"]
        # Condition "nofree" is stored under the key "0".
        pairings = self.calculate_max_nofree()
        self.maxvalues["0"] = pairings["Total"]
        self.max_propairs["0"] = pairings["Pro"]
        self.max_antipairs["0"] = pairings["Anti"]

    def calculate_max_condition(self, condition):
        """
        When passed for example 'AB', evaluates the 9 distinct
        left/right assignments that leave a free path to 'AB':
        the condition on the left combined with (no free, Ab, aB, ab, AB)
        on the right, or (no free, Ab, aB, ab) on the left combined with
        the condition on the right.

        This replaces the original 27-variable enumeration with a
        table-driven loop; the candidate set and tie-breaking are identical.
        """
        others = [c for c in self._STATES if c != condition]
        combos = ([(condition, c) for c in ["0"] + others + [condition]] +
                  [(c, condition) for c in ["0"] + others])
        candidates = []
        for left_cond, right_cond in combos:
            # A combo is viable only when both subtrees can realize it.
            if (self.leftnode.maxvalues[left_cond] > -1 and
                    self.rightnode.maxvalues[right_cond] > -1):
                candidates.append((
                    self.leftnode.maxvalues[left_cond] +
                    self.rightnode.maxvalues[right_cond],
                    self.leftnode.max_propairs[left_cond] +
                    self.rightnode.max_propairs[right_cond],
                    self.leftnode.max_antipairs[left_cond] +
                    self.rightnode.max_antipairs[right_cond],
                ))
        return self._best(candidates)

    def calculate_max_nofree(self):
        """
        Under the condition of no free paths, only 5 distinct
        possibilities exist. (No free paths requires either that there
        are no free paths in the left or right nodes, or that a new pair
        is formed across the root from the left and right nodes)
        Left     Right    Result
        No free  No free
        AB       ab       +1 pair (pro)
        ab       AB       +1 pair (pro)
        Ab       aB       +1 pair (anti)
        aB       Ab       +1 pair (anti)
        """
        # (left state, right state, extra pairs, extra propairs, extra antipairs)
        combos = (("0", "0", 0, 0, 0),
                  ("AB", "ab", 1, 1, 0),
                  ("ab", "AB", 1, 1, 0),
                  ("Ab", "aB", 1, 0, 1),
                  ("aB", "Ab", 1, 0, 1))
        candidates = []
        for left_cond, right_cond, d_tot, d_pro, d_anti in combos:
            if (self.leftnode.maxvalues[left_cond] > -1 and
                    self.rightnode.maxvalues[right_cond] > -1):
                candidates.append((
                    self.leftnode.maxvalues[left_cond] +
                    self.rightnode.maxvalues[right_cond] + d_tot,
                    self.leftnode.max_propairs[left_cond] +
                    self.rightnode.max_propairs[right_cond] + d_pro,
                    self.leftnode.max_antipairs[left_cond] +
                    self.rightnode.max_antipairs[right_cond] + d_anti,
                ))
        return self._best(candidates)

    @staticmethod
    def _best(candidates):
        """Pick the max total; among total-maximal candidates, take the max
        number of propairs and antipairs independently (same tie-breaking
        as the original per-variable comparisons). Empty input -> all -1."""
        if not candidates:
            return {"Total": -1, "Pro": -1, "Anti": -1}
        max_pairs = max(total for total, _, _ in candidates)
        max_propairs = max(pro for total, pro, _ in candidates
                           if total == max_pairs)
        max_antipairs = max(anti for total, _, anti in candidates
                            if total == max_pairs)
        return {"Total": max_pairs,
                "Pro": max_propairs,
                "Anti": max_antipairs}
class Tip:
    """
    A leaf node holding a single gene/trait state, which can only be
    AB, Ab, aB, ab, A- or a-.
    """
    def __init__(self, tipvalue):
        """Record the tip's state and initialise its pairing tables."""
        self.tipvalue = tipvalue
        # Zero pairs are reachable for the tip's own state; every other
        # state is marked unreachable (-1).
        self.maxvalues = {
            state: 0 if state == tipvalue else -1
            for state in ("AB", "Ab", "aB", "ab", "0")
        }
        # Pro/anti tables start out as independent copies of maxvalues.
        self.max_propairs = dict(self.maxvalues)
        self.max_antipairs = dict(self.maxvalues)
if __name__ == '__main__':
pass
| gpl-3.0 |
Qalthos/ansible | lib/ansible/module_utils/facts/network/iscsi.py | 25 | 4647 | # iSCSI initiator related facts collection for Ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import subprocess
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.network.base import NetworkCollector
class IscsiInitiatorNetworkCollector(NetworkCollector):
    """Collect the host's iSCSI initiator name (IQN) as a network fact."""
    name = 'iscsi'
    _fact_ids = set()

    def collect(self, module=None, collected_facts=None):
        """
        Return {'iscsi_iqn': <name>} ('' when not determinable).

        Example of contents of /etc/iscsi/initiatorname.iscsi:
        ## DO NOT EDIT OR REMOVE THIS FILE!
        ## If you remove this file, the iSCSI daemon will not start.
        ## If you change the InitiatorName, existing access control lists
        ## may reject this initiator. The InitiatorName must be unique
        ## for each iSCSI initiator. Do NOT duplicate iSCSI InitiatorNames.
        InitiatorName=iqn.1993-08.org.debian:01:44a42c8ddb8b

        Example of output from the AIX lsattr command:
        # lsattr -E -l iscsi0
        disc_filename  /etc/iscsi/targets            Configuration file   False
        initiator_name iqn.localhost.hostid.7f000002 iSCSI Initiator Name True
        ...

        Example of output from the HP-UX iscsiutil command:
        #iscsiutil -l
        Initiator Name : iqn.1986-03.com.hp:mcel_VMhost3.1f355cf6-...
        Initiator Alias :
        ...
        """
        iscsi_facts = {}
        iscsi_facts['iscsi_iqn'] = ""
        if sys.platform.startswith('linux') or sys.platform.startswith('sunos'):
            for line in get_file_content('/etc/iscsi/initiatorname.iscsi', '').splitlines():
                # Skip comments ('#' or ';') and blank lines.
                if line.startswith('#') or line.startswith(';') or line.strip() == '':
                    continue
                if line.startswith('InitiatorName='):
                    iscsi_facts['iscsi_iqn'] = line.split('=', 1)[1]
                    break
        elif sys.platform.startswith('aix'):
            cmd = get_bin_path('lsattr')
            if cmd:
                cmd += " -E -l iscsi0"
                rc, out, err = module.run_command(cmd)
                if rc == 0 and out:
                    line = self.findstr(out, 'initiator_name')
                    # Guard against missing attribute in the output.
                    if line is not None:
                        iscsi_facts['iscsi_iqn'] = line.split()[1].rstrip()
        elif sys.platform.startswith('hp-ux'):
            # try to find it in the default PATH and opt_dirs
            cmd = get_bin_path('iscsiutil', opt_dirs=['/opt/iscsi/bin'])
            if cmd:
                cmd += " -l"
                rc, out, err = module.run_command(cmd)
                if out:
                    line = self.findstr(out, 'Initiator Name')
                    if line is not None:
                        iscsi_facts['iscsi_iqn'] = line.split(":", 1)[1].rstrip()
        return iscsi_facts

    def findstr(self, text, match):
        """Return the last line of ``text`` containing ``match``, or None.

        BUG FIX: the original returned the local ``found`` without ever
        initialising it, raising UnboundLocalError when no line matched.
        """
        found = None
        for line in text.splitlines():
            if match in line:
                found = line
        return found
| gpl-3.0 |
alexsmx/djangoAppengineSrcTemplate | django/db/backends/mysql/creation.py | 311 | 3019 | from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
    # This dictionary maps Field objects to their associated MySQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        'AutoField': 'integer AUTO_INCREMENT',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer UNSIGNED',
        'PositiveSmallIntegerField': 'smallint UNSIGNED',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'longtext',
        'TimeField': 'time',
    }
    def sql_table_creation_suffix(self):
        # Append CHARACTER SET / COLLATE clauses for the test database when
        # configured in the connection settings.
        suffix = []
        if self.connection.settings_dict['TEST_CHARSET']:
            suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
        if self.connection.settings_dict['TEST_COLLATION']:
            suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
        return ' '.join(suffix)
    def sql_for_inline_foreign_key_references(self, field, known_models, style):
        "All inline references are pending under MySQL"
        # Returning True signals that FK constraints must be emitted later
        # (ALTER TABLE) rather than inline in CREATE TABLE.
        return [], True
    def sql_for_inline_many_to_many_references(self, model, field, style):
        # Build the two FK columns of the M2M join table; the actual FK
        # constraints are deferred (see above) and returned separately.
        from django.db import models
        opts = model._meta
        qn = self.connection.ops.quote_name
        table_output = [
            '    %s %s %s,' %
                (style.SQL_FIELD(qn(field.m2m_column_name())),
                style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL')),
            '    %s %s %s,' %
                (style.SQL_FIELD(qn(field.m2m_reverse_name())),
                style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL'))
        ]
        # Deferred constraint tuples: (table, column, referenced table,
        # referenced column) for both ends of the M2M relation.
        deferred = [
            (field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
                opts.pk.column),
            (field.m2m_db_table(), field.m2m_reverse_name(),
                field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
            ]
        return table_output, deferred
| bsd-3-clause |
bzbarsky/servo | tests/wpt/css-tests/tools/pytest/_pytest/pdb.py | 176 | 3491 | """ interactive debugging with PDB, the Python Debugger. """
from __future__ import absolute_import
import pdb
import sys
import pytest
def pytest_addoption(parser):
    """Register the --pdb command line option in the 'general' group."""
    group = parser.getgroup("general")
    group._addoption(
        '--pdb',
        action="store_true",
        dest="usepdb",
        default=False,
        help="start the interactive Python debugger on errors.")
def pytest_namespace():
    """Expose ``pytest.set_trace`` backed by the capture-aware pseudo-PDB."""
    return dict(set_trace=pytestPDB().set_trace)
def pytest_configure(config):
    # Install the post-mortem plugin only when --pdb was requested.
    if config.getvalue("usepdb"):
        config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
    # Remember the originals so the monkeypatching below can be undone.
    old = (pdb.set_trace, pytestPDB._pluginmanager)
    def fin():
        # Restore pdb.set_trace and the class-level plugin manager/config.
        pdb.set_trace, pytestPDB._pluginmanager = old
        pytestPDB._config = None
    # Redirect pdb.set_trace to the capture-aware pytest variant for the
    # duration of the test session.
    pdb.set_trace = pytest.set_trace
    pytestPDB._pluginmanager = config.pluginmanager
    pytestPDB._config = config
    config._cleanup.append(fin)
class pytestPDB:
    """ Pseudo PDB that defers to the real pdb. """
    # Populated by pytest_configure for the active session; None outside it.
    _pluginmanager = None
    _config = None
    def set_trace(self):
        """ invoke PDB set_trace debugging, dropping any IO capturing. """
        import _pytest.config
        # Debug the caller's frame, not this wrapper's.
        frame = sys._getframe().f_back
        if self._pluginmanager is not None:
            # Turn off output capturing so the interactive prompt is usable.
            capman = self._pluginmanager.getplugin("capturemanager")
            if capman:
                capman.suspendcapture(in_=True)
            tw = _pytest.config.create_terminal_writer(self._config)
            tw.line()
            tw.sep(">", "PDB set_trace (IO-capturing turned off)")
            self._pluginmanager.hook.pytest_enter_pdb(config=self._config)
        pdb.Pdb().set_trace(frame)
class PdbInvoke:
    """Plugin that drops into PDB on test failures and internal errors."""
    def pytest_exception_interact(self, node, call, report):
        # Flush any captured output before handing control to the debugger.
        capman = node.config.pluginmanager.getplugin("capturemanager")
        if capman:
            out, err = capman.suspendcapture(in_=True)
            sys.stdout.write(out)
            sys.stdout.write(err)
        _enter_pdb(node, call.excinfo, report)
    def pytest_internalerror(self, excrepr, excinfo):
        # Print the internal error report, then post-mortem the traceback.
        for line in str(excrepr).split("\n"):
            sys.stderr.write("INTERNALERROR> %s\n" %line)
        sys.stderr.flush()
        tb = _postmortem_traceback(excinfo)
        post_mortem(tb)
def _enter_pdb(node, excinfo, rep):
    """Render the failure report to the terminal and start post-mortem pdb."""
    # XXX re-use the TerminalReporter's terminalwriter: this seems to avoid
    # some encoding related troubles, for not completely clear reasons.
    writer = node.config.pluginmanager.getplugin("terminalreporter")._tw
    writer.line()
    writer.sep(">", "traceback")
    rep.toterminal(writer)
    writer.sep(">", "entering PDB")
    post_mortem(_postmortem_traceback(excinfo))
    rep._pdbshown = True
    return rep
def _postmortem_traceback(excinfo):
# A doctest.UnexpectedException is not useful for post_mortem.
# Use the underlying exception instead:
from doctest import UnexpectedException
if isinstance(excinfo.value, UnexpectedException):
return excinfo.value.exc_info[2]
else:
return excinfo._excinfo[2]
def _find_last_non_hidden_frame(stack):
i = max(0, len(stack) - 1)
while i and stack[i][0].f_locals.get("__tracebackhide__", False):
i -= 1
return i
def post_mortem(t):
    """Interactively debug traceback ``t``, positioned on the last frame
    that is not marked with ``__tracebackhide__``."""
    class HiddenFrameAwarePdb(pdb.Pdb):
        def get_stack(self, f, t):
            stack, i = pdb.Pdb.get_stack(self, f, t)
            if f is None:
                # Entered via post-mortem: reposition on the last visible frame.
                i = _find_last_non_hidden_frame(stack)
            return stack, i

    debugger = HiddenFrameAwarePdb()
    debugger.reset()
    debugger.interaction(None, t)
| mpl-2.0 |
Serag8/Bachelor | google_appengine/lib/django-1.3/django/contrib/gis/geos/tests/test_mutable_list.py | 244 | 14587 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, http://www.aryehleib.com
# All rights reserved.
#
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
from django.contrib.gis.geos.mutable_list import ListMixin
from django.utils import unittest
class UserListA(ListMixin):
    """ListMixin subclass backed by an immutable tuple (rebuild on change)."""
    _mytype = tuple

    def __init__(self, i_list, *args, **kwargs):
        self._list = self._mytype(i_list)
        super(UserListA, self).__init__(*args, **kwargs)

    def __len__(self):
        return len(self._list)

    def __str__(self):
        return str(self._list)

    def __repr__(self):
        return repr(self._list)

    def _set_list(self, length, items):
        # A plain ``self._list = self._mytype(items)`` would also work, but
        # then the ``length`` argument supplied by ListMixin would never be
        # exercised -- so fill a pre-sized buffer instead.
        buf = ['x'] * length
        pos = 0
        for value in items:
            buf[pos] = value
            pos += 1
        self._list = self._mytype(buf)

    def _get_single_external(self, index):
        return self._list[index]
class UserListB(UserListA):
    # Backing store is a mutable list, so single-item assignment can happen
    # in place via _set_single (exercises ListMixin's other code path).
    _mytype = list

    def _set_single(self, index, value):
        self._list[index] = value
def nextRange(length):
    """Return a fresh run of ``length`` consecutive ints.

    Each call starts 100 higher than the previous one, so successive calls
    never produce overlapping values (the tests rely on this).
    """
    nextRange.start += 100
    base = nextRange.start
    return range(base, base + length)

nextRange.start = 0
class ListMixinTest(unittest.TestCase):
    """
    Tests base class ListMixin by comparing a list clone which is
    a ListMixin subclass with a real Python list.

    Every operation is performed on both ``pl`` (a plain list) and ``ul``
    (the ListMixin-backed clone); the two must stay in lockstep.
    """
    limit = 3
    listType = UserListA

    def lists_of_len(self, length=None):
        # Return a (plain list, clone) pair with identical initial contents.
        if length is None: length = self.limit
        pl = range(length)
        return pl, self.listType(pl)

    def limits_plus(self, b):
        # All indices from just-out-of-range negative to just-out-of-range
        # positive, widened by b on each side.
        return range(-self.limit - b, self.limit + b)

    def step_range(self):
        # All valid nonzero slice steps.
        return range(-1 - self.limit, 0) + range(1, 1 + self.limit)

    def test01_getslice(self):
        'Slice retrieval'
        pl, ul = self.lists_of_len()
        for i in self.limits_plus(1):
            self.assertEqual(pl[i:], ul[i:], 'slice [%d:]' % (i))
            self.assertEqual(pl[:i], ul[:i], 'slice [:%d]' % (i))

            for j in self.limits_plus(1):
                self.assertEqual(pl[i:j], ul[i:j], 'slice [%d:%d]' % (i,j))
                for k in self.step_range():
                    self.assertEqual(pl[i:j:k], ul[i:j:k], 'slice [%d:%d:%d]' % (i,j,k))

            for k in self.step_range():
                self.assertEqual(pl[i::k], ul[i::k], 'slice [%d::%d]' % (i,k))
                self.assertEqual(pl[:i:k], ul[:i:k], 'slice [:%d:%d]' % (i,k))

        for k in self.step_range():
            self.assertEqual(pl[::k], ul[::k], 'slice [::%d]' % (k))

    def test02_setslice(self):
        'Slice assignment'
        def setfcn(x,i,j,k,L): x[i:j:k] = range(L)
        pl, ul = self.lists_of_len()
        for slen in range(self.limit + 1):
            ssl = nextRange(slen)
            ul[:] = ssl
            pl[:] = ssl
            self.assertEqual(pl, ul[:], 'set slice [:]')

            for i in self.limits_plus(1):
                ssl = nextRange(slen)
                ul[i:] = ssl
                pl[i:] = ssl
                self.assertEqual(pl, ul[:], 'set slice [%d:]' % (i))

                ssl = nextRange(slen)
                ul[:i] = ssl
                pl[:i] = ssl
                self.assertEqual(pl, ul[:], 'set slice [:%d]' % (i))

                for j in self.limits_plus(1):
                    ssl = nextRange(slen)
                    ul[i:j] = ssl
                    pl[i:j] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [%d:%d]' % (i, j))

                    for k in self.step_range():
                        # Extended slices demand an exactly-matching length.
                        ssl = nextRange( len(ul[i:j:k]) )
                        ul[i:j:k] = ssl
                        pl[i:j:k] = ssl
                        self.assertEqual(pl, ul[:], 'set slice [%d:%d:%d]' % (i, j, k))

                        sliceLen = len(ul[i:j:k])
                        self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen + 1)
                        if sliceLen > 2:
                            self.assertRaises(ValueError, setfcn, ul, i, j, k, sliceLen - 1)

                for k in self.step_range():
                    ssl = nextRange( len(ul[i::k]) )
                    ul[i::k] = ssl
                    pl[i::k] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [%d::%d]' % (i, k))

                    ssl = nextRange( len(ul[:i:k]) )
                    ul[:i:k] = ssl
                    pl[:i:k] = ssl
                    self.assertEqual(pl, ul[:], 'set slice [:%d:%d]' % (i, k))

            for k in self.step_range():
                ssl = nextRange(len(ul[::k]))
                ul[::k] = ssl
                pl[::k] = ssl
                self.assertEqual(pl, ul[:], 'set slice [::%d]' % (k))

    def test03_delslice(self):
        'Delete slice'
        for Len in range(self.limit):
            pl, ul = self.lists_of_len(Len)
            del pl[:]
            del ul[:]
            self.assertEqual(pl[:], ul[:], 'del slice [:]')

            for i in range(-Len - 1, Len + 1):
                pl, ul = self.lists_of_len(Len)
                del pl[i:]
                del ul[i:]
                self.assertEqual(pl[:], ul[:], 'del slice [%d:]' % (i))

                pl, ul = self.lists_of_len(Len)
                del pl[:i]
                del ul[:i]
                self.assertEqual(pl[:], ul[:], 'del slice [:%d]' % (i))

                for j in range(-Len - 1, Len + 1):
                    pl, ul = self.lists_of_len(Len)
                    del pl[i:j]
                    del ul[i:j]
                    self.assertEqual(pl[:], ul[:], 'del slice [%d:%d]' % (i,j))

                    for k in range(-Len - 1,0) + range(1,Len):
                        pl, ul = self.lists_of_len(Len)
                        del pl[i:j:k]
                        del ul[i:j:k]
                        self.assertEqual(pl[:], ul[:], 'del slice [%d:%d:%d]' % (i,j,k))

                for k in range(-Len - 1,0) + range(1,Len):
                    pl, ul = self.lists_of_len(Len)
                    del pl[:i:k]
                    del ul[:i:k]
                    self.assertEqual(pl[:], ul[:], 'del slice [:%d:%d]' % (i,k))

                    pl, ul = self.lists_of_len(Len)
                    del pl[i::k]
                    del ul[i::k]
                    self.assertEqual(pl[:], ul[:], 'del slice [%d::%d]' % (i,k))

            for k in range(-Len - 1,0) + range(1,Len):
                pl, ul = self.lists_of_len(Len)
                del pl[::k]
                del ul[::k]
                self.assertEqual(pl[:], ul[:], 'del slice [::%d]' % (k))

    def test04_get_set_del_single(self):
        'Get/set/delete single item'
        pl, ul = self.lists_of_len()
        for i in self.limits_plus(0):
            self.assertEqual(pl[i], ul[i], 'get single item [%d]' % i)

        for i in self.limits_plus(0):
            pl, ul = self.lists_of_len()
            pl[i] = 100
            ul[i] = 100
            self.assertEqual(pl[:], ul[:], 'set single item [%d]' % i)

        for i in self.limits_plus(0):
            pl, ul = self.lists_of_len()
            del pl[i]
            del ul[i]
            self.assertEqual(pl[:], ul[:], 'del single item [%d]' % i)

    def test05_out_of_range_exceptions(self):
        'Out of range exceptions'
        def setfcn(x, i): x[i] = 20
        def getfcn(x, i): return x[i]
        def delfcn(x, i): del x[i]
        pl, ul = self.lists_of_len()
        for i in (-1 - self.limit, self.limit):
            self.assertRaises(IndexError, setfcn, ul, i)  # 'set index %d' % i)
            self.assertRaises(IndexError, getfcn, ul, i)  # 'get index %d' % i)
            self.assertRaises(IndexError, delfcn, ul, i)  # 'del index %d' % i)

    def test06_list_methods(self):
        'List methods'
        pl, ul = self.lists_of_len()
        pl.append(40)
        ul.append(40)
        self.assertEqual(pl[:], ul[:], 'append')

        pl.extend(range(50,55))
        ul.extend(range(50,55))
        self.assertEqual(pl[:], ul[:], 'extend')

        pl.reverse()
        ul.reverse()
        self.assertEqual(pl[:], ul[:], 'reverse')

        for i in self.limits_plus(1):
            pl, ul = self.lists_of_len()
            pl.insert(i,50)
            ul.insert(i,50)
            self.assertEqual(pl[:], ul[:], 'insert at %d' % i)

        for i in self.limits_plus(0):
            pl, ul = self.lists_of_len()
            self.assertEqual(pl.pop(i), ul.pop(i), 'popped value at %d' % i)
            self.assertEqual(pl[:], ul[:], 'after pop at %d' % i)

        pl, ul = self.lists_of_len()
        # BUG FIX: this previously read ``ul.pop(i)`` with the stale loop
        # variable from above (which happened to equal the last index, so it
        # passed by coincidence).  The intent is the no-argument pop,
        # mirroring ``pl.pop()``.
        self.assertEqual(pl.pop(), ul.pop(), 'popped value')
        self.assertEqual(pl[:], ul[:], 'after pop')

        pl, ul = self.lists_of_len()
        def popfcn(x, i): x.pop(i)
        self.assertRaises(IndexError, popfcn, ul, self.limit)
        self.assertRaises(IndexError, popfcn, ul, -1 - self.limit)

        pl, ul = self.lists_of_len()
        for val in range(self.limit):
            self.assertEqual(pl.index(val), ul.index(val), 'index of %d' % val)

        for val in self.limits_plus(2):
            self.assertEqual(pl.count(val), ul.count(val), 'count %d' % val)

        for val in range(self.limit):
            pl, ul = self.lists_of_len()
            pl.remove(val)
            ul.remove(val)
            self.assertEqual(pl[:], ul[:], 'after remove val %d' % val)

        def indexfcn(x, v): return x.index(v)
        def removefcn(x, v): return x.remove(v)
        self.assertRaises(ValueError, indexfcn, ul, 40)
        self.assertRaises(ValueError, removefcn, ul, 40)

    def test07_allowed_types(self):
        'Type-restricted list'
        pl, ul = self.lists_of_len()
        ul._allowed = (int, long)
        ul[1] = 50
        ul[:2] = [60, 70, 80]
        def setfcn(x, i, v): x[i] = v
        self.assertRaises(TypeError, setfcn, ul, 2, 'hello')
        self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), ('hello','goodbye'))

    def test08_min_length(self):
        'Length limits'
        pl, ul = self.lists_of_len()
        ul._minlength = 1
        def delfcn(x,i): del x[:i]
        def setfcn(x,i): x[:i] = []
        for i in range(self.limit - ul._minlength + 1, self.limit + 1):
            self.assertRaises(ValueError, delfcn, ul, i)
            self.assertRaises(ValueError, setfcn, ul, i)
        del ul[:ul._minlength]

        ul._maxlength = 4
        for i in range(0, ul._maxlength - len(ul)):
            ul.append(i)
        self.assertRaises(ValueError, ul.append, 10)

    def test09_iterable_check(self):
        'Error on assigning non-iterable to slice'
        pl, ul = self.lists_of_len(self.limit + 1)
        def setfcn(x, i, v): x[i] = v
        self.assertRaises(TypeError, setfcn, ul, slice(0,3,2), 2)

    def test10_checkindex(self):
        'Index check'
        pl, ul = self.lists_of_len()
        for i in self.limits_plus(0):
            if i < 0:
                self.assertEqual(ul._checkindex(i), i + self.limit, '_checkindex(neg index)')
            else:
                self.assertEqual(ul._checkindex(i), i, '_checkindex(pos index)')

        for i in (-self.limit - 1, self.limit):
            self.assertRaises(IndexError, ul._checkindex, i)

        ul._IndexError = TypeError
        self.assertRaises(TypeError, ul._checkindex, -self.limit - 1)

    def test_11_sorting(self):
        'Sorting'
        pl, ul = self.lists_of_len()
        pl.insert(0, pl.pop())
        ul.insert(0, ul.pop())
        pl.sort()
        ul.sort()
        self.assertEqual(pl[:], ul[:], 'sort')

        # Explicit floor division: the midpoint index must be an int.
        mid = pl[len(pl) // 2]
        pl.sort(key=lambda x: (mid-x)**2)
        ul.sort(key=lambda x: (mid-x)**2)
        self.assertEqual(pl[:], ul[:], 'sort w/ key')

        pl.insert(0, pl.pop())
        ul.insert(0, ul.pop())
        pl.sort(reverse=True)
        ul.sort(reverse=True)
        self.assertEqual(pl[:], ul[:], 'sort w/ reverse')

        mid = pl[len(pl) // 2]
        pl.sort(key=lambda x: (mid-x)**2)
        ul.sort(key=lambda x: (mid-x)**2)
        self.assertEqual(pl[:], ul[:], 'sort w/ key')

    def test_12_arithmetic(self):
        'Arithmetic'
        pl, ul = self.lists_of_len()
        al = range(10,14)
        self.assertEqual(list(pl + al), list(ul + al), 'add')
        self.assertEqual(type(ul), type(ul + al), 'type of add result')
        self.assertEqual(list(al + pl), list(al + ul), 'radd')
        self.assertEqual(type(al), type(al + ul), 'type of radd result')
        objid = id(ul)
        pl += al
        ul += al
        self.assertEqual(pl[:], ul[:], 'in-place add')
        self.assertEqual(objid, id(ul), 'in-place add id')

        for n in (-1,0,1,3):
            pl, ul = self.lists_of_len()
            self.assertEqual(list(pl * n), list(ul * n), 'mul by %d' % n)
            self.assertEqual(type(ul), type(ul * n), 'type of mul by %d result' % n)
            self.assertEqual(list(n * pl), list(n * ul), 'rmul by %d' % n)
            self.assertEqual(type(ul), type(n * ul), 'type of rmul by %d result' % n)
            objid = id(ul)
            pl *= n
            ul *= n
            self.assertEqual(pl[:], ul[:], 'in-place mul by %d' % n)
            self.assertEqual(objid, id(ul), 'in-place mul by %d id' % n)

        pl, ul = self.lists_of_len()
        self.assertEqual(pl, ul, 'cmp for equal')
        self.assertTrue(pl >= ul, 'cmp for gte self')
        self.assertTrue(pl <= ul, 'cmp for lte self')
        self.assertTrue(ul >= pl, 'cmp for self gte')
        self.assertTrue(ul <= pl, 'cmp for self lte')

        self.assertTrue(pl + [5] > ul, 'cmp')
        self.assertTrue(pl + [5] >= ul, 'cmp')
        self.assertTrue(pl < ul + [2], 'cmp')
        self.assertTrue(pl <= ul + [2], 'cmp')
        self.assertTrue(ul + [5] > pl, 'cmp')
        self.assertTrue(ul + [5] >= pl, 'cmp')
        self.assertTrue(ul < pl + [2], 'cmp')
        self.assertTrue(ul <= pl + [2], 'cmp')

        pl[1] = 20
        self.assertTrue(pl > ul, 'cmp for gt self')
        self.assertTrue(ul < pl, 'cmp for self lt')
        pl[1] = -20
        self.assertTrue(pl < ul, 'cmp for lt self')
        # BUG FIX: the original duplicated the previous assertion here;
        # following the pattern of the pl[1] = 20 case above, the reflected
        # comparison must be checked as well.
        self.assertTrue(ul > pl, 'cmp for self gt')
class ListMixinTestSingle(ListMixinTest):
    # Re-run the whole suite against UserListB, which defines _set_single
    # and therefore exercises ListMixin's in-place assignment path.
    listType = UserListB
def suite():
    """Collect both ListMixin test cases into a single test suite."""
    tests = unittest.TestSuite()
    for case in (ListMixinTest, ListMixinTestSingle):
        tests.addTest(unittest.makeSuite(case))
    return tests
def run(verbosity=2):
    # Convenience entry point used by Django's geos test runner and the
    # __main__ guard below.
    unittest.TextTestRunner(verbosity=verbosity).run(suite())

if __name__ == '__main__':
    run()
| mit |
solin/hermes_common | convert_api.py | 9 | 1932 | #! /usr/bin/env python
from time import strftime, localtime
def convert_h():
    """Rewrite _hermes_common_api.h into _hermes_common_api_new.h.

    The Cython-generated header declares the API symbols ``static``; this
    re-emits them as ``extern`` declarations and inserts C++/<complex>
    compatibility boilerplate.  Raises IndexError if the input does not
    contain the expected include guard / static section (same as before).
    """
    # Read the whole input up front; context managers guarantee the handles
    # are closed (the original relied on CPython refcounting for that).
    with open("_hermes_common_api.h") as fin:
        lines = fin.readlines()
    with open("_hermes_common_api_new.h", "w") as f:
        f.write("/* Generated by convert_api.py on %s */\n\n" % \
                strftime("%a %b %d %H:%M:%S %Y", localtime()))
        # Copy the first two lines (the include guard) verbatim.
        f.write(lines.pop(0))
        f.write(lines.pop(0))
        f.write("""\n\
// To avoid compilation warnings:
#undef _XOPEN_SOURCE
#undef _POSIX_C_SOURCE
\n""")
        # Copy everything up to the first 'static' declaration.
        while not lines[0].startswith("static"):
            f.write(lines.pop(0))
        f.write("#include <complex>\n")
        f.write("typedef ::std::complex<double> __pyx_t_double_complex;\n\n")
        # Re-emit every 'static' declaration as 'extern'.
        while lines[0].startswith("static"):
            f.write(lines.pop(0).replace("static", "extern"))
        f.write("""
extern int import__hermes_common(void);
#endif\n""")
def convert_cpp():
    """Generate _hermes_common_api_new.cpp from _hermes_common_api.h.

    Emits the declarations with the ``static `` qualifier stripped, so they
    become the single external definitions matching the ``extern``
    declarations produced by convert_h().
    """
    with open("_hermes_common_api.h") as fin:
        lines = fin.readlines()
    with open("_hermes_common_api_new.cpp", "w") as f:
        f.write("/* Generated by convert_api.py on %s */\n\n" % \
                strftime("%a %b %d %H:%M:%S %Y", localtime()))
        # Skip ahead to the first 'static' declaration.
        while not lines[0].startswith("static"):
            del lines[0]
        f.write("""\
#include "_hermes_common_api_new.h"
""")
        # Strip the qualifier from the leading block of declarations.
        while lines[0].startswith("static"):
            f.write(lines.pop(0).replace("static ", ""))
        # NOTE(review): the first line after the 'static' block is emitted
        # here AND again by the loop below (which starts over at lines[0]).
        # This duplicate write is preserved from the original; confirm
        # against real generator input before changing it.
        f.write(lines[0])
        line_old = lines[0]
        for line in lines:
            # Drop include-guard directives; keep an '#endif' only when the
            # previously emitted line was indented (i.e. it closes a block).
            if line.startswith("#ifndef"):
                continue
            if line.startswith("#define"):
                continue
            if line.startswith("#endif"):
                if not line_old.startswith(" "):
                    continue
            line = line.replace("static ", "")
            f.write(line)
            line_old = line
# Script entry: regenerate both output files whenever this module is run
# (or imported -- there is deliberately no __main__ guard in this helper).
convert_h()
convert_cpp()
| bsd-3-clause |
aral/isvat | django/contrib/gis/sitemaps/georss.py | 314 | 2134 | from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
class GeoRSSSitemap(Sitemap):
    """
    A minimal hook to produce sitemaps for GeoRSS feeds.
    """
    def __init__(self, feed_dict, slug_dict=None):
        """
        Initialize from a feed dictionary (as would be passed to
        `django.contrib.gis.views.feed`) and an optional slug dictionary.

        Without a slug dictionary, each feed-dict key is assumed to be the
        URL parameter of its feed.  For complex feeds (e.g. ones overriding
        `get_object`), supply a slug dictionary keyed like the feed
        dictionary, where each value is a sequence of valid slugs, e.g.:

            feed_dict = {'zipcode' : ZipFeed}
            slug_dict = {'zipcode' : ['77002', '77054']}

        would list the '77002' and '77054' zipcode feeds in the sitemap.
        """
        self.feed_dict = feed_dict
        if slug_dict is None:
            slug_dict = {}
        # Build the list of feed locations, one entry per (section, slug)
        # pair, or just the section when no slugs were supplied for it.
        locations = []
        for section in feed_dict.keys():
            slugs = slug_dict.get(section, False)
            if slugs:
                locations.extend('%s/%s' % (section, slug) for slug in slugs)
            else:
                locations.append(section)
        self.locations = locations

    def get_urls(self, page=1, site=None):
        """
        Overridden so each URL element carries the appropriate
        `geo_format` attribute.
        """
        url_entries = Sitemap.get_urls(self, page=page, site=site)
        for entry in url_entries:
            entry['geo_format'] = 'georss'
        return url_entries

    def items(self):
        return self.locations

    def location(self, obj):
        return urlresolvers.reverse('django.contrib.gis.views.feed', args=(obj,))
| mit |
sombree/android_kernel_samsung_jf | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0                # -q: suppress progress output
test = 0                 # -t: syntax-check only, do not touch sysfs
comments = 0             # -c: echo comment lines from the test file

# sysfs interface of the in-kernel rt-mutex tester: one directory per
# test thread, each with a status and a command file.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes (symbolic name -> numeric opcode written to sysfs)
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }

# Test opcodes: name -> [status field letter, relation, fixed argument].
# A fixed argument of None means the argument comes from the test line.
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    # Python 2 script: plain print statements.
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # NOTE(review): the parameter shadows the builtin ``str``; harmless here
    # because the builtin is never used inside this function.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Check one status value against a test opcode triple.

    val -- raw status value string read from the sysfs status file
    top -- [field letter, relation, fixed argument] entry from test_opcodes
    arg -- argument column of the current test line

    Returns 1 when the relation holds, 0 otherwise.
    """
    intval = int(val)

    if top[0] == "M":
        # Mutex state: pick the decimal digit selected by arg.
        # Floor division made explicit -- identical for these int operands,
        # and keeps the digit extraction correct under Python 3 semantics.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode comparisons accept symbolic command names as the argument.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    # Unknown flag: show usage and bail out.
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source: first positional argument, or stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0

# Read the test patterns.  Each line has the form  cmd:opcode:threadid:data
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        # EOF
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line; echo it only after -c has been "armed" below.
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        # First real command seen: start echoing subsequent comments.
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                # Syntax-check mode: just show the file we would poll.
                print fname
                continue
            # 'w' polls until the condition holds; 't' checks exactly once.
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
            progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        # Bad opcode, missing field, sysfs error, ... -- report and abort.
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
        sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
imsparsh/python-for-android | python3-alpha/python3-src/Doc/conf.py | 45 | 6009 | #
# Python documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
# Make the local Sphinx extensions (pyspecific, patchlevel, ...) importable.
sys.path.append(os.path.abspath('tools/sphinxext'))

# General configuration
# ---------------------

extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'pyspecific']
templates_path = ['tools/sphinxext']

# General substitutions.
project = 'Python'
copyright = '1990-%s, Python Software Foundation' % time.strftime('%Y')

# The default replacements for |version| and |release|.
#
# The short X.Y version.
# version = '2.6'
# The full version, including alpha/beta/rc tags.
# release = '2.6a0'

# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of files that shouldn't be included in the build.
unused_docs = [
    'maclib/scrap',
    'library/xmllib',
    'library/xml.etree',
]

# Ignore .rst in Sphinx its self.
exclude_trees = ['tools/sphinx']

# Relative filename of the reference count data file.
refcount_file = 'data/refcounts.dat'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# By default, highlight as Python 3.
highlight_language = 'python3'


# Options for HTML output
# -----------------------

html_theme = 'default'
html_theme_options = {'collapsiblesidebar': True}

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
    'index': 'indexsidebar.html',
}

# Additional templates that should be rendered to pages.
html_additional_pages = {
    'download': 'download.html',
    'index': 'indexcontent.html',
}

# Output an OpenSearch description file.
html_use_opensearch = 'http://docs.python.org/dev/py3k'

# Additional static files.
html_static_path = ['tools/sphinxext/static']

# Output file base name for HTML help builder.
htmlhelp_basename = 'python' + release.replace('.', '')

# Split the index
html_split_index = True


# Options for LaTeX output
# ------------------------

# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'

# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = r'Guido van Rossum\\Fred L. Drake, Jr., editor'
latex_documents = [
    ('c-api/index', 'c-api.tex',
     'The Python/C API', _stdauthor, 'manual'),
    ('distutils/index', 'distutils.tex',
     'Distributing Python Modules', _stdauthor, 'manual'),
    ('documenting/index', 'documenting.tex',
     'Documenting Python', 'Georg Brandl', 'manual'),
    ('extending/index', 'extending.tex',
     'Extending and Embedding Python', _stdauthor, 'manual'),
    ('install/index', 'install.tex',
     'Installing Python Modules', _stdauthor, 'manual'),
    ('library/index', 'library.tex',
     'The Python Library Reference', _stdauthor, 'manual'),
    ('reference/index', 'reference.tex',
     'The Python Language Reference', _stdauthor, 'manual'),
    ('tutorial/index', 'tutorial.tex',
     'Python Tutorial', _stdauthor, 'manual'),
    ('using/index', 'using.tex',
     'Python Setup and Usage', _stdauthor, 'manual'),
    ('faq/index', 'faq.tex',
     'Python Frequently Asked Questions', _stdauthor, 'manual'),
    ('whatsnew/' + version, 'whatsnew.tex',
     'What\'s New in Python', 'A. M. Kuchling', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
                        '', _stdauthor, 'howto')
                       for fn in os.listdir('howto')
                       if fn.endswith('.rst') and fn != 'index.rst')

# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{docs@python.org}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''

# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']

# Get LaTeX to handle Unicode correctly
latex_elements = {'inputenc': r'\usepackage[utf8x]{inputenc}', 'utf8extra': ''}

# Options for the coverage checker
# --------------------------------

# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
    r'[T|t][k|K]',
    r'Tix',
    r'distutils.*',
]

coverage_ignore_functions = [
    'test($|_)',
]

coverage_ignore_classes = [
]

# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
    '../Include/*.h',
]

# Regexes to find C items in the source files.
coverage_c_regexes = {
    'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
    'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
    'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'),
}

# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
#    'cfunction': [...]
}
| apache-2.0 |
whitgroves/taskmap | .venvs/python3.6.0/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
# Analysis is considered conclusive once this many multi-byte characters
# have been fed (see got_enough_data()).
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99   # confidence ceiling -- never report 100% certainty
SURE_NO = 0.01    # confidence floor returned when there is too little data
# Below this many frequent characters, no judgement is made at all.
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
    """Base class for the language-specific character distribution analysers.

    Subclasses install a char-order -> frequency-order table, its size, and
    a language-typical distribution ratio, and implement get_order().
    """

    def __init__(self):
        # Table mapping character order (see get_order()) to frequency
        # order; installed by subclasses.
        self._mCharToFreqOrder = None
        self._mTableSize = None  # size of the table above
        # Language-dependent constant used in the confidence computation; see
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
        self._mTypicalDistributionRatio = None
        self.reset()

    def reset(self):
        """reset analyser, clear any state"""
        self._mDone = False       # True once a conclusion has been reached
        self._mTotalChars = 0     # total characters encountered
        self._mFreqChars = 0      # characters whose frequency order < 512

    def feed(self, aBuf, aCharLen):
        """feed a character with known length"""
        # Only 2-byte characters take part in the distribution analysis.
        order = self.get_order(aBuf) if aCharLen == 2 else -1
        if order < 0:
            return
        self._mTotalChars += 1
        if order < self._mTableSize and self._mCharToFreqOrder[order] < 512:
            self._mFreqChars += 1

    def get_confidence(self):
        """return confidence based on existing data"""
        # Too little data in our consideration range: refuse to judge.
        if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
            return SURE_NO
        if self._mTotalChars != self._mFreqChars:
            score = (self._mFreqChars /
                     ((self._mTotalChars - self._mFreqChars)
                      * self._mTypicalDistributionRatio))
            if score < SURE_YES:
                return score
        # Cap the result: we never want to claim 100% certainty.
        return SURE_YES

    def got_enough_data(self):
        # Detection may conclude before the whole input has been seen.
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD

    def get_order(self, aBuf):
        # Subclasses convert the raw bytes of one character into an
        # encoding-independent "order", letting several encodings of one
        # language share a single frequency table.  The base class
        # recognises nothing.
        return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-TW: lead byte 0xC4-0xFE, trail byte 0xA1-0xFE.  Byte validity
        # was already checked by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-KR: lead byte 0xB0-0xFE, trail byte 0xA1-0xFE.  Byte validity
        # was already checked by the state machine.
        lead = wrap_ord(aBuf[0])
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # GB2312: lead byte 0xB0-0xFE, trail byte 0xA1-0xFE.  Byte validity
        # was already checked by the state machine.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead < 0xB0 or trail < 0xA1:
            return -1
        return 94 * (lead - 0xB0) + trail - 0xA1
class Big5DistributionAnalysis(CharDistributionAnalysis):
    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Big5: lead byte 0xA4-0xFE; trail byte 0x40-0x7E or 0xA1-0xFE.
        # Byte validity was already checked by the state machine.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if lead < 0xA4:
            return -1
        base = 157 * (lead - 0xA4)
        if trail >= 0xA1:
            # Upper trail range follows the 63 codes of the lower range.
            return base + trail - 0xA1 + 63
        return base + trail - 0x40
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency distribution analysis for the Shift_JIS encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Shift_JIS lead bytes of interest: 0x81-0x9F and 0xE0-0xEF; only
        # trail bytes 0x40-0x7F index into the frequency table (larger trail
        # bytes yield -1, exactly as the original computation did).
        # No validation needed here; the coding state machine has done that.
        lead, trail = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if 0x81 <= lead <= 0x9F:
            row = 188 * (lead - 0x81)
        elif 0xE0 <= lead <= 0xEF:
            row = 188 * (lead - 0xE0 + 31)
        else:
            return -1
        if trail > 0x7F:
            return -1
        return row + trail - 0x40
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency distribution analysis for the EUC-JP encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-JP trail bytes of interest: 0xA1-0xFE.
        # No validation needed here; the coding state machine has done that.
        lead = wrap_ord(aBuf[0])
        if lead < 0xA0:
            return -1
        # NOTE(review): a lead byte of exactly 0xA0 produces a negative
        # order (94 * -1 + ...). This mirrors the original computation --
        # confirm whether 0xA0 can actually reach this point.
        return 94 * (lead - 0xA1) + wrap_ord(aBuf[1]) - 0xA1
| unlicense |
kasi86/linux | scripts/gdb/linux/utils.py | 509 | 4833 | #
# gdb helper commands and functions for Linux kernel debugging
#
# common utilities
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
class CachedType:
    """Lazily resolved gdb type, invalidated when a new objfile is loaded."""

    def __init__(self, name):
        self._name = name
        self._type = None

    def _new_objfile_handler(self, event):
        # Symbol files changed: drop the cache so the next get_type()
        # re-resolves, and detach until we cache a type again.
        self._type = None
        gdb.events.new_objfile.disconnect(self._new_objfile_handler)

    def get_type(self):
        if self._type is not None:
            return self._type
        self._type = gdb.lookup_type(self._name)
        if self._type is None:
            raise gdb.GdbError(
                "cannot resolve type '{0}'".format(self._name))
        # Older gdb builds lack the events API; guard before connecting.
        if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
            gdb.events.new_objfile.connect(self._new_objfile_handler)
        return self._type
# Shared cache for gdb's C "long" type, used by the pointer-arithmetic helpers.
long_type = CachedType("long")


def get_long_type():
    """Return the (cached) gdb.Type for C 'long'."""
    # Read-only access to the module-level cache; no 'global' needed.
    return long_type.get_type()
def offset_of(typeobj, field):
    """Return the byte offset of *field* within pointer type *typeobj*."""
    # Classic offsetof trick: cast a NULL pointer and read the field address.
    null_ptr = gdb.Value(0).cast(typeobj)
    addr_text = str(null_ptr[field].address)
    # The address string may carry a trailing type annotation; keep the hex part.
    return int(addr_text.split()[0], 16)
def container_of(ptr, typeobj, member):
    """Given *ptr* to *member*, return a pointer to the containing *typeobj*."""
    base_addr = ptr.cast(get_long_type()) - offset_of(typeobj, member)
    return base_addr.cast(typeobj)
class ContainerOf(gdb.Function):
    """Return pointer to containing data structure.
$container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the
data structure of the type TYPE in which PTR is the address of ELEMENT.
Note that TYPE and ELEMENT have to be quoted as strings."""
    # NOTE: the docstring above doubles as gdb's help text for the
    # convenience function, so its wording is kept verbatim.

    def __init__(self):
        super(ContainerOf, self).__init__("container_of")

    def invoke(self, ptr, typename, elementname):
        pointer_type = gdb.lookup_type(typename.string()).pointer()
        return container_of(ptr, pointer_type, elementname.string())


# Register the $container_of convenience function with gdb.
ContainerOf()
# Byte-order identifiers returned by get_target_endianness().
BIG_ENDIAN = 0
LITTLE_ENDIAN = 1
# Cached probe result; determined once per session.
target_endianness = None


def get_target_endianness():
    """Return BIG_ENDIAN or LITTLE_ENDIAN for the debugged target (cached)."""
    global target_endianness
    if target_endianness is None:
        output = gdb.execute("show endian", to_string=True)
        if "little endian" in output:
            target_endianness = LITTLE_ENDIAN
        elif "big endian" in output:
            target_endianness = BIG_ENDIAN
        else:
            raise gdb.GdbError("unknown endianness '{0}'".format(str(output)))
    return target_endianness
def read_memoryview(inf, start, length):
    """Return a memoryview over *length* bytes of inferior memory at *start*."""
    raw = inf.read_memory(start, length)
    return memoryview(raw)
def read_u16(buffer):
    """Decode two bytes of *buffer* as an unsigned 16-bit value in target order."""
    b0, b1 = buffer[0], buffer[1]
    if isinstance(b0, str):
        # str-typed buffers (e.g. Python 2) index as 1-char strings.
        b0, b1 = ord(b0), ord(b1)
    if get_target_endianness() == LITTLE_ENDIAN:
        return b0 + (b1 << 8)
    return b1 + (b0 << 8)
def read_u32(buffer):
    """Decode four bytes of *buffer* as an unsigned 32-bit value in target order."""
    first = read_u16(buffer[0:2])
    second = read_u16(buffer[2:4])
    if get_target_endianness() == LITTLE_ENDIAN:
        return first + (second << 16)
    return second + (first << 16)
def read_u64(buffer):
    """Decode eight bytes of *buffer* as an unsigned 64-bit value in target order."""
    first = read_u32(buffer[0:4])
    second = read_u32(buffer[4:8])
    if get_target_endianness() == LITTLE_ENDIAN:
        return first + (second << 32)
    return second + (first << 32)
# Cached "show architecture" output for gdb builds without Frame.architecture.
target_arch = None


def is_target_arch(arch):
    """Return True if the target architecture name contains *arch*."""
    if hasattr(gdb.Frame, 'architecture'):
        # Modern gdb: query the newest frame directly.
        return arch in gdb.newest_frame().architecture().name()
    global target_arch
    if target_arch is None:
        target_arch = gdb.execute("show architecture", to_string=True)
    return arch in target_arch
# Remote-stub flavours detected by get_gdbserver_type().
GDBSERVER_QEMU = 0
GDBSERVER_KGDB = 1
# Cached probe result; reset by the exit handler when the session ends.
gdbserver_type = None
def get_gdbserver_type():
    """Probe and cache the type of the connected gdb server.

    Returns GDBSERVER_QEMU, GDBSERVER_KGDB, or None if neither stub
    could be identified.
    """
    def exit_handler(event):
        # Invalidate the cache when the inferior exits so a later
        # connection is probed again.
        global gdbserver_type
        gdbserver_type = None
        gdb.events.exited.disconnect(exit_handler)
    def probe_qemu():
        # QEMU's gdbstub answers "monitor info version"; other stubs
        # raise an error, which we treat as "not QEMU".
        try:
            return gdb.execute("monitor info version", to_string=True) != ""
        except:
            return False
    def probe_kgdb():
        # kgdb exposes per-CPU shadow threads named shadowCPU<n>; thread 2
        # is the first such shadow thread when kgdb is the stub.
        try:
            thread_info = gdb.execute("info thread 2", to_string=True)
            return "shadowCPU0" in thread_info
        except:
            return False
    global gdbserver_type
    if gdbserver_type is None:
        if probe_qemu():
            gdbserver_type = GDBSERVER_QEMU
        elif probe_kgdb():
            gdbserver_type = GDBSERVER_KGDB
        # Only hook the exit event when something was detected and this
        # gdb build supports the events API.
        if gdbserver_type is not None and hasattr(gdb, 'events'):
            gdb.events.exited.connect(exit_handler)
    return gdbserver_type
def gdb_eval_or_none(expresssion):
    """Evaluate a gdb expression, returning None if evaluation fails.

    The misspelled parameter name is kept for backward compatibility with
    any keyword-argument callers.
    """
    try:
        return gdb.parse_and_eval(expresssion)
    except gdb.error:
        # Only swallow gdb evaluation errors; a bare except here would also
        # hide KeyboardInterrupt/SystemExit and genuine programming errors.
        return None
def dentry_name(d):
    """Return the path of dentry *d*, built from d_iname components.

    The filesystem root (a dentry that is its own parent, or has a NULL
    parent) contributes the empty string.
    """
    components = []
    node = d
    while True:
        parent = node['d_parent']
        if parent == node or parent == 0:
            break
        components.append(node['d_iname'].string())
        node = parent
    # Walk collected leaf-to-root; emit root-to-leaf with '/' separators.
    return "".join("/" + name for name in reversed(components))
| gpl-2.0 |
idovear/odoo | addons/purchase_requisition/wizard/bid_line_qty.py | 374 | 1711 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class bid_line_qty(osv.osv_memory):
    """Wizard that applies a single quantity to the selected bid lines."""
    _name = "bid.line.qty"
    _description = "Change Bid line quantity"
    _columns = {
        'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
    }

    def change_qty(self, cr, uid, ids, context=None):
        """Write the wizard quantity onto the purchase order lines selected
        in the context, then close the wizard window."""
        line_ids = context and context.get('active_ids', [])
        wizard = self.browse(cr, uid, ids, context=context)[0]
        line_obj = self.pool.get('purchase.order.line')
        line_obj.write(cr, uid, line_ids, {'quantity_bid': wizard.qty})
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
druuu/django | django/contrib/gis/gdal/prototypes/raster.py | 320 | 4013 | """
This module houses the ctypes function prototypes for GDAL DataSource (raster)
related data structures.
"""
from ctypes import POINTER, c_char_p, c_double, c_int, c_void_p
from functools import partial
from django.contrib.gis.gdal.libgdal import GDAL_VERSION, std_call
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, int_output, void_output,
voidptr_output,
)
# For more detail about c function names and definitions see
# http://gdal.org/gdal_8h.html
# http://gdal.org/gdalwarper_8h.html
# Prepare partial functions that use cpl error codes
# Rebind the generators so every prototype below checks CPL error codes.
void_output = partial(void_output, cpl=True)
const_string_output = partial(const_string_output, cpl=True)
double_output = partial(double_output, cpl=True)
# Raster Driver Routines
register_all = void_output(std_call('GDALAllRegister'), [])
get_driver = voidptr_output(std_call('GDALGetDriver'), [c_int])
# errcheck=False: a null return here just means "no driver with that name".
get_driver_by_name = voidptr_output(std_call('GDALGetDriverByName'), [c_char_p], errcheck=False)
get_driver_count = int_output(std_call('GDALGetDriverCount'), [])
get_driver_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
# Raster Data Source Routines
create_ds = voidptr_output(std_call('GDALCreate'), [c_void_p, c_char_p, c_int, c_int, c_int, c_int, c_void_p])
open_ds = voidptr_output(std_call('GDALOpen'), [c_char_p, c_int])
# GDAL 2.0 changed GDALClose's return; pick the matching wrapper per version.
if GDAL_VERSION >= (2, 0):
    close_ds = voidptr_output(std_call('GDALClose'), [c_void_p])
else:
    close_ds = void_output(std_call('GDALClose'), [c_void_p])
flush_ds = int_output(std_call('GDALFlushCache'), [c_void_p])
copy_ds = voidptr_output(std_call('GDALCreateCopy'),
    [c_void_p, c_char_p, c_void_p, c_int, POINTER(c_char_p), c_void_p, c_void_p]
)
add_band_ds = void_output(std_call('GDALAddBand'), [c_void_p, c_int])
get_ds_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
get_ds_driver = voidptr_output(std_call('GDALGetDatasetDriver'), [c_void_p])
get_ds_xsize = int_output(std_call('GDALGetRasterXSize'), [c_void_p])
get_ds_ysize = int_output(std_call('GDALGetRasterYSize'), [c_void_p])
get_ds_raster_count = int_output(std_call('GDALGetRasterCount'), [c_void_p])
get_ds_raster_band = voidptr_output(std_call('GDALGetRasterBand'), [c_void_p, c_int])
get_ds_projection_ref = const_string_output(std_call('GDALGetProjectionRef'), [c_void_p])
set_ds_projection_ref = void_output(std_call('GDALSetProjection'), [c_void_p, c_char_p])
# errcheck=False: presumably a raster without a geotransform is a legitimate
# state rather than an error -- confirm against GDALGetGeoTransform docs.
get_ds_geotransform = void_output(std_call('GDALGetGeoTransform'), [c_void_p, POINTER(c_double * 6)], errcheck=False)
set_ds_geotransform = void_output(std_call('GDALSetGeoTransform'), [c_void_p, POINTER(c_double * 6)])
# Raster Band Routines
band_io = void_output(std_call('GDALRasterIO'),
    [c_void_p, c_int, c_int, c_int, c_int, c_int, c_void_p, c_int, c_int, c_int, c_int, c_int]
)
get_band_xsize = int_output(std_call('GDALGetRasterBandXSize'), [c_void_p])
get_band_ysize = int_output(std_call('GDALGetRasterBandYSize'), [c_void_p])
get_band_index = int_output(std_call('GDALGetBandNumber'), [c_void_p])
get_band_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
get_band_ds = voidptr_output(std_call('GDALGetBandDataset'), [c_void_p])
get_band_datatype = int_output(std_call('GDALGetRasterDataType'), [c_void_p])
get_band_nodata_value = double_output(std_call('GDALGetRasterNoDataValue'), [c_void_p, POINTER(c_int)])
set_band_nodata_value = void_output(std_call('GDALSetRasterNoDataValue'), [c_void_p, c_double])
get_band_minimum = double_output(std_call('GDALGetRasterMinimum'), [c_void_p, POINTER(c_int)])
get_band_maximum = double_output(std_call('GDALGetRasterMaximum'), [c_void_p, POINTER(c_int)])
# Reprojection routine
reproject_image = void_output(std_call('GDALReprojectImage'),
    [c_void_p, c_char_p, c_void_p, c_char_p, c_int, c_double, c_double, c_void_p, c_void_p, c_void_p]
)
auto_create_warped_vrt = voidptr_output(std_call('GDALAutoCreateWarpedVRT'),
    [c_void_p, c_char_p, c_char_p, c_int, c_double, c_void_p]
)
| bsd-3-clause |
bbondy/brianbondy.gae | libs/flask/__init__.py | 345 | 1672 | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.10'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .module import Module
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
| mit |
blacklin/kbengine | kbe/src/lib/python/Lib/test/test_charmapcodec.py | 175 | 1794 | """ Python character mapping codec test
This uses the test codec in testcodec.py and thus also tests the
encodings package lookup scheme.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright 2000 Guido van Rossum.
"""#"
import test.support, unittest
import codecs
# Register a search function which knows about our codec
def codec_search_function(encoding):
    """Codec search hook that resolves only the name 'testcodec'."""
    if encoding != 'testcodec':
        # Unknown name: let the next registered search function try.
        return None
    from test import testcodec
    return tuple(testcodec.getregentry())


# Make the test codec resolvable by name through the codecs machinery.
codecs.register(codec_search_function)
# test codec's name (see test/testcodec.py)
codecname = 'testcodec'
class CharmapCodecTest(unittest.TestCase):
    """Exercise decoding/encoding through the registered 'testcodec' charmap."""
    def test_constructorx(self):
        # 'x' is mapped one-to-many: it decodes/encodes as the string 'abc'.
        self.assertEqual(str(b'abc', codecname), 'abc')
        self.assertEqual(str(b'xdef', codecname), 'abcdef')
        self.assertEqual(str(b'defx', codecname), 'defabc')
        self.assertEqual(str(b'dxf', codecname), 'dabcf')
        self.assertEqual(str(b'dxfx', codecname), 'dabcfabc')
    def test_encodex(self):
        # Encoding applies the same one-to-many mapping for 'x'.
        self.assertEqual('abc'.encode(codecname), b'abc')
        self.assertEqual('xdef'.encode(codecname), b'abcdef')
        self.assertEqual('defx'.encode(codecname), b'defabc')
        self.assertEqual('dxf'.encode(codecname), b'dabcf')
        self.assertEqual('dxfx'.encode(codecname), b'dabcfabc')
    def test_constructory(self):
        # 'y' maps to nothing: it is silently dropped during decoding.
        self.assertEqual(str(b'ydef', codecname), 'def')
        self.assertEqual(str(b'defy', codecname), 'def')
        self.assertEqual(str(b'dyf', codecname), 'df')
        self.assertEqual(str(b'dyfy', codecname), 'df')
    def test_maptoundefined(self):
        # A byte with no mapping at all must raise UnicodeError.
        self.assertRaises(UnicodeError, str, b'abc\001', codecname)
def test_main():
    # Entry point used by regrtest and by direct execution below.
    test.support.run_unittest(CharmapCodecTest)
if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
codificat/sos | sos/plugins/tuned.py | 4 | 1295 | # Copyright (C) 2014 Red Hat, Inc., Peter Portante <peter.portante@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
class Tuned(Plugin, RedHatPlugin):
    """Tuned system tuning daemon
    """
    # NOTE: the docstring above is surfaced by sos as the plugin summary,
    # so its wording is kept verbatim.

    packages = ('tuned',)
    profiles = ('system', 'performance')
    plugin_name = 'tuned'

    def setup(self):
        # Record the available, active and recommended tuning profiles.
        commands = [
            "tuned-adm list",
            "tuned-adm active",
            "tuned-adm recommend"
        ]
        # Collect configuration, shipped profiles and the daemon log.
        paths = [
            "/etc/tuned",
            "/usr/lib/tuned",
            "/var/log/tuned/tuned.log"
        ]
        self.add_cmd_output(commands)
        self.add_copy_spec(paths)
# vim: et ts=4 sw=4
| gpl-2.0 |
apache/airflow | airflow/example_dags/tutorial.py | 2 | 3855 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
### Tutorial Documentation
Documentation that goes along with the Airflow tutorial located
[here](https://airflow.apache.org/tutorial.html)
"""
# [START tutorial]
# [START import_module]
from datetime import timedelta
from textwrap import dedent
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators; we need this to operate!
from airflow.operators.bash import BashOperator
from airflow.utils.dates import days_ago
# [END import_module]
# [START default_args]
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
# [START default_args]
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'email': ['airflow@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
    # 'wait_for_downstream': False,
    # 'dag': dag,
    # 'sla': timedelta(hours=2),
    # 'execution_timeout': timedelta(seconds=300),
    # 'on_failure_callback': some_function,
    # 'on_success_callback': some_other_function,
    # 'on_retry_callback': another_function,
    # 'sla_miss_callback': yet_another_function,
    # 'trigger_rule': 'all_success'
}
# [END default_args]
# [START instantiate_dag]
with DAG(
    'tutorial',
    default_args=default_args,
    description='A simple tutorial DAG',
    schedule_interval=timedelta(days=1),
    start_date=days_ago(2),
    tags=['example'],
) as dag:
    # [END instantiate_dag]
    # t1, t2 and t3 are examples of tasks created by instantiating operators
    # [START basic_task]
    t1 = BashOperator(
        task_id='print_date',
        bash_command='date',
    )
    t2 = BashOperator(
        task_id='sleep',
        depends_on_past=False,
        bash_command='sleep 5',
        retries=3,
    )
    # [END basic_task]
    # [START documentation]
    t1.doc_md = dedent(
        """\
    #### Task Documentation
    You can document your task using the attributes `doc_md` (markdown),
    `doc` (plain text), `doc_rst`, `doc_json`, `doc_yaml` which gets
    rendered in the UI's Task Instance Details page.
    ![img](http://montcs.bloomu.edu/~bobmon/Semesters/2012-01/491/import%20soul.png)
    """
    )
    dag.doc_md = __doc__ # providing that you have a docstring at the beginning of the DAG
    # NOTE: the assignment below overwrites the one above; only the last
    # value assigned to dag.doc_md is rendered in the UI.
    dag.doc_md = """
    This is a documentation placed anywhere
    """ # otherwise, type it like this
    # [END documentation]
    # [START jinja_template]
    templated_command = dedent(
        """
    {% for i in range(5) %}
        echo "{{ ds }}"
        echo "{{ macros.ds_add(ds, 7)}}"
        echo "{{ params.my_param }}"
    {% endfor %}
    """
    )
    t3 = BashOperator(
        task_id='templated',
        depends_on_past=False,
        bash_command=templated_command,
        params={'my_param': 'Parameter I passed in'},
    )
    # [END jinja_template]
    # print_date runs first; sleep and templated run in parallel after it.
    t1 >> [t2, t3]
# [END tutorial]
# [END tutorial]
| apache-2.0 |
ritchyteam/odoo | addons/report/models/abstract_report.py | 96 | 2900 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class AbstractReport(osv.AbstractModel):
    """Model used to embed old style reports"""
    _name = 'report.abstract_report'
    # QWeb template name; set by concrete subclasses.
    _template = None
    # Legacy report_sxw-style class wrapped by this model; set by subclasses.
    _wrapped_report_class = None
    def render_html(self, cr, uid, ids, data=None, context=None):
        """Render self._template using the wrapped legacy report's
        localcontext as the QWeb rendering environment.
        """
        context = dict(context or {})
        # If the key 'landscape' is present in data['form'], passing it into the context
        if data and data.get('form', {}).get('landscape'):
            context['landscape'] = True
        if context and context.get('active_ids'):
            # Browse the selected objects via their reference in context
            model = context.get('active_model') or context.get('model')
            objects_model = self.pool[model]
            objects = objects_model.browse(cr, uid, context['active_ids'], context=context)
        else:
            # If no context is set (for instance, during test execution), build one
            model = self.pool['report']._get_report_from_name(cr, uid, self._template).model
            objects_model = self.pool[model]
            objects = objects_model.browse(cr, uid, ids, context=context)
            context['active_model'] = model
            context['active_ids'] = ids
        # Generate the old style report
        wrapped_report = self._wrapped_report_class(cr, uid, '', context=context)
        wrapped_report.set_context(objects, data, context['active_ids'])
        # Rendering self._template with the wrapped report instance localcontext as
        # rendering environment
        docargs = wrapped_report.localcontext
        docargs['docs'] = docargs.get('objects')
        # Used in template translation (see translate_doc method from report model)
        docargs['doc_ids'] = context['active_ids']
        docargs['doc_model'] = model
        return self.pool['report'].render(cr, uid, [], self._template, docargs, context=context)
| agpl-3.0 |
mcgachey/edx-platform | lms/djangoapps/certificates/tests/test_queue.py | 14 | 8216 | # -*- coding: utf-8 -*-
"""Tests for the XQueue certificates interface. """
from contextlib import contextmanager
import ddt
import json
from mock import patch, Mock
from nose.plugins.attrib import attr
from django.test import TestCase
from django.test.utils import override_settings
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.factories import CourseFactory
# It is really unfortunate that we are using the XQueue client
# code from the capa library. In the future, we should move this
# into a shared library. We import it here so we can mock it
# and verify that items are being correctly added to the queue
# in our `XQueueCertInterface` implementation.
from capa.xqueue_interface import XQueueInterface
from certificates.queue import XQueueCertInterface
from certificates.models import (
ExampleCertificateSet,
ExampleCertificate,
GeneratedCertificate,
CertificateStatuses,
)
from verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory
@ddt.ddt
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
    """Test the "add to queue" operation of the XQueue interface. """
    def setUp(self):
        """Create a course, an enrolled honor user and a second,
        identity-verified user, plus the XQueue interface under test."""
        super(XQueueCertInterfaceAddCertificateTest, self).setUp()
        self.user = UserFactory.create()
        self.course = CourseFactory.create()
        self.enrollment = CourseEnrollmentFactory(
            user=self.user,
            course_id=self.course.id,
            is_active=True,
            mode="honor",
        )
        self.xqueue = XQueueCertInterface()
        self.user_2 = UserFactory.create()
        SoftwareSecurePhotoVerificationFactory.create(user=self.user_2, status='approved')
    def test_add_cert_callback_url(self):
        """The queued task must carry the LMS update-certificate callback URL."""
        with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
            with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
                mock_send.return_value = (0, None)
                self.xqueue.add_cert(self.user, self.course.id)
        # Verify that the task was sent to the queue with the correct callback URL
        self.assertTrue(mock_send.called)
        __, kwargs = mock_send.call_args_list[0]
        actual_header = json.loads(kwargs['header'])
        self.assertIn('https://edx.org/update_certificate?key=', actual_header['lms_callback_url'])
    def test_no_create_action_in_queue_for_html_view_certs(self):
        """
        Tests there is no certificate create message in the queue if generate_pdf is False
        """
        with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
            with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
                self.xqueue.add_cert(self.user, self.course.id, generate_pdf=False)
        # Verify that add_cert method does not add message to queue
        self.assertFalse(mock_send.called)
        # The certificate is created immediately in the downloadable state.
        certificate = GeneratedCertificate.objects.get(user=self.user, course_id=self.course.id)
        self.assertEqual(certificate.status, CertificateStatuses.downloadable)
        self.assertIsNotNone(certificate.verify_uuid)
    @ddt.data('honor', 'audit')
    def test_add_cert_with_honor_certificates(self, mode):
        """Test certificates generations for honor and audit modes."""
        template_name = 'certificate-template-{id.org}-{id.course}.pdf'.format(
            id=self.course.id
        )
        self.assert_queue_response(mode, mode, template_name)
    @ddt.data('credit', 'verified')
    def test_add_cert_with_verified_certificates(self, mode):
        """Test if enrollment mode is verified or credit along with valid
        software-secure verification than verified certificate should be generated.
        """
        template_name = 'certificate-template-{id.org}-{id.course}-verified.pdf'.format(
            id=self.course.id
        )
        self.assert_queue_response(mode, 'verified', template_name)
    def assert_queue_response(self, mode, expected_mode, expected_template_name):
        """Dry method for course enrollment and adding request to queue."""
        CourseEnrollmentFactory(
            user=self.user_2,
            course_id=self.course.id,
            is_active=True,
            mode=mode,
        )
        with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
            with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
                mock_send.return_value = (0, None)
                self.xqueue.add_cert(self.user_2, self.course.id)
        # Verify that the task was sent to the queue with the correct callback URL
        self.assertTrue(mock_send.called)
        __, kwargs = mock_send.call_args_list[0]
        actual_header = json.loads(kwargs['header'])
        self.assertIn('https://edx.org/update_certificate?key=', actual_header['lms_callback_url'])
        # The generated certificate records the expected (possibly upgraded) mode.
        certificate = GeneratedCertificate.objects.get(user=self.user_2, course_id=self.course.id)
        self.assertEqual(certificate.mode, expected_mode)
        body = json.loads(kwargs['body'])
        self.assertIn(expected_template_name, body['template_pdf'])
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceExampleCertificateTest(TestCase):
    """Tests for the XQueue interface for certificate generation. """
    COURSE_KEY = CourseLocator(org='test', course='test', run='test')
    TEMPLATE = 'test.pdf'
    DESCRIPTION = 'test'
    ERROR_MSG = 'Kaboom!'
    def setUp(self):
        """Create the XQueue interface under test."""
        super(XQueueCertInterfaceExampleCertificateTest, self).setUp()
        self.xqueue = XQueueCertInterface()
    def test_add_example_cert(self):
        """Queuing an example certificate sends the payload and marks it started."""
        cert = self._create_example_cert()
        with self._mock_xqueue() as mock_send:
            self.xqueue.add_example_cert(cert)
        # Verify that the correct payload was sent to the XQueue
        self._assert_queue_task(mock_send, cert)
        # Verify the certificate status
        self.assertEqual(cert.status, ExampleCertificate.STATUS_STARTED)
    def test_add_example_cert_error(self):
        """A queue submission failure puts the certificate in the error state."""
        cert = self._create_example_cert()
        with self._mock_xqueue(success=False):
            self.xqueue.add_example_cert(cert)
        # Verify the error status of the certificate
        self.assertEqual(cert.status, ExampleCertificate.STATUS_ERROR)
        self.assertIn(self.ERROR_MSG, cert.error_reason)
    def _create_example_cert(self):
        """Create an example certificate. """
        cert_set = ExampleCertificateSet.objects.create(course_key=self.COURSE_KEY)
        return ExampleCertificate.objects.create(
            example_cert_set=cert_set,
            description=self.DESCRIPTION,
            template=self.TEMPLATE
        )
    @contextmanager
    def _mock_xqueue(self, success=True):
        """Mock the XQueue method for sending a task to the queue. """
        with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
            # (0, None) is XQueue's success response; (1, msg) is a failure.
            mock_send.return_value = (0, None) if success else (1, self.ERROR_MSG)
            yield mock_send
    def _assert_queue_task(self, mock_send, cert):
        """Check that the task was added to the queue. """
        expected_header = {
            'lms_key': cert.access_key,
            'lms_callback_url': 'https://edx.org/update_example_certificate?key={key}'.format(key=cert.uuid),
            'queue_name': 'certificates'
        }
        expected_body = {
            'action': 'create',
            'username': cert.uuid,
            'name': u'John Doë',
            'course_id': unicode(self.COURSE_KEY),
            'template_pdf': 'test.pdf',
            'example_certificate': True
        }
        self.assertTrue(mock_send.called)
        __, kwargs = mock_send.call_args_list[0]
        actual_header = json.loads(kwargs['header'])
        actual_body = json.loads(kwargs['body'])
        self.assertEqual(expected_header, actual_header)
        self.assertEqual(expected_body, actual_body)
| agpl-3.0 |
silly-wacky-3-town-toon/SOURCE-COD | toontown/estate/DistributedTrunk.py | 1 | 17185 | from panda3d.core import *
from direct.gui.DirectGui import *
from direct.task.Task import Task
from direct.interval.IntervalGlobal import *
import DistributedCloset
import ClosetGlobals
import TrunkGUI
from toontown.toon import ToonDNA
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
N_A = 0
class DistributedTrunk(DistributedCloset.DistributedCloset):
notify = directNotify.newCategory('DistributedTrunk')
    def __init__(self, cr):
        # Client-side distributed trunk (accessory closet) object.
        DistributedCloset.DistributedCloset.__init__(self, cr)
        # Accessory inventories shown in the trunk GUI (filled by setState).
        self.hatList = []
        self.glassesList = []
        self.backpackList = []
        self.shoesList = []
        # Snapshots taken when the trunk opens, used to detect changes.
        self.oldHatList = []
        self.oldGlassesList = []
        self.oldBackpackList = []
        self.oldShoesList = []
        # GUI event names; filled in by popupChangeClothesGUI().
        self.swapHatEvent = ''
        self.swapGlassesEvent = ''
        self.swapBackpackEvent = ''
        self.swapShoesEvent = ''
        # Flags recording whether the customer deleted an accessory of each kind.
        self.hatDeleted = 0
        self.glassesDeleted = 0
        self.backpackDeleted = 0
        self.shoesDeleted = 0
def printInfo(self):
print 'avid: %s, gender: %s' % (self.av.doId, self.av.style.gender)
print 'current hat = %s, glasses = %s, backpack = %s, shoes = %s' % (self.av.getHat(),
self.av.getGlasses(),
self.av.getBackpack(),
self.av.getShoes())
print 'hatList = %s' % self.av.getHatList()
print 'glassesList = %s' % self.av.getGlassesList()
print 'backpackList = %s' % self.av.getBackpackList()
print 'shoesList = %s' % self.av.getShoesList()
    def setState(self, mode, avId, ownerId, gender, hatList, glassesList, backpackList, shoesList):
        """Server update: open or close the trunk for avatar avId.

        mode is ClosetGlobals.CLOSED or ClosetGlobals.OPEN; the remaining
        arguments describe the owner and the stored accessories.
        """
        self.notify.debug('setState, mode=%s, avId=%s, ownerId=%d' % (mode, avId, ownerId))
        self.isOwner = avId == ownerId
        self.ownerGender = gender
        if mode == ClosetGlobals.CLOSED:
            self.fsm.request('closed')
            return
        elif mode == ClosetGlobals.OPEN:
            self.customerId = avId
            self.av = self.cr.doId2do.get(self.customerId, None)
            if self.av:
                if base.localAvatar.getDoId() == self.customerId:
                    # The local toon is the customer: record the trunk
                    # contents and snapshot them for change detection.
                    self.gender = self.av.style.gender
                    self.hatList = hatList
                    self.glassesList = glassesList
                    self.backpackList = backpackList
                    self.shoesList = shoesList
                    self.oldHatList = self.hatList[0:]
                    self.oldGlassesList = self.glassesList[0:]
                    self.oldBackpackList = self.backpackList[0:]
                    self.oldShoesList = self.shoesList[0:]
                    if not self.isOwner:
                        # Visitors get an informational panel instead of the GUI.
                        self.__popupNotOwnerPanel()
                    else:
                        # Defer GUI creation slightly so the open animation starts first.
                        taskMgr.doMethodLater(0.5, self.popupChangeClothesGUI, self.uniqueName('popupChangeClothesGUI'))
            self.fsm.request('open')
        return
def load(self):
lNode = self.find('**/lid_origin')
lLid = self.find('**/lid')
if lNode.isEmpty() or lLid.isEmpty():
self.lid = None
else:
lLid.wrtReparentTo(lNode)
self.lid = lNode
if not lNode.isEmpty():
self.scale = lLid.getScale()[0] * 0.6
return
def popupChangeClothesGUI(self, task):
self.notify.debug('popupChangeClothesGUI')
self.purchaseDoneEvent = self.uniqueName('purchaseDone')
self.swapHatEvent = self.uniqueName('swapHat')
self.swapGlassesEvent = self.uniqueName('swapGlasses')
self.swapBackpackEvent = self.uniqueName('swapBackpack')
self.swapShoesEvent = self.uniqueName('swapShoes')
self.cancelEvent = self.uniqueName('cancel')
self.accept(self.purchaseDoneEvent, self.__proceedToCheckout)
self.accept(self.swapHatEvent, self.__handleSwapHat)
self.accept(self.swapGlassesEvent, self.__handleSwapGlasses)
self.accept(self.swapBackpackEvent, self.__handleSwapBackpack)
self.accept(self.swapShoesEvent, self.__handleSwapShoes)
self.accept(self.cancelEvent, self._handleCancel)
self.deleteEvent = self.uniqueName('delete')
if self.isOwner:
self.accept(self.deleteEvent, self.__handleDelete)
if not self.closetGUI:
self.closetGUI = TrunkGUI.TrunkGUI(self.isOwner, self.purchaseDoneEvent, self.cancelEvent, self.swapHatEvent, self.swapGlassesEvent, self.swapBackpackEvent, self.swapShoesEvent, self.deleteEvent, self.hatList, self.glassesList, self.backpackList, self.shoesList)
self.closetGUI.load()
if self.gender != self.ownerGender:
self.closetGUI.setGender(self.ownerGender)
self.closetGUI.enter(base.localAvatar)
self.closetGUI.showButtons()
oldHat = self.av.getHat()
oldGlasses = self.av.getGlasses()
oldBackpack = self.av.getBackpack()
oldShoes = self.av.getShoes()
self.oldStyle = {ToonDNA.HAT: oldHat,
ToonDNA.GLASSES: oldGlasses,
ToonDNA.BACKPACK: oldBackpack,
ToonDNA.SHOES: oldShoes}
return Task.done
def resetCloset(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupChangeClothesGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
taskMgr.remove(self.uniqueName('lerpToon'))
if self.closetGUI:
self.closetGUI.hideButtons()
self.closetGUI.exit()
self.closetGUI.unload()
self.closetGUI = None
del self.av
self.av = base.localAvatar
oldHat = self.av.getHat()
oldGlasses = self.av.getGlasses()
oldBackpack = self.av.getBackpack()
oldShoes = self.av.getShoes()
self.oldStyle = {ToonDNA.HAT: oldHat,
ToonDNA.GLASSES: oldGlasses,
ToonDNA.BACKPACK: oldBackpack,
ToonDNA.SHOES: oldShoes}
self.hatDeleted = 0
self.glassesDeleted = 0
self.backpackDeleted = 0
self.shoesDeleted = 0
return Task.done
def _handleCancel(self):
if self.oldStyle:
oldHat = self.oldStyle[ToonDNA.HAT]
oldGlasses = self.oldStyle[ToonDNA.GLASSES]
oldBackpack = self.oldStyle[ToonDNA.BACKPACK]
oldShoes = self.oldStyle[ToonDNA.SHOES]
self.d_setDNA(oldHat[0], oldHat[1], oldHat[2], oldGlasses[0], oldGlasses[1], oldGlasses[2], oldBackpack[0], oldBackpack[1], oldBackpack[2], oldShoes[0], oldShoes[1], oldShoes[2], 1)
else:
self.notify.info('avoided crash in handleCancel')
self._handlePurchaseDone()
if self.closetGUI:
self.closetGUI.resetClothes(self.oldStyle)
if self.popupInfo != None:
self.popupInfo.destroy()
self.popupInfo = None
return
def __handleSwapHat(self):
item = self.av.getHat()
self.d_setDNA(item[0], item[1], item[2], N_A, N_A, N_A, N_A, N_A, N_A, N_A, N_A, N_A, 0, ToonDNA.HAT)
if self.closetGUI:
self.closetGUI.updateTrashButtons()
def __handleSwapGlasses(self):
item = self.av.getGlasses()
self.d_setDNA(N_A, N_A, N_A, item[0], item[1], item[2], N_A, N_A, N_A, N_A, N_A, N_A, 0, ToonDNA.GLASSES)
if self.closetGUI:
self.closetGUI.updateTrashButtons()
def __handleSwapBackpack(self):
item = self.av.getBackpack()
self.d_setDNA(N_A, N_A, N_A, N_A, N_A, N_A, item[0], item[1], item[2], N_A, N_A, N_A, 0, ToonDNA.BACKPACK)
if self.closetGUI:
self.closetGUI.updateTrashButtons()
def __handleSwapShoes(self):
item = self.av.getShoes()
self.d_setDNA(N_A, N_A, N_A, N_A, N_A, N_A, N_A, N_A, N_A, item[0], item[1], item[2], 0, ToonDNA.SHOES)
if self.closetGUI:
self.closetGUI.updateTrashButtons()
def __handleDelete(self, which):
if which == ToonDNA.HAT:
itemList = self.closetGUI.hats
trashIndex = self.closetGUI.hatChoice
swapFunc = self.closetGUI.swapHat
removeFunc = self.closetGUI.removeHat
trashItem = self.av.getHat()
self.hatDeleted = self.hatDeleted | 1
elif which == ToonDNA.GLASSES:
itemList = self.closetGUI.glasses
trashIndex = self.closetGUI.glassesChoice
swapFunc = self.closetGUI.swapGlasses
removeFunc = self.closetGUI.removeGlasses
trashItem = self.av.getGlasses()
self.glassesDeleted = self.glassesDeleted | 1
elif which == ToonDNA.BACKPACK:
itemList = self.closetGUI.backpacks
trashIndex = self.closetGUI.backpackChoice
swapFunc = self.closetGUI.swapBackpack
removeFunc = self.closetGUI.removeBackpack
trashItem = self.av.getBackpack()
self.backpackDeleted = self.backpackDeleted | 1
elif which == ToonDNA.SHOES:
itemList = self.closetGUI.shoes
trashIndex = self.closetGUI.shoesChoice
swapFunc = self.closetGUI.swapShoes
removeFunc = self.closetGUI.removeShoes
trashItem = self.av.getShoes()
self.shoesDeleted = self.shoesDeleted | 1
else:
self.notify.warning("we don't know about this item(type = %s)" % which)
return
if len(itemList) > 1:
if trashIndex == 0:
swapFunc(1)
else:
swapFunc(-1)
removeFunc(trashIndex)
self.sendUpdate('removeItem', [trashItem[0],
trashItem[1],
trashItem[2],
which])
swapFunc(0)
self.closetGUI.updateTrashButtons()
else:
self.notify.warning("cant delete this item(type = %s), since we don't have a replacement" % which)
def __proceedToCheckout(self):
if self.hatDeleted or self.glassesDeleted or self.backpackDeleted or self.shoesDeleted:
self.__popupAreYouSurePanel()
else:
self._handlePurchaseDone()
def _handlePurchaseDone(self, timeout = 0):
if timeout == 1:
oldHat = self.oldStyle[ToonDNA.HAT]
oldGlasses = self.oldStyle[ToonDNA.GLASSES]
oldBackpack = self.oldStyle[ToonDNA.BACKPACK]
oldShoes = self.oldStyle[ToonDNA.SHOES]
self.d_setDNA(oldHat[0], oldHat[1], oldHat[2], oldGlasses[0], oldGlasses[1], oldGlasses[2], oldBackpack[0], oldBackpack[1], oldBackpack[2], oldShoes[0], oldShoes[1], oldShoes[2], 1)
else:
which = 0
if hasattr(self.closetGUI, 'hatChoice') and hasattr(self.closetGUI, 'glassesChoice') and hasattr(self.closetGUI, 'backpackChoice') and hasattr(self.closetGUI, 'shoesChoice'):
if self.closetGUI.hatChoice != 0 or self.hatDeleted:
which = which | ToonDNA.HAT
if self.closetGUI.glassesChoice != 0 or self.glassesDeleted:
which = which | ToonDNA.GLASSES
if self.closetGUI.backpackChoice != 0 or self.backpackDeleted:
which = which | ToonDNA.BACKPACK
if self.closetGUI.shoesChoice != 0 or self.shoesDeleted:
which = which | ToonDNA.SHOES
hat = self.av.getHat()
glasses = self.av.getGlasses()
backpack = self.av.getBackpack()
shoes = self.av.getShoes()
self.d_setDNA(hat[0], hat[1], hat[2], glasses[0], glasses[1], glasses[2], backpack[0], backpack[1], backpack[2], shoes[0], shoes[1], shoes[2], 2, which)
def d_setDNA(self, hatIdx, hatTexture, hatColor, glassesIdx, glassesTexture, glassesColor, backpackIdx, backpackTexture, backpackColor, shoesIdx, shoesTexture, shoesColor, finished, which = ToonDNA.HAT | ToonDNA.GLASSES | ToonDNA.BACKPACK | ToonDNA.SHOES):
self.sendUpdate('setDNA', [hatIdx,
hatTexture,
hatColor,
glassesIdx,
glassesTexture,
glassesColor,
backpackIdx,
backpackTexture,
backpackColor,
shoesIdx,
shoesTexture,
shoesColor,
finished,
which])
def setCustomerDNA(self, avId, hatIdx, hatTexture, hatColor, glassesIdx, glassesTexture, glassesColor, backpackIdx, backpackTexture, backpackColor, shoesIdx, shoesTexture, shoesColor, which):
if avId and avId != base.localAvatar.doId:
av = base.cr.doId2do.get(avId, None)
if av:
if self.av == base.cr.doId2do[avId]:
if which & ToonDNA.HAT:
self.av.setHat(hatIdx, hatTexture, hatColor)
if which & ToonDNA.GLASSES:
self.av.setGlasses(glassesIdx, glassesTexture, glassesColor)
if which & ToonDNA.BACKPACK:
self.av.setBackpack(backpackIdx, backpackTexture, backpackColor)
if which & ToonDNA.SHOES:
self.av.setShoes(shoesIdx, shoesTexture, shoesColor)
self.av.generateToonAccessories()
return
def __popupNotOwnerPanel(self):
if self.popupInfo != None:
self.popupInfo.destroy()
self.popupInfo = None
self.purchaseDoneEvent = self.uniqueName('purchaseDone')
self.swapHatEvent = self.uniqueName('swapHat')
self.swapGlassesEvent = self.uniqueName('swapGlasses')
self.swapBackpackEvent = self.uniqueName('swapBackpack')
self.swapShoesEvent = self.uniqueName('swapShoes')
self.cancelEvent = self.uniqueName('cancel')
self.accept(self.purchaseDoneEvent, self.__proceedToCheckout)
self.accept(self.swapHatEvent, self.__handleSwapHat)
self.accept(self.swapGlassesEvent, self.__handleSwapGlasses)
self.accept(self.swapBackpackEvent, self.__handleSwapBackpack)
self.accept(self.swapShoesEvent, self.__handleSwapShoes)
self.accept(self.cancelEvent, self._handleCancel)
self.deleteEvent = self.uniqueName('delete')
if self.isOwner:
self.accept(self.deleteEvent, self.__handleDelete)
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
self.popupInfo = DirectFrame(parent=hidden, relief=None, state='normal', text=TTLocalizer.TrunkNotOwnerMessage, frameSize=(-1, 1, -1, 1), text_wordwrap=10, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(0.88, 1, 0.55), geom_pos=(0, 0, -.08), text_scale=0.08, text_pos=(0, 0.06))
DirectButton(self.popupInfo, image=okButtonImage, relief=None, text=TTLocalizer.ClosetPopupOK, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(0.0, 0.0, -0.21), command=self._handleNotOwnerMessageOK)
buttons.removeNode()
self.popupInfo.reparentTo(aspect2d)
return
def __popupAreYouSurePanel(self):
if self.popupInfo != None:
self.popupInfo.destroy()
self.popupInfo = None
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
cancelButtonImage = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr'))
self.popupInfo = DirectFrame(parent=hidden, relief=None, state='normal', text=TTLocalizer.TrunkAreYouSureMessage, frameSize=(-1, 1, -1, 1), text_wordwrap=10, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(0.88, 1, 0.55), geom_pos=(0, 0, -.08), text_scale=0.08, text_pos=(0, 0.08))
DirectButton(self.popupInfo, image=okButtonImage, relief=None, text=TTLocalizer.ClosetPopupOK, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(-0.1, 0.0, -0.21), command=self._handleYesImSure)
DirectButton(self.popupInfo, image=cancelButtonImage, relief=None, text=TTLocalizer.ClosetPopupCancel, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(0.1, 0.0, -0.21), command=self._handleNotSure)
buttons.removeNode()
self.popupInfo.reparentTo(aspect2d)
return
def _openDoors(self):
if self.closetTrack:
self.closetTrack.finish()
openHpr = Vec3(0, -80, 0)
if self.av:
self.av.applyCheesyEffect(ToontownGlobals.CENormal)
self.closetTrack = Parallel()
if self.lid:
self.closetTrack.append(self.lid.hprInterval(0.5, openHpr))
self.closetTrack.start()
def _closeDoors(self):
if self.closetTrack:
self.closetTrack.finish()
closeHpr = Vec3(0, 0, 0)
if self.av:
self.av.reconsiderCheesyEffect()
self.closetTrack = Parallel()
if self.lid:
self.closetTrack.append(self.lid.hprInterval(0.5, closeHpr))
self.closetTrack.start()
| apache-2.0 |
xin3liang/platform_external_chromium_org_tools_grit | grit/gather/rc.py | 62 | 11190 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Support for gathering resources from RC files.
'''
import re
from grit import exception
from grit import lazy_re
from grit import tclib
from grit.gather import regexp
# Find portions that need unescaping in resource strings. We need to be
# careful that a \\n is matched _first_ as a \\ rather than matching as
# a \ followed by a \n.
# TODO(joi) Handle ampersands if we decide to change them into <ph>
# TODO(joi) May need to handle other control characters than \n
_NEED_UNESCAPE = lazy_re.compile(r'""|\\\\|\\n|\\t')

# Find portions that need escaping to encode string as a resource string.
# NOTE(review): the '&nbsp;' entity in this pattern and in _ESCAPE_CHARS
# below had been corrupted into bare spaces (an HTML-rendering artifact);
# restored here to the upstream grit form -- verify against upstream.
_NEED_ESCAPE = lazy_re.compile(r'"|\n|\t|\\|\&nbsp\;')

# How to escape certain characters when writing a resource string.
_ESCAPE_CHARS = {
  '"' : '""',
  '\n' : '\\n',
  '\t' : '\\t',
  '\\' : '\\\\',
  '&nbsp;' : ' '
}

# How to unescape certain strings: the exact inverse of _ESCAPE_CHARS.
_UNESCAPE_CHARS = dict([[value, key] for key, value in _ESCAPE_CHARS.items()])
class Section(regexp.RegexpGatherer):
  '''A section from a resource file.'''

  @staticmethod
  def Escape(text):
    '''Returns 'text' with every character escaped that must be encoded
    before the string can be embedded in a resource section.'''
    return _NEED_ESCAPE.sub(lambda match: _ESCAPE_CHARS[match.group()], text)

  @staticmethod
  def UnEscape(text):
    '''Returns 'text' with resource-file escape sequences expanded back into
    the characters they stand for.'''
    return _NEED_UNESCAPE.sub(lambda match: _UNESCAPE_CHARS[match.group()], text)

  def _RegExpParse(self, rexp, text_to_parse):
    '''Overrides _RegExpParse to add shortcut group handling.  Otherwise
    the same: after the base parse, every clique found is added to a
    shortcut group named after the section's first textual ID.
    '''
    super(Section, self)._RegExpParse(rexp, text_to_parse)
    if not self.is_skeleton and len(self.GetTextualIds()) > 0:
      group_name = self.GetTextualIds()[0]
      for clique in self.GetCliques():
        clique.AddToShortcutGroup(group_name)

  def ReadSection(self):
    '''Reads self.text_ from the input file: everything from the line that
    starts with self.extkey through the END matching the outermost BEGIN.'''
    rc_text = self._LoadInputFile()

    collected = ''
    depth = 0
    assert self.extkey
    header_re = re.compile(r'\s*' + self.extkey + r'\b')
    for line in rc_text.splitlines(True):
      if collected or header_re.match(line):
        collected += line

      # We stop once we reach the END for the outermost block.
      depth_before = depth
      keyword = line.strip()
      if collected and keyword == 'BEGIN':
        depth += 1
      elif collected and keyword == 'END':
        depth -= 1
      if depth_before == 1 and depth == 0:
        break

    if not collected:
      raise exception.SectionNotFound('%s in file %s' % (self.extkey, self.rc_file))

    self.text_ = collected.strip()
class Dialog(Section):
  '''A resource section that contains a dialog resource.

  Extracts the dialog's textual ID, its caption, and the text of every
  control (together with the control's ID when it has one) so that the
  translateable portions can be turned into cliques.
  '''

  # A typical dialog resource section looks like this:
  #
  # IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
  # STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
  # CAPTION "About"
  # FONT 8, "System", 0, 0, 0x0
  # BEGIN
  #     ICON            IDI_KLONK,IDC_MYICON,14,9,20,20
  #     LTEXT           "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
  #                     SS_NOPREFIX
  #     LTEXT           "Copyright (C) 2005",IDC_STATIC,49,20,119,8
  #     DEFPUSHBUTTON   "OK",IDOK,195,6,30,11,WS_GROUP
  #     CONTROL         "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
  #                     BS_AUTORADIOBUTTON,46,51,84,10
  # END

  # We are using a sorted set of keys, and we assume that the
  # group name used for descriptions (type) will come after the "text"
  # group in alphabetical order. We also assume that there cannot be
  # more than one description per regular expression match.
  # If that's not the case some descriptions will be clobbered.
  dialog_re_ = lazy_re.compile('''
    # The dialog's ID in the first line
    (?P<id1>[A-Z0-9_]+)\s+DIALOG(EX)?
    |
    # The caption of the dialog
    (?P<type1>CAPTION)\s+"(?P<text1>.*?([^"]|""))"\s
    |
    # Lines for controls that have text and an ID
    \s+(?P<type2>[A-Z]+)\s+"(?P<text2>.*?([^"]|"")?)"\s*,\s*(?P<id2>[A-Z0-9_]+)\s*,
    |
    # Lines for controls that have text only
    \s+(?P<type3>[A-Z]+)\s+"(?P<text3>.*?([^"]|"")?)"\s*,
    |
    # Lines for controls that reference other resources
    \s+[A-Z]+\s+[A-Z0-9_]+\s*,\s*(?P<id3>[A-Z0-9_]*[A-Z][A-Z0-9_]*)
    |
    # This matches "NOT SOME_STYLE" so that it gets consumed and doesn't get
    # matched by the next option (controls that have only an ID and then just
    # numbers)
    \s+NOT\s+[A-Z][A-Z0-9_]+
    |
    # Lines for controls that have only an ID and then just numbers
    \s+[A-Z]+\s+(?P<id4>[A-Z0-9_]*[A-Z][A-Z0-9_]*)\s*,
    ''', re.MULTILINE | re.VERBOSE)

  def Parse(self):
    '''Knows how to parse dialog resource sections.'''
    # ReadSection() pulls the DIALOG(EX) block into self.text_; the regexp
    # parse then splits it into skeleton and translateable parts.
    self.ReadSection()
    self._RegExpParse(self.dialog_re_, self.text_)
class Menu(Section):
  '''A resource section that contains a menu resource.

  Unlike dialogs, a whole menu is gathered as ONE translateable message
  (see Parse below) so translators can keep keyboard shortcuts unique.
  '''

  # A typical menu resource section looks something like this:
  #
  # IDC_KLONK MENU
  # BEGIN
  #     POPUP "&File"
  #     BEGIN
  #         MENUITEM "E&xit",                       IDM_EXIT
  #         MENUITEM "This be ""Klonk"" me like",   ID_FILE_THISBE
  #         POPUP "gonk"
  #         BEGIN
  #             MENUITEM "Klonk && is ""good""",    ID_GONK_KLONKIS
  #         END
  #     END
  #     POPUP "&Help"
  #     BEGIN
  #         MENUITEM "&About ...",                  IDM_ABOUT
  #     END
  # END

  # Description used for the messages generated for menus, to explain to
  # the translators how to handle them.
  MENU_MESSAGE_DESCRIPTION = (
      'This message represents a menu. Each of the items appears in sequence '
      '(some possibly within sub-menus) in the menu. The XX01XX placeholders '
      'serve to separate items. Each item contains an & (ampersand) character '
      'in front of the keystroke that should be used as a shortcut for that item '
      'in the menu. Please make sure that no two items in the same menu share '
      'the same shortcut.'
  )

  # A dandy regexp to suck all the IDs and translateables out of a menu
  # resource
  menu_re_ = lazy_re.compile('''
    # Match the MENU ID on the first line
    ^(?P<id1>[A-Z0-9_]+)\s+MENU
    |
    # Match the translateable caption for a popup menu
    POPUP\s+"(?P<text1>.*?([^"]|""))"\s
    |
    # Match the caption & ID of a MENUITEM
    MENUITEM\s+"(?P<text2>.*?([^"]|""))"\s*,\s*(?P<id2>[A-Z0-9_]+)
    ''', re.MULTILINE | re.VERBOSE)

  def Parse(self):
    '''Knows how to parse menu resource sections.  Because it is important that
    menu shortcuts are unique within the menu, we return each menu as a single
    message with placeholders to break up the different menu items, rather than
    return a single message per menu item.  we also add an automatic description
    with instructions for the translators.'''
    self.ReadSection()
    # single_message_ makes the base gatherer fold every match into this one
    # Message instead of creating a message per item.
    self.single_message_ = tclib.Message(description=self.MENU_MESSAGE_DESCRIPTION)
    self._RegExpParse(self.menu_re_, self.text_)
class Version(Section):
  '''A resource section that contains a VERSIONINFO resource.

  Only the VALUE entries whose names are known to carry human-readable
  text (see version_re_) are treated as translateable.
  '''

  # A typical version info resource can look like this:
  #
  # VS_VERSION_INFO VERSIONINFO
  #  FILEVERSION 1,0,0,1
  #  PRODUCTVERSION 1,0,0,1
  #  FILEFLAGSMASK 0x3fL
  # #ifdef _DEBUG
  #  FILEFLAGS 0x1L
  # #else
  #  FILEFLAGS 0x0L
  # #endif
  #  FILEOS 0x4L
  #  FILETYPE 0x2L
  #  FILESUBTYPE 0x0L
  # BEGIN
  #     BLOCK "StringFileInfo"
  #     BEGIN
  #         BLOCK "040904e4"
  #         BEGIN
  #             VALUE "CompanyName", "TODO: <Company name>"
  #             VALUE "FileDescription", "TODO: <File description>"
  #             VALUE "FileVersion", "1.0.0.1"
  #             VALUE "LegalCopyright", "TODO: (c) <Company name>.  All rights reserved."
  #             VALUE "InternalName", "res_format_test.dll"
  #             VALUE "OriginalFilename", "res_format_test.dll"
  #             VALUE "ProductName", "TODO: <Product name>"
  #             VALUE "ProductVersion", "1.0.0.1"
  #         END
  #     END
  #     BLOCK "VarFileInfo"
  #     BEGIN
  #         VALUE "Translation", 0x409, 1252
  #     END
  # END
  #
  #
  # In addition to the above fields, VALUE fields named "Comments" and
  # "LegalTrademarks" may also be translateable.
  version_re_ = lazy_re.compile('''
    # Match the ID on the first line
    ^(?P<id1>[A-Z0-9_]+)\s+VERSIONINFO
    |
    # Match all potentially translateable VALUE sections
    \s+VALUE\s+"
    (
      CompanyName|FileDescription|LegalCopyright|
      ProductName|Comments|LegalTrademarks
    )",\s+"(?P<text1>.*?([^"]|""))"\s
    ''', re.MULTILINE | re.VERBOSE)

  def Parse(self):
    '''Knows how to parse VERSIONINFO resource sections.'''
    self.ReadSection()
    self._RegExpParse(self.version_re_, self.text_)

  # TODO(joi) May need to override the Translate() method to change the
  # "Translation" VALUE block to indicate the correct language code.
class RCData(Section):
  '''A resource section that contains some data .

  RCDATA blocks are brace-delimited rather than BEGIN/END-delimited, so
  this class overrides Parse() instead of relying on ReadSection().
  '''

  # A typical rcdataresource section looks like this:
  #
  # IDR_BLAH        RCDATA      { 1, 2, 3, 4 }

  # NOTE(review): attribute name "dialog_re_" looks copied from Dialog;
  # it is only referenced from this class's Parse(), so the name is kept.
  dialog_re_ = lazy_re.compile('''
    ^(?P<id1>[A-Z0-9_]+)\s+RCDATA\s+(DISCARDABLE)?\s+\{.*?\}
    ''', re.MULTILINE | re.VERBOSE | re.DOTALL)

  def Parse(self):
    '''Implementation for resource types w/braces (not BEGIN/END)
    '''
    rc_text = self._LoadInputFile()

    out = ''
    begin_count = 0
    openbrace_count = 0
    assert self.extkey
    first_line_re = re.compile(r'\s*' + self.extkey + r'\b')
    for line in rc_text.splitlines(True):
      if out or first_line_re.match(line):
        out += line

      # We stop once the braces balance (could happen in one line).
      begin_count_was = begin_count
      if len(out) > 0:
        # begin_count tracks the current nesting depth; openbrace_count
        # remembers whether any '{' was ever seen.
        openbrace_count += line.count('{')
        begin_count += line.count('{')
        begin_count -= line.count('}')
      # NOTE(review): the first clause appears subsumed by the second
      # (begin_count_was == 1 implies at least one open brace was seen);
      # kept as-is to preserve exact behavior -- confirm before simplifying.
      if ((begin_count_was == 1 and begin_count == 0) or
          (openbrace_count > 0 and begin_count == 0)):
        break
    if len(out) == 0:
      raise exception.SectionNotFound('%s in file %s' % (self.extkey, self.rc_file))

    self.text_ = out
    self._RegExpParse(self.dialog_re_, out)
class Accelerators(Section):
  '''An ACCELERATORS table.

  Accelerator tables carry no translateable text, only IDs; gathering
  them keeps the IDs available and the skeleton intact.
  '''

  # A typical ACCELERATORS section looks like this:
  #
  # IDR_ACCELERATOR1 ACCELERATORS
  # BEGIN
  #   "^C",           ID_ACCELERATOR32770,    ASCII,  NOINVERT
  #   "^V",           ID_ACCELERATOR32771,    ASCII,  NOINVERT
  #   VK_INSERT,      ID_ACCELERATOR32772,    VIRTKEY, CONTROL, NOINVERT
  # END

  accelerators_re_ = lazy_re.compile('''
    # Match the ID on the first line
    ^(?P<id1>[A-Z0-9_]+)\s+ACCELERATORS\s+
    |
    # Match accelerators specified as VK_XXX
    \s+VK_[A-Z0-9_]+,\s*(?P<id2>[A-Z0-9_]+)\s*,
    |
    # Match accelerators specified as e.g. "^C"
    \s+"[^"]*",\s+(?P<id3>[A-Z0-9_]+)\s*,
    ''', re.MULTILINE | re.VERBOSE)

  def Parse(self):
    '''Knows how to parse ACCELERATORS resource sections.'''
    self.ReadSection()
    self._RegExpParse(self.accelerators_re_, self.text_)
| bsd-2-clause |
wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/psycopg2/tests/test_sql.py | 8 | 15586 | #!/usr/bin/env python
# test_sql.py - tests for the psycopg2.sql module
#
# Copyright (C) 2016 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import datetime as dt
from io import StringIO
from .testutils import (unittest, ConnectingTestCase,
skip_before_postgres, skip_before_python, skip_copy_if_green)
import psycopg2
from psycopg2 import sql
class SqlFormatTests(ConnectingTestCase):
    # Tests for sql.SQL.format() composition and for executing composed
    # statements on a live connection (supplied by ConnectingTestCase).

    @skip_before_python(2, 7)
    def test_pos(self):
        # Auto-numbered {} placeholders (2.7+ str.format feature).
        s = sql.SQL("select {} from {}").format(
            sql.Identifier('field'), sql.Identifier('table'))
        s1 = s.as_string(self.conn)
        self.assertTrue(isinstance(s1, str))
        self.assertEqual(s1, 'select "field" from "table"')

    def test_pos_spec(self):
        # Explicitly numbered placeholders, including out-of-order use.
        s = sql.SQL("select {0} from {1}").format(
            sql.Identifier('field'), sql.Identifier('table'))
        s1 = s.as_string(self.conn)
        self.assertTrue(isinstance(s1, str))
        self.assertEqual(s1, 'select "field" from "table"')

        s = sql.SQL("select {1} from {0}").format(
            sql.Identifier('table'), sql.Identifier('field'))
        s1 = s.as_string(self.conn)
        self.assertTrue(isinstance(s1, str))
        self.assertEqual(s1, 'select "field" from "table"')

    def test_dict(self):
        # Named placeholders via keyword arguments.
        s = sql.SQL("select {f} from {t}").format(
            f=sql.Identifier('field'), t=sql.Identifier('table'))
        s1 = s.as_string(self.conn)
        self.assertTrue(isinstance(s1, str))
        self.assertEqual(s1, 'select "field" from "table"')

    def test_unicode(self):
        # NOTE(review): identical to the first half of test_pos_spec --
        # presumably the u'' literals were collapsed by a py3 conversion;
        # kept as-is to preserve the test set.
        s = sql.SQL("select {0} from {1}").format(
            sql.Identifier('field'), sql.Identifier('table'))
        s1 = s.as_string(self.conn)
        self.assertTrue(isinstance(s1, str))
        self.assertEqual(s1, 'select "field" from "table"')

    def test_compose_literal(self):
        # Literals are adapted by the connection (here: a date).
        s = sql.SQL("select {0};").format(sql.Literal(dt.date(2016, 12, 31)))
        s1 = s.as_string(self.conn)
        self.assertEqual(s1, "select '2016-12-31'::date;")

    def test_compose_empty(self):
        s = sql.SQL("select foo;").format()
        s1 = s.as_string(self.conn)
        self.assertEqual(s1, "select foo;")

    def test_percent_escape(self):
        # A literal % must survive composition unmangled.
        s = sql.SQL("42 % {0}").format(sql.Literal(7))
        s1 = s.as_string(self.conn)
        self.assertEqual(s1, "42 % 7")

    def test_braces_escape(self):
        # {{ and }} escape to literal braces, as in str.format.
        s = sql.SQL("{{{0}}}").format(sql.Literal(7))
        self.assertEqual(s.as_string(self.conn), "{7}")
        s = sql.SQL("{{1,{0}}}").format(sql.Literal(7))
        self.assertEqual(s.as_string(self.conn), "{1,7}")

    def test_compose_badnargs(self):
        self.assertRaises(IndexError, sql.SQL("select {0};").format)

    @skip_before_python(2, 7)
    def test_compose_badnargs_auto(self):
        # Mixing auto-numbered {} with explicit {N} is an error.
        self.assertRaises(IndexError, sql.SQL("select {};").format)
        self.assertRaises(ValueError, sql.SQL("select {} {1};").format, 10, 20)
        self.assertRaises(ValueError, sql.SQL("select {0} {};").format, 10, 20)

    def test_compose_bad_args_type(self):
        self.assertRaises(IndexError, sql.SQL("select {0};").format, a=10)
        self.assertRaises(KeyError, sql.SQL("select {x};").format, 10)

    def test_must_be_composable(self):
        # Only Composable instances may be substituted, never raw values.
        self.assertRaises(TypeError, sql.SQL("select {0};").format, 'foo')
        self.assertRaises(TypeError, sql.SQL("select {0};").format, 10)

    def test_no_modifiers(self):
        # Conversion (!r) and format-spec (:<) modifiers are rejected.
        self.assertRaises(ValueError, sql.SQL("select {a!r};").format, a=10)
        self.assertRaises(ValueError, sql.SQL("select {a:<};").format, a=10)

    def test_must_be_adaptable(self):
        class Foo(object):
            pass

        self.assertRaises(psycopg2.ProgrammingError,
            sql.SQL("select {0};").format(sql.Literal(Foo())).as_string, self.conn)

    def test_execute(self):
        # Composed objects are accepted directly by cursor.execute().
        cur = self.conn.cursor()
        cur.execute("""
            create table test_compose (
                id serial primary key,
                foo text, bar text, "ba'z" text)
            """)
        cur.execute(
            sql.SQL("insert into {0} (id, {1}) values (%s, {2})").format(
                sql.Identifier('test_compose'),
                sql.SQL(', ').join(map(sql.Identifier, ['foo', 'bar', "ba'z"])),
                (sql.Placeholder() * 3).join(', ')),
            (10, 'a', 'b', 'c'))

        cur.execute("select * from test_compose")
        self.assertEqual(cur.fetchall(), [(10, 'a', 'b', 'c')])

    def test_executemany(self):
        # ...and by cursor.executemany().
        cur = self.conn.cursor()
        cur.execute("""
            create table test_compose (
                id serial primary key,
                foo text, bar text, "ba'z" text)
            """)
        cur.executemany(
            sql.SQL("insert into {0} (id, {1}) values (%s, {2})").format(
                sql.Identifier('test_compose'),
                sql.SQL(', ').join(map(sql.Identifier, ['foo', 'bar', "ba'z"])),
                (sql.Placeholder() * 3).join(', ')),
            [(10, 'a', 'b', 'c'), (20, 'd', 'e', 'f')])

        cur.execute("select * from test_compose")
        self.assertEqual(cur.fetchall(),
            [(10, 'a', 'b', 'c'), (20, 'd', 'e', 'f')])

    @skip_copy_if_green
    @skip_before_postgres(8, 2)
    def test_copy(self):
        # Composed objects are accepted by copy_expert() in both directions.
        cur = self.conn.cursor()
        cur.execute("""
            create table test_compose (
                id serial primary key,
                foo text, bar text, "ba'z" text)
            """)

        s = StringIO("10\ta\tb\tc\n20\td\te\tf\n")
        cur.copy_expert(
            sql.SQL("copy {t} (id, foo, bar, {f}) from stdin").format(
                t=sql.Identifier("test_compose"), f=sql.Identifier("ba'z")), s)

        s1 = StringIO()
        cur.copy_expert(
            sql.SQL("copy (select {f} from {t} order by id) to stdout").format(
                t=sql.Identifier("test_compose"), f=sql.Identifier("ba'z")), s1)
        s1.seek(0)
        self.assertEqual(s1.read(), 'c\nf\n')
class IdentifierTests(ConnectingTestCase):
def test_class(self):
self.assertTrue(issubclass(sql.Identifier, sql.Composable))
def test_init(self):
self.assertTrue(isinstance(sql.Identifier('foo'), sql.Identifier))
self.assertTrue(isinstance(sql.Identifier('foo'), sql.Identifier))
self.assertRaises(TypeError, sql.Identifier, 10)
self.assertRaises(TypeError, sql.Identifier, dt.date(2016, 12, 31))
def test_string(self):
self.assertEqual(sql.Identifier('foo').string, 'foo')
def test_repr(self):
obj = sql.Identifier("fo'o")
self.assertEqual(repr(obj), 'Identifier("fo\'o")')
self.assertEqual(repr(obj), str(obj))
def test_eq(self):
self.assertTrue(sql.Identifier('foo') == sql.Identifier('foo'))
self.assertTrue(sql.Identifier('foo') != sql.Identifier('bar'))
self.assertTrue(sql.Identifier('foo') != 'foo')
self.assertTrue(sql.Identifier('foo') != sql.SQL('foo'))
def test_as_str(self):
self.assertEqual(sql.Identifier('foo').as_string(self.conn), '"foo"')
self.assertEqual(sql.Identifier("fo'o").as_string(self.conn), '"fo\'o"')
def test_join(self):
self.assertTrue(not hasattr(sql.Identifier('foo'), 'join'))
class LiteralTests(ConnectingTestCase):
    """Tests for sql.Literal wrapping, equality and adaptation."""

    def test_class(self):
        self.assertTrue(issubclass(sql.Literal, sql.Composable))

    def test_init(self):
        # The u'foo' variant collapsed into a duplicate line under py3;
        # the duplicate has been removed.
        self.assertIsInstance(sql.Literal('foo'), sql.Literal)
        self.assertIsInstance(sql.Literal(b'foo'), sql.Literal)
        self.assertIsInstance(sql.Literal(42), sql.Literal)
        self.assertIsInstance(
            sql.Literal(dt.date(2016, 12, 31)), sql.Literal)

    def test_wrapped(self):
        self.assertEqual(sql.Literal('foo').wrapped, 'foo')

    def test_repr(self):
        self.assertEqual(repr(sql.Literal("foo")), "Literal('foo')")
        self.assertEqual(str(sql.Literal("foo")), "Literal('foo')")
        # Rendering checks (adaptation needs the connection).
        self.assertQuotedEqual(sql.Literal("foo").as_string(self.conn), "'foo'")
        self.assertEqual(sql.Literal(42).as_string(self.conn), "42")
        self.assertEqual(
            sql.Literal(dt.date(2017, 1, 1)).as_string(self.conn),
            "'2017-01-01'::date")

    def test_eq(self):
        self.assertEqual(sql.Literal('foo'), sql.Literal('foo'))
        self.assertNotEqual(sql.Literal('foo'), sql.Literal('bar'))
        self.assertNotEqual(sql.Literal('foo'), 'foo')
        self.assertNotEqual(sql.Literal('foo'), sql.SQL('foo'))

    def test_must_be_adaptable(self):
        class Foo(object):
            pass

        self.assertRaises(psycopg2.ProgrammingError,
            sql.Literal(Foo()).as_string, self.conn)
class SQLTests(ConnectingTestCase):
    """Tests for the sql.SQL snippet wrapper and its composition operators."""

    def test_class(self):
        self.assertTrue(issubclass(sql.SQL, sql.Composable))

    def test_init(self):
        # The u'foo' variant collapsed into a duplicate line under py3;
        # the duplicate has been removed.
        self.assertIsInstance(sql.SQL('foo'), sql.SQL)
        self.assertRaises(TypeError, sql.SQL, 10)
        self.assertRaises(TypeError, sql.SQL, dt.date(2016, 12, 31))

    def test_string(self):
        self.assertEqual(sql.SQL('foo').string, 'foo')

    def test_repr(self):
        self.assertEqual(repr(sql.SQL("foo")), "SQL('foo')")
        self.assertEqual(str(sql.SQL("foo")), "SQL('foo')")
        self.assertEqual(sql.SQL("foo").as_string(self.conn), "foo")

    def test_eq(self):
        self.assertEqual(sql.SQL('foo'), sql.SQL('foo'))
        self.assertNotEqual(sql.SQL('foo'), sql.SQL('bar'))
        self.assertNotEqual(sql.SQL('foo'), 'foo')
        self.assertNotEqual(sql.SQL('foo'), sql.Literal('foo'))

    def test_sum(self):
        obj = sql.SQL("foo") + sql.SQL("bar")
        self.assertIsInstance(obj, sql.Composed)
        self.assertEqual(obj.as_string(self.conn), "foobar")

    def test_sum_inplace(self):
        obj = sql.SQL("foo")
        obj += sql.SQL("bar")
        self.assertIsInstance(obj, sql.Composed)
        self.assertEqual(obj.as_string(self.conn), "foobar")

    def test_multiply(self):
        obj = sql.SQL("foo") * 3
        self.assertIsInstance(obj, sql.Composed)
        self.assertEqual(obj.as_string(self.conn), "foofoofoo")

    def test_join(self):
        obj = sql.SQL(", ").join(
            [sql.Identifier('foo'), sql.SQL('bar'), sql.Literal(42)])
        self.assertIsInstance(obj, sql.Composed)
        self.assertEqual(obj.as_string(self.conn), '"foo", bar, 42')

        obj = sql.SQL(", ").join(
            sql.Composed([sql.Identifier('foo'), sql.SQL('bar'), sql.Literal(42)]))
        self.assertIsInstance(obj, sql.Composed)
        self.assertEqual(obj.as_string(self.conn), '"foo", bar, 42')

        # Joining nothing yields an empty Composed, not an error.
        obj = sql.SQL(", ").join([])
        self.assertEqual(obj, sql.Composed([]))
class ComposedTest(ConnectingTestCase):
    """Tests for sql.Composed sequences of Composable objects."""

    def test_class(self):
        self.assertTrue(issubclass(sql.Composed, sql.Composable))

    def test_repr(self):
        obj = sql.Composed([sql.Literal("foo"), sql.Identifier("b'ar")])
        self.assertEqual(repr(obj),
            """Composed([Literal('foo'), Identifier("b'ar")])""")
        self.assertEqual(str(obj), repr(obj))

    def test_seq(self):
        # Ambiguous local name 'l' (easily read as 1) renamed to 'parts'.
        parts = [sql.SQL('foo'), sql.Literal('bar'), sql.Identifier('baz')]
        self.assertEqual(sql.Composed(parts).seq, parts)

    def test_eq(self):
        parts = [sql.Literal("foo"), sql.Identifier("b'ar")]
        other_parts = [sql.Literal("foo"), sql.Literal("b'ar")]
        self.assertEqual(sql.Composed(parts), sql.Composed(list(parts)))
        self.assertNotEqual(sql.Composed(parts), parts)
        self.assertNotEqual(sql.Composed(parts), sql.Composed(other_parts))

    def test_join(self):
        obj = sql.Composed([sql.Literal("foo"), sql.Identifier("b'ar")])
        obj = obj.join(", ")
        self.assertIsInstance(obj, sql.Composed)
        self.assertQuotedEqual(obj.as_string(self.conn), "'foo', \"b'ar\"")

    def test_sum(self):
        obj = sql.Composed([sql.SQL("foo ")])
        obj = obj + sql.Literal("bar")
        self.assertIsInstance(obj, sql.Composed)
        self.assertQuotedEqual(obj.as_string(self.conn), "foo 'bar'")

    def test_sum_inplace(self):
        obj = sql.Composed([sql.SQL("foo ")])
        obj += sql.Literal("bar")
        self.assertIsInstance(obj, sql.Composed)
        self.assertQuotedEqual(obj.as_string(self.conn), "foo 'bar'")

        obj = sql.Composed([sql.SQL("foo ")])
        obj += sql.Composed([sql.Literal("bar")])
        self.assertIsInstance(obj, sql.Composed)
        self.assertQuotedEqual(obj.as_string(self.conn), "foo 'bar'")

    def test_iter(self):
        obj = sql.Composed([sql.SQL("foo"), sql.SQL('bar')])
        it = iter(obj)
        self.assertEqual(next(it), sql.SQL('foo'))
        self.assertEqual(next(it), sql.SQL('bar'))
        self.assertRaises(StopIteration, it.__next__)
class PlaceholderTest(ConnectingTestCase):
    """Tests for sql.Placeholder (the ``%s`` / ``%(name)s`` marker)."""
    def test_class(self):
        self.assertTrue(issubclass(sql.Placeholder, sql.Composable))
    def test_name(self):
        self.assertEqual(sql.Placeholder().name, None)
        self.assertEqual(sql.Placeholder('foo').name, 'foo')
    def test_repr(self):
        # BUG FIX: these used assertTrue(x, expected) -- the second argument
        # of assertTrue is the failure *message*, so the assertions always
        # passed for any truthy x.  assertEqual actually compares the values.
        self.assertEqual(str(sql.Placeholder()), 'Placeholder()')
        self.assertEqual(repr(sql.Placeholder()), 'Placeholder()')
        self.assertEqual(sql.Placeholder().as_string(self.conn), '%s')
    def test_repr_name(self):
        # Same assertTrue -> assertEqual fix as test_repr above.
        self.assertEqual(str(sql.Placeholder('foo')), "Placeholder('foo')")
        self.assertEqual(repr(sql.Placeholder('foo')), "Placeholder('foo')")
        self.assertEqual(sql.Placeholder('foo').as_string(self.conn), '%(foo)s')
    def test_bad_name(self):
        self.assertRaises(ValueError, sql.Placeholder, ')')
    def test_eq(self):
        self.assertTrue(sql.Placeholder('foo') == sql.Placeholder('foo'))
        self.assertTrue(sql.Placeholder('foo') != sql.Placeholder('bar'))
        self.assertTrue(sql.Placeholder('foo') != 'foo')
        self.assertTrue(sql.Placeholder() == sql.Placeholder())
        self.assertTrue(sql.Placeholder('foo') != sql.Placeholder())
        self.assertTrue(sql.Placeholder('foo') != sql.Literal('foo'))
class ValuesTest(ConnectingTestCase):
    """The NULL and DEFAULT singletons render as their SQL keywords."""
    def test_null(self):
        null = sql.NULL
        self.assertTrue(isinstance(null, sql.SQL))
        self.assertEqual(null.as_string(self.conn), "NULL")
    def test_default(self):
        default = sql.DEFAULT
        self.assertTrue(isinstance(default, sql.SQL))
        self.assertEqual(default.as_string(self.conn), "DEFAULT")
def test_suite():
    """Collect this module's tests into a suite (used by the test runner)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
p4datasystems/CarnotKEdist | dist/Lib/test/test_dircache.py | 23 | 2367 | """
Test cases for the dircache module
Nick Mathewson
"""
import unittest
from test.test_support import is_jython, run_unittest, TESTFN
import dircache, os, time, sys, tempfile
class DircacheTests(unittest.TestCase):
    """Exercise dircache.listdir caching behaviour and dircache.annotate.

    Uses the modern assertEqual/assertTrue names: assertEquals and assert_
    are deprecated aliases (removed in Python 3.12).
    """
    def setUp(self):
        # Fresh scratch directory per test.
        self.tempdir = tempfile.mkdtemp()
    def tearDown(self):
        for fname in os.listdir(self.tempdir):
            self.delTemp(fname)
        os.rmdir(self.tempdir)
    def writeTemp(self, fname):
        # Create an empty file in the scratch directory; the context manager
        # guarantees the handle is closed even if open() partially fails.
        with open(os.path.join(self.tempdir, fname), 'w'):
            pass
    def mkdirTemp(self, fname):
        os.mkdir(os.path.join(self.tempdir, fname))
    def delTemp(self, fname):
        # Remove either a directory or a plain file under the scratch dir.
        fname = os.path.join(self.tempdir, fname)
        if os.path.isdir(fname):
            os.rmdir(fname)
        else:
            os.unlink(fname)
    def test_listdir(self):
        ## SUCCESSFUL CASES
        entries = dircache.listdir(self.tempdir)
        self.assertEqual(entries, [])
        # Check that cache is actually caching, not just passing through.
        self.assertTrue(dircache.listdir(self.tempdir) is entries)
        # Directories aren't "files" on Windows, and directory mtime has
        # nothing to do with when files under a directory get created.
        # That is, this test can't possibly work under Windows -- dircache
        # is only good for capturing a one-shot snapshot there.
        if (sys.platform[:3] not in ('win', 'os2') and
                (not is_jython or os._name != 'nt')):
            # Sadly, dircache has the same granularity as stat.mtime, and so
            # can't notice any changes that occurred within 1 sec of the last
            # time it examined a directory.
            time.sleep(1)
            self.writeTemp("test1")
            entries = dircache.listdir(self.tempdir)
            self.assertEqual(entries, ['test1'])
            self.assertTrue(dircache.listdir(self.tempdir) is entries)
        ## UNSUCCESSFUL CASES
        self.assertRaises(OSError, dircache.listdir, self.tempdir+"_nonexistent")
    def test_annotate(self):
        self.writeTemp("test2")
        self.mkdirTemp("A")
        lst = ['A', 'test2', 'test_nonexistent']
        dircache.annotate(self.tempdir, lst)
        # Directories get a trailing slash; other entries are untouched.
        self.assertEqual(lst, ['A/', 'test2', 'test_nonexistent'])
def test_main():
    """Entry point used by regrtest to run this module's test cases."""
    run_unittest(DircacheTests)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
| apache-2.0 |
theoryno3/luigi | luigi/contrib/redshift.py | 11 | 12451 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import json
import logging
import time
import luigi
from luigi import postgres
from luigi.contrib import rdbms
from luigi.s3 import S3PathTask, S3Target
logger = logging.getLogger('luigi-interface')
try:
import psycopg2
import psycopg2.errorcodes
except ImportError:
logger.warning("Loading postgres module without psycopg2 installed. "
"Will crash at runtime if postgres functionality is used.")
class RedshiftTarget(postgres.PostgresTarget):
    """
    Target for a resource in Redshift.
    Redshift is similar to postgres with a few adjustments
    required by redshift.
    """
    # Name of the bookkeeping table that records completed updates;
    # configurable via the [redshift] marker-table config option,
    # defaulting to 'table_updates'.
    marker_table = luigi.configuration.get_config().get(
        'redshift',
        'marker-table',
        'table_updates')
    # Disable the postgres-style database-side timestamps for marker rows.
    use_db_timestamps = False
class S3CopyToTable(rdbms.CopyToTable):
    """
    Template task for inserting a data set into Redshift from s3.
    Usage:
    * Subclass and override the required attributes:
    * `host`,
    * `database`,
    * `user`,
    * `password`,
    * `table`,
    * `columns`,
    * `aws_access_key_id`,
    * `aws_secret_access_key`,
    * `s3_load_path`.
    """
    @abc.abstractproperty
    def s3_load_path(self):
        """
        Override to return the load path.
        """
        return None
    @abc.abstractproperty
    def aws_access_key_id(self):
        """
        Override to return the key id.
        """
        return None
    @abc.abstractproperty
    def aws_secret_access_key(self):
        """
        Override to return the secret access key.
        """
        return None
    @abc.abstractproperty
    def copy_options(self):
        """
        Add extra copy options, for example:
        * TIMEFORMAT 'auto'
        * IGNOREHEADER 1
        * TRUNCATECOLUMNS
        * IGNOREBLANKLINES
        """
        return ''
    def table_attributes(self):
        '''Add extra table attributes, for example:
        DISTSTYLE KEY
        DISTKEY (MY_FIELD)
        SORTKEY (MY_FIELD_2, MY_FIELD_3)
        '''
        return ''
    def do_truncate_table(self):
        """
        Return True if table should be truncated before copying new data in.
        """
        return False
    def truncate_table(self, connection):
        # NOTE(review): self.table is interpolated directly into the SQL
        # string; it is assumed to come from trusted task configuration.
        query = "truncate %s" % self.table
        cursor = connection.cursor()
        try:
            cursor.execute(query)
        finally:
            # Always release the cursor, even if the truncate fails.
            cursor.close()
    def create_table(self, connection):
        """
        Override to provide code for creating the target table.
        By default it will be created using types (optionally)
        specified in columns.
        If overridden, use the provided connection object for
        setting up the table in order to create the table and
        insert data using the same transaction.
        """
        if len(self.columns[0]) == 1:
            # only names of columns specified, no types
            raise NotImplementedError("create_table() not implemented "
                                      "for %r and columns types not "
                                      "specified" % self.table)
        elif len(self.columns[0]) == 2:
            # if columns is specified as (name, type) tuples
            coldefs = ','.join(
                '{name} {type}'.format(
                    name=name,
                    type=type) for name, type in self.columns
            )
            query = ("CREATE TABLE "
                     "{table} ({coldefs}) "
                     "{table_attributes}").format(
                table=self.table,
                coldefs=coldefs,
                table_attributes=self.table_attributes())
            connection.cursor().execute(query)
    def run(self):
        """
        If the target table doesn't exist, self.create_table
        will be called to attempt to create the table.
        """
        if not (self.table):
            raise Exception("table need to be specified")
        path = self.s3_load_path()
        connection = self.output().connect()
        if not self.does_table_exist(connection):
            # try creating table
            logger.info("Creating table %s", self.table)
            # reset() discards the failed lookup's transaction state before
            # issuing the CREATE TABLE.
            connection.reset()
            self.create_table(connection)
        elif self.do_truncate_table():
            logger.info("Truncating table %s", self.table)
            self.truncate_table(connection)
        logger.info("Inserting file: %s", path)
        cursor = connection.cursor()
        self.init_copy(connection)
        self.copy(cursor, path)
        # Mark this update in the marker table inside the same transaction
        # as the COPY, so success is recorded atomically with the load.
        self.output().touch(connection)
        connection.commit()
        # commit and clean up
        connection.close()
    def copy(self, cursor, f):
        """
        Defines copying from s3 into redshift.
        """
        # NOTE(review): credentials and options are %-interpolated into the
        # statement rather than passed as query parameters; all values are
        # assumed to come from trusted task configuration.
        cursor.execute("""
         COPY %s from '%s'
         CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s'
         delimiter '%s'
         %s
         ;""" % (self.table, f, self.aws_access_key_id,
                 self.aws_secret_access_key, self.column_separator,
                 self.copy_options))
    def output(self):
        """
        Returns a RedshiftTarget representing the inserted dataset.
        Normally you don't override this.
        """
        return RedshiftTarget(
            host=self.host,
            database=self.database,
            user=self.user,
            password=self.password,
            table=self.table,
            update_id=self.update_id())
    def does_table_exist(self, connection):
        """
        Determine whether the table already exists.
        """
        query = ("select 1 as table_exists "
                 "from pg_table_def "
                 "where tablename = %s limit 1")
        cursor = connection.cursor()
        try:
            cursor.execute(query, (self.table,))
            result = cursor.fetchone()
            return bool(result)
        finally:
            cursor.close()
class S3CopyJSONToTable(S3CopyToTable):
    """
    Template task for inserting a JSON data set into Redshift from s3.
    Usage:
    * Subclass and override the required attributes:
    * `host`,
    * `database`,
    * `user`,
    * `password`,
    * `table`,
    * `columns`,
    * `aws_access_key_id`,
    * `aws_secret_access_key`,
    * `s3_load_path`,
    * `jsonpath`,
    * `copy_json_options`.
    """
    @abc.abstractproperty
    def jsonpath(self):
        """
        Override the jsonpath schema location for the table.
        """
        return ''
    @abc.abstractproperty
    def copy_json_options(self):
        """
        Add extra copy options, for example:
        * GZIP
        * LZOP
        """
        return ''
    def copy(self, cursor, f):
        """
        Defines copying JSON from s3 into redshift.
        """
        # Same %-interpolated COPY statement as the parent class, but with
        # 'JSON AS <jsonpath>' instead of a column delimiter; values are
        # assumed to come from trusted task configuration.
        cursor.execute("""
         COPY %s from '%s'
         CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s'
         JSON AS '%s' %s
         %s
         ;""" % (self.table, f, self.aws_access_key_id,
                 self.aws_secret_access_key, self.jsonpath,
                 self.copy_json_options, self.copy_options))
class RedshiftManifestTask(S3PathTask):
    """
    Generic task to generate a manifest file that can be used
    in S3CopyToTable in order to copy multiple files from your
    s3 folder into a redshift table at once.
    For full description on how to use the manifest file see
    http://docs.aws.amazon.com/redshift/latest/dg/loading-data-files-using-manifest.html
    Usage:
    * requires parameters
        * path - s3 path to the generated manifest file, including the
                 name of the generated file
                 to be copied into a redshift table
        * folder_paths - s3 paths to the folders containing files you wish to be copied
    Output:
    * generated manifest file
    """
    # should be over ridden to point to a variety
    # of folders you wish to copy from
    folder_paths = luigi.Parameter()
    # When False, the manifest JSON is encoded to UTF-8 bytes before writing.
    text_target = True
    def run(self):
        # Collect every file under every source folder as a manifest entry.
        entries = []
        for folder_path in self.folder_paths:
            s3 = S3Target(folder_path)
            client = s3.fs
            for file_name in client.list(s3.path):
                entries.append({
                    'url': '%s/%s' % (folder_path, file_name),
                    'mandatory': True
                })
        manifest = {'entries': entries}
        target = self.output().open('w')
        dump = json.dumps(manifest)
        if not self.text_target:
            dump = dump.encode('utf8')
        target.write(dump)
        target.close()
class KillOpenRedshiftSessions(luigi.Task):
    """
    An task for killing any open Redshift sessions
    in a given database. This is necessary to prevent open user sessions
    with transactions against the table from blocking drop or truncate
    table commands.
    Usage:
    Subclass and override the required `host`, `database`,
    `user`, and `password` attributes.
    """
    # time in seconds to wait before
    # reconnecting to Redshift if our session is killed too.
    # 30 seconds is usually fine; 60 is conservative
    connection_reset_wait_seconds = luigi.IntParameter(default=60)
    @abc.abstractproperty
    def host(self):
        return None
    @abc.abstractproperty
    def database(self):
        return None
    @abc.abstractproperty
    def user(self):
        return None
    @abc.abstractproperty
    def password(self):
        return None
    def update_id(self):
        """
        This update id will be a unique identifier
        for this insert on this table.
        """
        return self.task_id
    def output(self):
        """
        Returns a RedshiftTarget representing the inserted dataset.
        Normally you don't override this.
        """
        # uses class name as a meta-table
        return RedshiftTarget(
            host=self.host,
            database=self.database,
            user=self.user,
            password=self.password,
            table=self.__class__.__name__,
            update_id=self.update_id())
    def run(self):
        """
        Kill any open Redshift sessions for the given database.
        """
        connection = self.output().connect()
        # kill any sessions other than ours and
        # internal Redshift sessions (rdsdb)
        query = ("select pg_terminate_backend(process) "
                 "from STV_SESSIONS "
                 "where db_name=%s "
                 "and user_name != 'rdsdb' "
                 "and process != pg_backend_pid()")
        cursor = connection.cursor()
        logger.info('Killing all open Redshift sessions for database: %s', self.database)
        try:
            cursor.execute(query, (self.database,))
            cursor.close()
            connection.commit()
        except psycopg2.DatabaseError as e:
            # BUG FIX: the original tested `e.message`, a Python-2-only
            # attribute; on Python 3 that line itself raised AttributeError.
            # str(e) is portable and yields '' for an empty message, so the
            # 'EOF' containment check behaves identically.
            if 'EOF' in str(e):
                # sometimes this operation kills the current session.
                # rebuild the connection. Need to pause for 30-60 seconds
                # before Redshift will allow us back in.
                connection.close()
                logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds)
                time.sleep(self.connection_reset_wait_seconds)
                logger.info('Reconnecting to Redshift')
                connection = self.output().connect()
            else:
                raise
        try:
            # Record completion in the marker table, then always close.
            self.output().touch(connection)
            connection.commit()
        finally:
            connection.close()
        logger.info('Done killing all open Redshift sessions for database: %s', self.database)
| apache-2.0 |
f-droid/fdroidserver | fdroidserver/build.py | 1 | 58122 | #!/usr/bin/env python3
#
# build.py - part of the FDroid server tools
# Copyright (C) 2010-2014, Ciaran Gultnieks, ciaran@ciarang.com
# Copyright (C) 2013-2014 Daniel Martí <mvdan@mvdan.cc>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import glob
import subprocess
import posixpath
import re
import sys
import tarfile
import threading
import traceback
import time
import requests
import tempfile
import argparse
from configparser import ConfigParser
import logging
from gettext import ngettext
from . import _
from . import common
from . import net
from . import metadata
from . import scanner
from . import vmtools
from .common import FDroidPopen
from .exception import FDroidException, BuildException, VCSException
try:
import paramiko
except ImportError:
pass
# Note that 'force' here also implies test mode.
def build_server(app, build, vcs, build_dir, output_dir, log_dir, force):
    """Do a build on the builder vm.
    Parameters
    ----------
    app
        app metadata dict
    build
        build metadata for the specific version to build (provides
        versionCode/versionName, srclibs, extlibs, ...)
    vcs
        version control system controller object
    build_dir
        local source-code checkout of app
    output_dir
        target folder for the build result
    force
        build even if checks fail; also implies test mode (output goes to
        the VM's tmp/ instead of unsigned/)
    """
    global buildserverid
    # Fail early if the optional paramiko dependency is not importable.
    try:
        paramiko
    except NameError as e:
        raise BuildException("Paramiko is required to use the buildserver") from e
    if options.verbose:
        logging.getLogger("paramiko").setLevel(logging.INFO)
    else:
        logging.getLogger("paramiko").setLevel(logging.WARN)
    sshinfo = vmtools.get_clean_builder('builder')
    output = None
    try:
        if not buildserverid:
            # Identify which buildserver image the VM is running, for the logs.
            try:
                buildserverid = subprocess.check_output(['vagrant', 'ssh', '-c',
                                                         'cat /home/vagrant/buildserverid'],
                                                        cwd='builder').strip().decode()
                logging.debug(_('Fetched buildserverid from VM: {buildserverid}')
                              .format(buildserverid=buildserverid))
            except Exception as e:
                if type(buildserverid) is not str or not re.match('^[0-9a-f]{40}$', buildserverid):
                    logging.info(subprocess.check_output(['vagrant', 'status'], cwd="builder"))
                    raise FDroidException("Could not obtain buildserverid from buldserver VM. "
                                          "(stored inside the buildserver VM at '/home/vagrant/buildserverid') "
                                          "Please reset your buildserver, the setup VM is broken.") from e
        # Open SSH connection...
        logging.info("Connecting to virtual machine...")
        sshs = paramiko.SSHClient()
        sshs.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        sshs.connect(sshinfo['hostname'], username=sshinfo['user'],
                     port=sshinfo['port'], timeout=300,
                     look_for_keys=False, key_filename=sshinfo['idfile'])
        homedir = posixpath.join('/home', sshinfo['user'])
        # Get an SFTP connection...
        ftp = sshs.open_sftp()
        ftp.get_channel().settimeout(60)
        # Put all the necessary files in place...
        ftp.chdir(homedir)
        # Helper to copy the contents of a directory to the server...
        def send_dir(path):
            logging.debug("rsyncing " + path + " to " + ftp.getcwd())
            # TODO this should move to `vagrant rsync` from >= v1.5
            try:
                subprocess.check_output(['rsync', '--recursive', '--perms', '--links', '--quiet', '--rsh='
                                         + 'ssh -o StrictHostKeyChecking=no'
                                         + ' -o UserKnownHostsFile=/dev/null'
                                         + ' -o LogLevel=FATAL'
                                         + ' -o IdentitiesOnly=yes'
                                         + ' -o PasswordAuthentication=no'
                                         + ' -p ' + str(sshinfo['port'])
                                         + ' -i ' + sshinfo['idfile'],
                                         path,
                                         sshinfo['user'] + "@" + sshinfo['hostname'] + ":" + ftp.getcwd()],
                                        stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                raise FDroidException(str(e), e.output.decode())
        logging.info("Preparing server for build...")
        serverpath = os.path.abspath(os.path.dirname(__file__))
        ftp.mkdir('fdroidserver')
        ftp.chdir('fdroidserver')
        ftp.put(os.path.join(serverpath, '..', 'fdroid'), 'fdroid')
        ftp.put(os.path.join(serverpath, '..', 'gradlew-fdroid'), 'gradlew-fdroid')
        ftp.chmod('fdroid', 0o755)  # nosec B103 permissions are appropriate
        ftp.chmod('gradlew-fdroid', 0o755)  # nosec B103 permissions are appropriate
        send_dir(os.path.join(serverpath))
        ftp.chdir(homedir)
        ftp.put(os.path.join(serverpath, '..', 'buildserver',
                             'config.buildserver.yml'), 'config.yml')
        # config.yml contains credentials, so keep it owner-readable only.
        ftp.chmod('config.yml', 0o600)
        # Copy over the ID (head commit hash) of the fdroidserver in use...
        with open(os.path.join(os.getcwd(), 'tmp', 'fdroidserverid'), 'wb') as fp:
            fp.write(subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                             cwd=serverpath))
        ftp.put('tmp/fdroidserverid', 'fdroidserverid')
        # Copy the metadata - just the file for this app...
        ftp.mkdir('metadata')
        ftp.mkdir('srclibs')
        ftp.chdir('metadata')
        ftp.put(app.metadatapath, os.path.basename(app.metadatapath))
        # And patches if there are any...
        if os.path.exists(os.path.join('metadata', app.id)):
            send_dir(os.path.join('metadata', app.id))
        ftp.chdir(homedir)
        # Create the build directory...
        ftp.mkdir('build')
        ftp.chdir('build')
        ftp.mkdir('extlib')
        ftp.mkdir('srclib')
        # Copy any extlibs that are required...
        if build.extlibs:
            ftp.chdir(posixpath.join(homedir, 'build', 'extlib'))
            for lib in build.extlibs:
                lib = lib.strip()
                libsrc = os.path.join('build/extlib', lib)
                if not os.path.exists(libsrc):
                    raise BuildException("Missing extlib {0}".format(libsrc))
                lp = lib.split('/')
                # Recreate the lib's directory hierarchy on the VM, upload,
                # then walk back up to the extlib root.
                for d in lp[:-1]:
                    if d not in ftp.listdir():
                        ftp.mkdir(d)
                    ftp.chdir(d)
                ftp.put(libsrc, lp[-1])
                for _ignored in lp[:-1]:
                    ftp.chdir('..')
        # Copy any srclibs that are required...
        srclibpaths = []
        if build.srclibs:
            for lib in build.srclibs:
                srclibpaths.append(
                    common.getsrclib(lib, 'build/srclib', basepath=True, prepare=False))
        # If one was used for the main source, add that too.
        basesrclib = vcs.getsrclib()
        if basesrclib:
            srclibpaths.append(basesrclib)
        for name, number, lib in srclibpaths:
            logging.info("Sending srclib '%s'" % lib)
            ftp.chdir(posixpath.join(homedir, 'build', 'srclib'))
            if not os.path.exists(lib):
                raise BuildException("Missing srclib directory '" + lib + "'")
            fv = '.fdroidvcs-' + name
            ftp.put(os.path.join('build/srclib', fv), fv)
            send_dir(lib)
            # Copy the metadata file too...
            ftp.chdir(posixpath.join(homedir, 'srclibs'))
            srclibsfile = os.path.join('srclibs', name + '.yml')
            if os.path.isfile(srclibsfile):
                ftp.put(srclibsfile, os.path.basename(srclibsfile))
            else:
                raise BuildException(_('cannot find required srclibs: "{path}"')
                                     .format(path=srclibsfile))
        # Copy the main app source code
        # (no need if it's a srclib)
        if (not basesrclib) and os.path.exists(build_dir):
            ftp.chdir(posixpath.join(homedir, 'build'))
            fv = '.fdroidvcs-' + app.id
            ftp.put(os.path.join('build', fv), fv)
            send_dir(build_dir)
        # Execute the build script...
        logging.info("Starting build...")
        chan = sshs.get_transport().open_session()
        chan.get_pty()
        # Re-run `fdroid build` inside the VM against the uploaded sources,
        # forwarding the relevant command-line flags.
        cmdline = posixpath.join(homedir, 'fdroidserver', 'fdroid')
        cmdline += ' build --on-server'
        if force:
            cmdline += ' --force --test'
        if options.verbose:
            cmdline += ' --verbose'
        if options.skipscan:
            cmdline += ' --skip-scan'
        if options.notarball:
            cmdline += ' --no-tarball'
        cmdline += " %s:%s" % (app.id, build.versionCode)
        chan.exec_command('bash --login -c "' + cmdline + '"')  # nosec B601 inputs are sanitized
        # Fetch build process output ...
        try:
            cmd_stdout = chan.makefile('rb', 1024)
            output = bytes()
            output += common.get_android_tools_version_log().encode()
            while not chan.exit_status_ready():
                line = cmd_stdout.readline()
                if line:
                    if options.verbose:
                        logging.debug("buildserver > " + str(line, 'utf-8').rstrip())
                    output += line
                else:
                    # No data yet; avoid a busy-wait on the channel.
                    time.sleep(0.05)
            for line in cmd_stdout.readlines():
                if options.verbose:
                    logging.debug("buildserver > " + str(line, 'utf-8').rstrip())
                output += line
        finally:
            cmd_stdout.close()
        # Check build process exit status ...
        logging.info("...getting exit status")
        returncode = chan.recv_exit_status()
        if returncode != 0:
            # NOTE(review): timeout_event is a module-level flag defined
            # elsewhere in this file, set when the build watchdog fires.
            if timeout_event.is_set():
                message = "Timeout exceeded! Build VM force-stopped for {0}:{1}"
            else:
                message = "Build.py failed on server for {0}:{1}"
            raise BuildException(message.format(app.id, build.versionName),
                                 None if options.verbose else str(output, 'utf-8'))
        # Retreive logs...
        toolsversion_log = common.get_toolsversion_logname(app, build)
        try:
            ftp.chdir(posixpath.join(homedir, log_dir))
            ftp.get(toolsversion_log, os.path.join(log_dir, toolsversion_log))
            logging.debug('retrieved %s', toolsversion_log)
        except Exception as e:
            logging.warning('could not get %s from builder vm: %s' % (toolsversion_log, e))
        # Retrieve the built files...
        logging.info("Retrieving build output...")
        if force:
            # Test builds land in tmp/ on the VM instead of unsigned/.
            ftp.chdir(posixpath.join(homedir, 'tmp'))
        else:
            ftp.chdir(posixpath.join(homedir, 'unsigned'))
        apkfile = common.get_release_filename(app, build)
        tarball = common.getsrcname(app, build)
        try:
            ftp.get(apkfile, os.path.join(output_dir, apkfile))
            if not options.notarball:
                ftp.get(tarball, os.path.join(output_dir, tarball))
        except Exception:
            raise BuildException(
                "Build failed for {0}:{1} - missing output files".format(
                    app.id, build.versionName), None if options.verbose else str(output, 'utf-8'))
        ftp.close()
    finally:
        # Suspend the build server.
        vm = vmtools.get_build_vm('builder')
        logging.info('destroying buildserver after build')
        vm.destroy()
    # deploy logfile to repository web server
    if output:
        common.deploy_build_log_with_rsync(app.id, build.versionCode, output)
    else:
        logging.debug('skip publishing full build logs: '
                      'no output present')
def force_gradle_build_tools(build_dir, build_tools):
    """Pin buildToolsVersion to *build_tools* in every .gradle file under build_dir."""
    for root, dirs, files in os.walk(build_dir):
        gradle_files = [f for f in files if f.endswith('.gradle')]
        for filename in gradle_files:
            path = os.path.join(root, filename)
            if not os.path.isfile(path):
                continue
            logging.debug("Forcing build-tools %s in %s" % (build_tools, path))
            # Rewrite any existing buildToolsVersion assignment in place,
            # preserving the original indentation and separator.
            common.regsub_file(r"""(\s*)buildToolsVersion([\s=]+).*""",
                               r"""\1buildToolsVersion\2'%s'""" % build_tools,
                               path)
def transform_first_char(string, method):
    """Return *string* with method() applied to its first character.

    The empty string is returned unchanged.
    """
    if not string:
        return string
    # string[1:] is '' for a 1-char string, so this single expression also
    # covers the original's redundant len == 1 special case.
    return method(string[0]) + string[1:]
def add_failed_builds_entry(failed_builds, appid, build, entry):
    """Append a [appid, versionCode, reason] record to *failed_builds*."""
    record = [appid, int(build.versionCode), str(entry)]
    failed_builds.append(record)
def get_metadata_from_apk(app, build, apkfile):
    """Get the required metadata from the built APK.

    VersionName is allowed to be a blank string, i.e. ''.
    Raises BuildException on any mismatch with the expected app/build.
    """
    appid, versionCode, versionName = common.get_apk_id(apkfile)
    native_code = common.get_native_code(apkfile)

    # A build configured to produce JNI code must actually ship native libs.
    wants_jni = build.buildjni and build.buildjni != ['no']
    if wants_jni and not native_code:
        raise BuildException("Native code should have been built but none was packaged")
    if build.novcheck:
        # Version checking disabled: trust the metadata values instead.
        versionCode = build.versionCode
        versionName = build.versionName

    if not versionCode or versionName is None:
        raise BuildException("Could not find version information in build in output")
    if not appid:
        raise BuildException("Could not find package ID in output")
    if appid != app.id:
        raise BuildException("Wrong package ID - build " + appid + " but expected " + app.id)

    return versionCode, versionName
def build_local(app, build, vcs, build_dir, output_dir, log_dir, srclib_dir, extlib_dir, tmp_dir, force, onserver, refresh):
"""Do a build locally."""
ndk_path = build.ndk_path()
if build.ndk or (build.buildjni and build.buildjni != ['no']):
if not ndk_path:
logging.warning("Android NDK version '%s' could not be found!" % build.ndk)
logging.warning("Configured versions:")
for k, v in config['ndk_paths'].items():
if k.endswith("_orig"):
continue
logging.warning(" %s: %s" % (k, v))
if onserver:
common.auto_install_ndk(build)
else:
raise FDroidException()
elif not os.path.isdir(ndk_path):
logging.critical("Android NDK '%s' is not a directory!" % ndk_path)
raise FDroidException()
common.set_FDroidPopen_env(build)
# create ..._toolsversion.log when running in builder vm
if onserver:
# before doing anything, run the sudo commands to setup the VM
if build.sudo:
logging.info("Running 'sudo' commands in %s" % os.getcwd())
p = FDroidPopen(['sudo', 'DEBIAN_FRONTEND=noninteractive',
'bash', '-x', '-c', build.sudo])
if p.returncode != 0:
raise BuildException("Error running sudo command for %s:%s" %
(app.id, build.versionName), p.output)
p = FDroidPopen(['sudo', 'passwd', '--lock', 'root'])
if p.returncode != 0:
raise BuildException("Error locking root account for %s:%s" %
(app.id, build.versionName), p.output)
p = FDroidPopen(['sudo', 'SUDO_FORCE_REMOVE=yes', 'dpkg', '--purge', 'sudo'])
if p.returncode != 0:
raise BuildException("Error removing sudo for %s:%s" %
(app.id, build.versionName), p.output)
log_path = os.path.join(log_dir,
common.get_toolsversion_logname(app, build))
with open(log_path, 'w') as f:
f.write(common.get_android_tools_version_log())
else:
if build.sudo:
logging.warning('%s:%s runs this on the buildserver with sudo:\n\t%s\nThese commands were skipped because fdroid build is not running on a dedicated build server.'
% (app.id, build.versionName, build.sudo))
# Prepare the source code...
root_dir, srclibpaths = common.prepare_source(vcs, app, build,
build_dir, srclib_dir,
extlib_dir, onserver, refresh)
# We need to clean via the build tool in case the binary dirs are
# different from the default ones
p = None
gradletasks = []
bmethod = build.build_method()
if bmethod == 'maven':
logging.info("Cleaning Maven project...")
cmd = [config['mvn3'], 'clean', '-Dandroid.sdk.path=' + config['sdk_path']]
if '@' in build.maven:
maven_dir = os.path.join(root_dir, build.maven.split('@', 1)[1])
maven_dir = os.path.normpath(maven_dir)
else:
maven_dir = root_dir
p = FDroidPopen(cmd, cwd=maven_dir)
elif bmethod == 'gradle':
logging.info("Cleaning Gradle project...")
if build.preassemble:
gradletasks += build.preassemble
flavours = build.gradle
if flavours == ['yes']:
flavours = []
flavours_cmd = ''.join([transform_first_char(flav, str.upper) for flav in flavours])
gradletasks += ['assemble' + flavours_cmd + 'Release']
cmd = [config['gradle']]
if build.gradleprops:
cmd += ['-P' + kv for kv in build.gradleprops]
cmd += ['clean']
p = FDroidPopen(cmd, cwd=root_dir, envs={"GRADLE_VERSION_DIR": config['gradle_version_dir'], "CACHEDIR": config['cachedir']})
elif bmethod == 'buildozer':
pass
elif bmethod == 'ant':
logging.info("Cleaning Ant project...")
p = FDroidPopen(['ant', 'clean'], cwd=root_dir)
if p is not None and p.returncode != 0:
raise BuildException("Error cleaning %s:%s" %
(app.id, build.versionName), p.output)
for root, dirs, files in os.walk(build_dir):
def del_dirs(dl):
for d in dl:
shutil.rmtree(os.path.join(root, d), ignore_errors=True)
def del_files(fl):
for f in fl:
if f in files:
os.remove(os.path.join(root, f))
if any(f in files for f in ['build.gradle', 'build.gradle.kts', 'settings.gradle', 'settings.gradle.kts']):
# Even when running clean, gradle stores task/artifact caches in
# .gradle/ as binary files. To avoid overcomplicating the scanner,
# manually delete them, just like `gradle clean` should have removed
# the build/* dirs.
del_dirs([os.path.join('build', 'android-profile'),
os.path.join('build', 'generated'),
os.path.join('build', 'intermediates'),
os.path.join('build', 'outputs'),
os.path.join('build', 'reports'),
os.path.join('build', 'tmp'),
os.path.join('buildSrc', 'build'),
'.gradle'])
del_files(['gradlew', 'gradlew.bat'])
if 'pom.xml' in files:
del_dirs(['target'])
if any(f in files for f in ['ant.properties', 'project.properties', 'build.xml']):
del_dirs(['bin', 'gen'])
if 'jni' in dirs:
del_dirs(['obj'])
if options.skipscan:
if build.scandelete:
raise BuildException("Refusing to skip source scan since scandelete is present")
else:
# Scan before building...
logging.info("Scanning source for common problems...")
scanner.options = options # pass verbose through
count = scanner.scan_source(build_dir, build)
if count > 0:
if force:
logging.warning(ngettext('Scanner found {} problem',
'Scanner found {} problems', count).format(count))
else:
raise BuildException(ngettext(
"Can't build due to {} error while scanning",
"Can't build due to {} errors while scanning", count).format(count))
if not options.notarball:
# Build the source tarball right before we build the release...
logging.info("Creating source tarball...")
tarname = common.getsrcname(app, build)
tarball = tarfile.open(os.path.join(tmp_dir, tarname), "w:gz")
def tarexc(t):
return None if any(t.name.endswith(s) for s in ['.svn', '.git', '.hg', '.bzr']) else t
tarball.add(build_dir, tarname, filter=tarexc)
tarball.close()
# Run a build command if one is required...
if build.build:
logging.info("Running 'build' commands in %s" % root_dir)
cmd = common.replace_config_vars(build.build, build)
# Substitute source library paths into commands...
for name, number, libpath in srclibpaths:
cmd = cmd.replace('$$' + name + '$$', os.path.join(os.getcwd(), libpath))
p = FDroidPopen(['bash', '-x', '-c', cmd], cwd=root_dir)
if p.returncode != 0:
raise BuildException("Error running build command for %s:%s" %
(app.id, build.versionName), p.output)
# Build native stuff if required...
if build.buildjni and build.buildjni != ['no']:
logging.info("Building the native code")
jni_components = build.buildjni
if jni_components == ['yes']:
jni_components = ['']
cmd = [os.path.join(ndk_path, "ndk-build"), "-j1"]
for d in jni_components:
if d:
logging.info("Building native code in '%s'" % d)
else:
logging.info("Building native code in the main project")
manifest = os.path.join(root_dir, d, 'AndroidManifest.xml')
if os.path.exists(manifest):
# Read and write the whole AM.xml to fix newlines and avoid
# the ndk r8c or later 'wordlist' errors. The outcome of this
# under gnu/linux is the same as when using tools like
# dos2unix, but the native python way is faster and will
# work in non-unix systems.
manifest_text = open(manifest, 'U').read()
open(manifest, 'w').write(manifest_text)
# In case the AM.xml read was big, free the memory
del manifest_text
p = FDroidPopen(cmd, cwd=os.path.join(root_dir, d))
if p.returncode != 0:
raise BuildException("NDK build failed for %s:%s" % (app.id, build.versionName), p.output)
p = None
# Build the release...
if bmethod == 'maven':
logging.info("Building Maven project...")
if '@' in build.maven:
maven_dir = os.path.join(root_dir, build.maven.split('@', 1)[1])
else:
maven_dir = root_dir
mvncmd = [config['mvn3'], '-Dandroid.sdk.path=' + config['sdk_path'],
'-Dmaven.jar.sign.skip=true', '-Dmaven.test.skip=true',
'-Dandroid.sign.debug=false', '-Dandroid.release=true',
'package']
if build.target:
target = build.target.split('-')[1]
common.regsub_file(r'<platform>[0-9]*</platform>',
r'<platform>%s</platform>' % target,
os.path.join(root_dir, 'pom.xml'))
if '@' in build.maven:
common.regsub_file(r'<platform>[0-9]*</platform>',
r'<platform>%s</platform>' % target,
os.path.join(maven_dir, 'pom.xml'))
p = FDroidPopen(mvncmd, cwd=maven_dir)
bindir = os.path.join(root_dir, 'target')
elif bmethod == 'buildozer':
logging.info("Building Kivy project using buildozer...")
# parse buildozer.spez
spec = os.path.join(root_dir, 'buildozer.spec')
if not os.path.exists(spec):
raise BuildException("Expected to find buildozer-compatible spec at {0}"
.format(spec))
defaults = {'orientation': 'landscape', 'icon': '',
'permissions': '', 'android.api': "19"}
bconfig = ConfigParser(defaults, allow_no_value=True)
bconfig.read(spec)
# update spec with sdk and ndk locations to prevent buildozer from
# downloading.
loc_ndk = common.env['ANDROID_NDK']
loc_sdk = common.env['ANDROID_SDK']
if loc_ndk == '$ANDROID_NDK':
loc_ndk = loc_sdk + '/ndk-bundle'
bc_ndk = None
bc_sdk = None
try:
bc_ndk = bconfig.get('app', 'android.sdk_path')
except Exception:
pass
try:
bc_sdk = bconfig.get('app', 'android.ndk_path')
except Exception:
pass
if bc_sdk is None:
bconfig.set('app', 'android.sdk_path', loc_sdk)
if bc_ndk is None:
bconfig.set('app', 'android.ndk_path', loc_ndk)
fspec = open(spec, 'w')
bconfig.write(fspec)
fspec.close()
logging.info("sdk_path = %s" % loc_sdk)
logging.info("ndk_path = %s" % loc_ndk)
p = None
# execute buildozer
cmd = ['buildozer', 'android', 'release']
try:
p = FDroidPopen(cmd, cwd=root_dir)
except Exception:
pass
# buidozer not installed ? clone repo and run
if (p is None or p.returncode != 0):
cmd = ['git', 'clone', 'https://github.com/kivy/buildozer.git']
p = subprocess.Popen(cmd, cwd=root_dir, shell=False)
p.wait()
if p.returncode != 0:
raise BuildException("Distribute build failed")
cmd = ['python', 'buildozer/buildozer/scripts/client.py', 'android', 'release']
p = FDroidPopen(cmd, cwd=root_dir)
# expected to fail.
# Signing will fail if not set by environnment vars (cf. p4a docs).
# But the unsigned APK will be ok.
p.returncode = 0
elif bmethod == 'gradle':
logging.info("Building Gradle project...")
cmd = [config['gradle']]
if build.gradleprops:
cmd += ['-P' + kv for kv in build.gradleprops]
cmd += gradletasks
p = FDroidPopen(cmd, cwd=root_dir, envs={"GRADLE_VERSION_DIR": config['gradle_version_dir'], "CACHEDIR": config['cachedir']})
elif bmethod == 'ant':
logging.info("Building Ant project...")
cmd = ['ant']
if build.antcommands:
cmd += build.antcommands
else:
cmd += ['release']
p = FDroidPopen(cmd, cwd=root_dir)
bindir = os.path.join(root_dir, 'bin')
if os.path.isdir(os.path.join(build_dir, '.git')):
import git
commit_id = common.get_head_commit_id(git.repo.Repo(build_dir))
else:
commit_id = build.commit
if p is not None and p.returncode != 0:
raise BuildException("Build failed for %s:%s@%s" % (app.id, build.versionName, commit_id),
p.output)
logging.info("Successfully built version {versionName} of {appid} from {commit_id}"
.format(versionName=build.versionName, appid=app.id, commit_id=commit_id))
omethod = build.output_method()
if omethod == 'maven':
stdout_apk = '\n'.join([
line for line in p.output.splitlines() if any(
a in line for a in ('.apk', '.ap_', '.jar'))])
m = re.match(r".*^\[INFO\] .*apkbuilder.*/([^/]*)\.apk",
stdout_apk, re.S | re.M)
if not m:
m = re.match(r".*^\[INFO\] Creating additional unsigned apk file .*/([^/]+)\.apk[^l]",
stdout_apk, re.S | re.M)
if not m:
m = re.match(r'.*^\[INFO\] [^$]*aapt \[package,[^$]*' + bindir + r'/([^/]+)\.ap[_k][,\]]',
stdout_apk, re.S | re.M)
if not m:
m = re.match(r".*^\[INFO\] Building jar: .*/" + bindir + r"/(.+)\.jar",
stdout_apk, re.S | re.M)
if not m:
raise BuildException('Failed to find output')
src = m.group(1)
src = os.path.join(bindir, src) + '.apk'
elif omethod == 'buildozer':
src = None
for apks_dir in [
os.path.join(root_dir, '.buildozer', 'android', 'platform', 'build', 'dists', bconfig.get('app', 'title'), 'bin'),
]:
for apkglob in ['*-release-unsigned.apk', '*-unsigned.apk', '*.apk']:
apks = glob.glob(os.path.join(apks_dir, apkglob))
if len(apks) > 1:
raise BuildException('More than one resulting apks found in %s' % apks_dir,
'\n'.join(apks))
if len(apks) == 1:
src = apks[0]
break
if src is not None:
break
if src is None:
raise BuildException('Failed to find any output apks')
elif omethod == 'gradle':
src = None
apk_dirs = [
# gradle plugin >= 3.0
os.path.join(root_dir, 'build', 'outputs', 'apk', 'release'),
# gradle plugin < 3.0 and >= 0.11
os.path.join(root_dir, 'build', 'outputs', 'apk'),
# really old path
os.path.join(root_dir, 'build', 'apk'),
]
# If we build with gradle flavours with gradle plugin >= 3.0 the APK will be in
# a subdirectory corresponding to the flavour command used, but with different
# capitalization.
if flavours_cmd:
apk_dirs.append(os.path.join(root_dir, 'build', 'outputs', 'apk', transform_first_char(flavours_cmd, str.lower), 'release'))
for apks_dir in apk_dirs:
for apkglob in ['*-release-unsigned.apk', '*-unsigned.apk', '*.apk']:
apks = glob.glob(os.path.join(apks_dir, apkglob))
if len(apks) > 1:
raise BuildException('More than one resulting apks found in %s' % apks_dir,
'\n'.join(apks))
if len(apks) == 1:
src = apks[0]
break
if src is not None:
break
if src is None:
raise BuildException('Failed to find any output apks')
elif omethod == 'ant':
stdout_apk = '\n'.join([
line for line in p.output.splitlines() if '.apk' in line])
src = re.match(r".*^.*Creating (.+) for release.*$.*", stdout_apk,
re.S | re.M).group(1)
src = os.path.join(bindir, src)
elif omethod == 'raw':
output_path = common.replace_build_vars(build.output, build)
globpath = os.path.join(root_dir, output_path)
apks = glob.glob(globpath)
if len(apks) > 1:
raise BuildException('Multiple apks match %s' % globpath, '\n'.join(apks))
if len(apks) < 1:
raise BuildException('No apks match %s' % globpath)
src = os.path.normpath(apks[0])
# Make sure it's not debuggable...
if common.is_apk_and_debuggable(src):
raise BuildException("APK is debuggable")
# By way of a sanity check, make sure the version and version
# code in our new APK match what we expect...
logging.debug("Checking " + src)
if not os.path.exists(src):
raise BuildException("Unsigned APK is not at expected location of " + src)
if common.get_file_extension(src) == 'apk':
vercode, version = get_metadata_from_apk(app, build, src)
if version != build.versionName or vercode != build.versionCode:
raise BuildException(("Unexpected version/version code in output;"
" APK: '%s' / '%s', "
" Expected: '%s' / '%s'")
% (version, str(vercode), build.versionName,
str(build.versionCode)))
if (options.scan_binary or config.get('scan_binary')) and not options.skipscan:
if scanner.scan_binary(src):
raise BuildException("Found blocklisted packages in final apk!")
# Copy the unsigned APK to our destination directory for further
# processing (by publish.py)...
dest = os.path.join(
output_dir,
common.get_release_filename(
app, build, common.get_file_extension(src)
)
)
shutil.copyfile(src, dest)
# Move the source tarball into the output directory...
if output_dir != tmp_dir and not options.notarball:
shutil.move(os.path.join(tmp_dir, tarname),
os.path.join(output_dir, tarname))
def trybuild(app, build, build_dir, output_dir, log_dir, also_check_dir,
             srclib_dir, extlib_dir, tmp_dir, repo_dir, vcs, test,
             server, force, onserver, refresh):
    """Build one version of an app, unless its output already exists.

    Parameters
    ----------
    output_dir
        The directory where the build output will go.
        Usually this is the 'unsigned' directory.
    repo_dir
        The repo directory - used for checking if the build is necessary.
    also_check_dir
        An additional location for checking if the build
        is necessary (usually the archive repo)
    test
        True if building in test mode, in which case the build will
        always happen, even if the output already exists. In test mode, the
        output directory should be a temporary location, not any of the real
        ones.

    Returns
    -------
    Boolean
        True if the build was done, False if it wasn't necessary.
    """
    apk_name = common.get_release_filename(app, build)
    # Outside test mode, skip the build when the artifact is already present
    # in the output dir, the repo, or (optionally) the archive.
    if not test:
        candidates = [
            os.path.join(output_dir, apk_name),
            os.path.join(repo_dir, apk_name),
        ]
        if also_check_dir:
            candidates.append(os.path.join(also_check_dir, apk_name))
        if any(os.path.exists(c) for c in candidates):
            return False
    # Disabled builds are only forced through with --force.
    if build.disable and not options.force:
        return False
    logging.info("Building version %s (%s) of %s" % (
        build.versionName, build.versionCode, app.id))
    if server:
        # When using server mode, still keep a local cache of the repo, by
        # grabbing the source now.
        vcs.gotorevision(build.commit, refresh)
        build_server(app, build, vcs, build_dir, output_dir, log_dir, force)
    else:
        build_local(app, build, vcs, build_dir, output_dir, log_dir,
                    srclib_dir, extlib_dir, tmp_dir, force, onserver, refresh)
    return True
def force_halt_build(timeout):
    """Halt the currently running Vagrant VM, to be called from a Timer."""
    logging.error(_('Force halting build after {0} sec timeout!').format(timeout))
    # Signal the main loop that the watchdog fired, then stop the VM.
    timeout_event.set()
    vmtools.get_build_vm('builder').halt()
def parse_commandline():
    """Build the argument parser for `fdroid build` and parse sys.argv.

    Returns
    -------
    options
    parser
    """
    parser = argparse.ArgumentParser(usage="%(prog)s [options] [APPID[:VERCODE] [APPID[:VERCODE] ...]]")
    common.setup_global_opts(parser)
    parser.add_argument("appid", nargs='*', help=_("application ID with optional versionCode in the form APPID[:VERCODE]"))
    parser.add_argument("-l", "--latest", action="store_true", default=False,
                        help=_("Build only the latest version of each package"))
    parser.add_argument("-s", "--stop", action="store_true", default=False,
                        help=_("Make the build stop on exceptions"))
    parser.add_argument("-t", "--test", action="store_true", default=False,
                        help=_("Test mode - put output in the tmp directory only, and always build, even if the output already exists."))
    parser.add_argument("--server", action="store_true", default=False,
                        help=_("Use build server"))
    parser.add_argument("--reset-server", action="store_true", default=False,
                        help=_("Reset and create a brand new build server, even if the existing one appears to be ok."))
    # Internal API: tells fdroid that it is running inside a buildserver vm,
    # so the flag is hidden from --help output.
    parser.add_argument("--on-server", dest="onserver", action="store_true", default=False,
                        help=argparse.SUPPRESS)
    parser.add_argument("--skip-scan", dest="skipscan", action="store_true", default=False,
                        help=_("Skip scanning the source code for binaries and other problems"))
    parser.add_argument("--scan-binary", action="store_true", default=False,
                        help=_("Scan the resulting APK(s) for known non-free classes."))
    parser.add_argument("--no-tarball", dest="notarball", action="store_true", default=False,
                        help=_("Don't create a source tarball, useful when testing a build"))
    parser.add_argument("--no-refresh", dest="refresh", action="store_false", default=True,
                        help=_("Don't refresh the repository, useful when testing a build with no internet connection"))
    parser.add_argument("-f", "--force", action="store_true", default=False,
                        help=_("Force build of disabled apps, and carries on regardless of scan problems. Only allowed in test mode."))
    parser.add_argument("-a", "--all", action="store_true", default=False,
                        help=_("Build all applications available"))
    parser.add_argument("-w", "--wiki", default=False, action="store_true",
                        help=_("Update the wiki"))
    metadata.add_metadata_arguments(parser)
    options = parser.parse_args()
    metadata.warnings_action = options.W
    # Force --stop with --on-server to get correct exit code
    if options.onserver:
        options.stop = True
    if options.force and not options.test:
        parser.error("option %s: Force is only allowed in test mode" % "force")
    return options, parser
# Module-level state shared between parse_commandline(), main() and the
# build helpers. Populated inside main(); None until then.
options = None
config = None
# commit ids recorded for the wiki/status reports
buildserverid = None
fdroidserverid = None
# wall-clock start of this run, used for wiki timestamps and page names
start_timestamp = time.gmtime()
status_output = None
# set by force_halt_build() when the per-build watchdog timer fires
timeout_event = threading.Event()
def main():
# Entry point for `fdroid build`: parses options, validates the
# environment, selects apps/builds to process, runs each build (with an
# optional watchdog timer and wiki/status reporting), then summarizes.
global options, config, buildserverid, fdroidserverid
options, parser = parse_commandline()
# The defaults for .fdroid.* metadata that is included in a git repo are
# different than for the standard metadata/ layout because expectations
# are different. In this case, the most common user will be the app
# developer working on the latest update of the app on their own machine.
local_metadata_files = common.get_local_metadata_files()
if len(local_metadata_files) == 1: # there is local metadata in an app's source
config = dict(common.default_config)
# `fdroid build` should build only the latest version by default since
# most of the time the user will be building the most recent update
if not options.all:
options.latest = True
elif len(local_metadata_files) > 1:
raise FDroidException("Only one local metadata file allowed! Found: "
+ " ".join(local_metadata_files))
else:
if not os.path.isdir('metadata') and len(local_metadata_files) == 0:
raise FDroidException("No app metadata found, nothing to process!")
if not options.appid and not options.all:
parser.error("option %s: If you really want to build all the apps, use --all" % "all")
config = common.read_config(options)
if config['build_server_always']:
options.server = True
if options.reset_server and not options.server:
parser.error("option %s: Using --reset-server without --server makes no sense" % "reset-server")
# When building locally (or on the server itself), the Android SDK must
# have its standard tool directories installed.
if options.onserver or not options.server:
for d in ['build-tools', 'platform-tools', 'tools']:
if not os.path.isdir(os.path.join(config['sdk_path'], d)):
raise FDroidException(_("Android SDK '{path}' does not have '{dirname}' installed!")
.format(path=config['sdk_path'], dirname=d))
# --- create the working directories used during the run ---
log_dir = 'logs'
if not os.path.isdir(log_dir):
logging.info("Creating log directory")
os.makedirs(log_dir)
tmp_dir = 'tmp'
if not os.path.isdir(tmp_dir):
logging.info("Creating temporary directory")
os.makedirs(tmp_dir)
if options.test:
output_dir = tmp_dir
else:
output_dir = 'unsigned'
if not os.path.isdir(output_dir):
logging.info("Creating output directory")
os.makedirs(output_dir)
binaries_dir = os.path.join(output_dir, 'binaries')
if config['archive_older'] != 0:
also_check_dir = 'archive'
else:
also_check_dir = None
if options.onserver:
status_output = dict() # HACK dummy placeholder
else:
status_output = common.setup_status_output(start_timestamp)
repo_dir = 'repo'
build_dir = 'build'
if not os.path.isdir(build_dir):
logging.info("Creating build directory")
os.makedirs(build_dir)
srclib_dir = os.path.join(build_dir, 'srclib')
extlib_dir = os.path.join(build_dir, 'extlib')
# Read all app and srclib metadata
pkgs = common.read_pkg_args(options.appid, True)
allapps = metadata.read_metadata(pkgs, sort_by_time=True)
apps = common.read_app_args(options.appid, allapps, True)
# Drop apps that are disabled, have no repo, or have no builds defined.
for appid, app in list(apps.items()):
if (app.get('Disabled') and not options.force) or not app.get('RepoType') or not app.get('Builds', []):
del apps[appid]
if not apps:
raise FDroidException("No apps to process.")
# make sure enough open files are allowed to process everything
try:
import resource # not available on Windows
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if len(apps) > soft:
try:
soft = len(apps) * 2
if soft > hard:
soft = hard
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
logging.debug(_('Set open file limit to {integer}')
.format(integer=soft))
except (OSError, ValueError) as e:
logging.warning(_('Setting open file limit failed: ') + str(e))
except ImportError:
pass
# With --latest, keep only the newest non-disabled build of each app.
if options.latest:
for app in apps.values():
for build in reversed(app.get('Builds', [])):
if build.disable and not options.force:
continue
app['Builds'] = [build]
break
if options.wiki:
import mwclient
site = mwclient.Site((config['wiki_protocol'], config['wiki_server']),
path=config['wiki_path'])
site.login(config['wiki_user'], config['wiki_password'])
# Build applications...
failed_builds = []
build_succeeded = []
build_succeeded_ids = []
status_output['failedBuilds'] = failed_builds
status_output['successfulBuilds'] = build_succeeded
status_output['successfulBuildIds'] = build_succeeded_ids
# Only build for 72 hours, then stop gracefully.
endtime = time.time() + 72 * 60 * 60
max_build_time_reached = False
for appid, app in apps.items():
first = True
for build in app.get('Builds', []):
if time.time() > endtime:
max_build_time_reached = True
break
# Enable watchdog timer (2 hours by default).
if build.timeout is None:
timeout = 7200
else:
timeout = int(build.timeout)
if options.server and timeout > 0:
logging.debug(_('Setting {0} sec timeout for this build').format(timeout))
timer = threading.Timer(timeout, force_halt_build, [timeout])
timeout_event.clear()
timer.start()
else:
timer = None
wikilog = None
build_starttime = common.get_wiki_timestamp()
tools_version_log = ''
if not options.onserver:
tools_version_log = common.get_android_tools_version_log()
common.write_running_status_json(status_output)
try:
# For the first build of a particular app, we need to set up
# the source repo. We can reuse it on subsequent builds, if
# there are any.
if first:
vcs, build_dir = common.setup_vcs(app)
first = False
logging.info("Using %s" % vcs.clientversion())
logging.debug("Checking " + build.versionName)
if trybuild(app, build, build_dir, output_dir, log_dir,
also_check_dir, srclib_dir, extlib_dir,
tmp_dir, repo_dir, vcs, options.test,
options.server, options.force,
options.onserver, options.refresh):
toolslog = os.path.join(log_dir,
common.get_toolsversion_logname(app, build))
if not options.onserver and os.path.exists(toolslog):
with open(toolslog, 'r') as f:
tools_version_log = ''.join(f.readlines())
os.remove(toolslog)
if app.Binaries is not None:
# This is an app where we build from source, and
# verify the APK contents against a developer's
# binary. We get that binary now, and save it
# alongside our built one in the 'unsigend'
# directory.
if not os.path.isdir(binaries_dir):
os.makedirs(binaries_dir)
logging.info("Created directory for storing "
"developer supplied reference "
"binaries: '{path}'"
.format(path=binaries_dir))
url = app.Binaries
url = url.replace('%v', build.versionName)
url = url.replace('%c', str(build.versionCode))
logging.info("...retrieving " + url)
of = re.sub(r'\.apk$', '.binary.apk', common.get_release_filename(app, build))
of = os.path.join(binaries_dir, of)
try:
net.download_file(url, local_filename=of)
except requests.exceptions.HTTPError as e:
raise FDroidException(
'Downloading Binaries from %s failed.' % url) from e
# Now we check whether the build can be verified to
# match the supplied binary or not. Should the
# comparison fail, we mark this build as a failure
# and remove everything from the unsigend folder.
with tempfile.TemporaryDirectory() as tmpdir:
unsigned_apk = \
common.get_release_filename(app, build)
unsigned_apk = \
os.path.join(output_dir, unsigned_apk)
compare_result = \
common.verify_apks(of, unsigned_apk, tmpdir)
if compare_result:
if options.test:
logging.warning(_('Keeping failed build "{apkfilename}"')
.format(apkfilename=unsigned_apk))
else:
logging.debug('removing %s', unsigned_apk)
os.remove(unsigned_apk)
logging.debug('removing %s', of)
os.remove(of)
# truncate very long diffs so the report stays readable
compare_result = compare_result.split('\n')
line_count = len(compare_result)
compare_result = compare_result[:299]
if line_count > len(compare_result):
line_difference = \
line_count - len(compare_result)
compare_result.append('%d more lines ...' %
line_difference)
compare_result = '\n'.join(compare_result)
raise FDroidException('compared built binary '
'to supplied reference '
'binary but failed',
compare_result)
else:
logging.info('compared built binary to '
'supplied reference binary '
'successfully')
build_succeeded.append(app)
build_succeeded_ids.append([app['id'], build.versionCode])
wikilog = "Build succeeded"
except VCSException as vcse:
reason = str(vcse).split('\n', 1)[0] if options.verbose else str(vcse)
logging.error("VCS error while building app %s: %s" % (
appid, reason))
if options.stop:
logging.debug("Error encoutered, stopping by user request.")
common.force_exit(1)
add_failed_builds_entry(failed_builds, appid, build, vcse)
wikilog = str(vcse)
common.deploy_build_log_with_rsync(appid, build.versionCode, str(vcse))
except FDroidException as e:
# append the failure details to this app's persistent log file
with open(os.path.join(log_dir, appid + '.log'), 'a+') as f:
f.write('\n\n============================================================\n')
f.write('versionCode: %s\nversionName: %s\ncommit: %s\n' %
(build.versionCode, build.versionName, build.commit))
f.write('Build completed at '
+ common.get_wiki_timestamp() + '\n')
f.write('\n' + tools_version_log + '\n')
f.write(str(e))
logging.error("Could not build app %s: %s" % (appid, e))
if options.stop:
logging.debug("Error encoutered, stopping by user request.")
common.force_exit(1)
add_failed_builds_entry(failed_builds, appid, build, e)
wikilog = e.get_wikitext()
except Exception as e:
logging.error("Could not build app %s due to unknown error: %s" % (
appid, traceback.format_exc()))
if options.stop:
logging.debug("Error encoutered, stopping by user request.")
common.force_exit(1)
add_failed_builds_entry(failed_builds, appid, build, e)
wikilog = str(e)
if options.wiki and wikilog:
try:
# Write a page with the last build log for this version code
lastbuildpage = appid + '/lastbuild_' + build.versionCode
newpage = site.Pages[lastbuildpage]
with open(os.path.join('tmp', 'fdroidserverid')) as fp:
fdroidserverid = fp.read().rstrip()
txt = "* build session started at " + common.get_wiki_timestamp(start_timestamp) + '\n' \
+ "* this build started at " + build_starttime + '\n' \
+ "* this build completed at " + common.get_wiki_timestamp() + '\n' \
+ common.get_git_describe_link() \
+ '* fdroidserverid: [https://gitlab.com/fdroid/fdroidserver/commit/' \
+ fdroidserverid + ' ' + fdroidserverid + ']\n\n'
if buildserverid:
txt += '* buildserverid: [https://gitlab.com/fdroid/fdroidserver/commit/' \
+ buildserverid + ' ' + buildserverid + ']\n\n'
txt += tools_version_log + '\n\n'
txt += '== Build Log ==\n\n' + wikilog
newpage.save(txt, summary='Build log')
# Redirect from /lastbuild to the most recent build log
newpage = site.Pages[appid + '/lastbuild']
newpage.save('#REDIRECT [[' + lastbuildpage + ']]', summary='Update redirect')
except Exception as e:
logging.error("Error while attempting to publish build log: %s" % e)
if timer:
timer.cancel() # kill the watchdog timer
if max_build_time_reached:
status_output['maxBuildTimeReached'] = True
logging.info("Stopping after global build timeout...")
break
# --- run summary: log results, optionally publish to the wiki ---
for app in build_succeeded:
logging.info("success: %s" % (app.id))
if not options.verbose:
for fb in failed_builds:
logging.info('Build for app {}:{} failed:\n{}'.format(*fb))
logging.info(_("Finished"))
if len(build_succeeded) > 0:
logging.info(ngettext("{} build succeeded",
"{} builds succeeded", len(build_succeeded)).format(len(build_succeeded)))
if len(failed_builds) > 0:
logging.info(ngettext("{} build failed",
"{} builds failed", len(failed_builds)).format(len(failed_builds)))
if options.wiki:
wiki_page_path = 'build_' + time.strftime('%s', start_timestamp)
newpage = site.Pages[wiki_page_path]
txt = ''
txt += "* command line: <code>%s</code>\n" % ' '.join(sys.argv)
txt += "* started at %s\n" % common.get_wiki_timestamp(start_timestamp)
txt += "* completed at %s\n" % common.get_wiki_timestamp()
if buildserverid:
txt += ('* buildserverid: [https://gitlab.com/fdroid/fdroidserver/commit/{id} {id}]\n'
.format(id=buildserverid))
if fdroidserverid:
txt += ('* fdroidserverid: [https://gitlab.com/fdroid/fdroidserver/commit/{id} {id}]\n'
.format(id=fdroidserverid))
if os.cpu_count():
txt += "* host processors: %d\n" % os.cpu_count()
status_output['hostOsCpuCount'] = os.cpu_count()
if os.path.isfile('/proc/meminfo') and os.access('/proc/meminfo', os.R_OK):
with open('/proc/meminfo') as fp:
for line in fp:
m = re.search(r'MemTotal:\s*([0-9].*)', line)
if m:
txt += "* host RAM: %s\n" % m.group(1)
status_output['hostProcMeminfoMemTotal'] = m.group(1)
break
fdroid_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
buildserver_config = os.path.join(fdroid_path, 'makebuildserver.config.py')
if os.path.isfile(buildserver_config) and os.access(buildserver_config, os.R_OK):
with open(buildserver_config) as configfile:
for line in configfile:
m = re.search(r'cpus\s*=\s*([0-9].*)', line)
if m:
txt += "* guest processors: %s\n" % m.group(1)
status_output['guestVagrantVmCpus'] = m.group(1)
m = re.search(r'memory\s*=\s*([0-9].*)', line)
if m:
txt += "* guest RAM: %s MB\n" % m.group(1)
status_output['guestVagrantVmMemory'] = m.group(1)
txt += "* successful builds: %d\n" % len(build_succeeded)
txt += "* failed builds: %d\n" % len(failed_builds)
txt += "\n\n"
newpage.save(txt, summary='Run log')
newpage = site.Pages['build']
newpage.save('#REDIRECT [[' + wiki_page_path + ']]', summary='Update redirect')
if buildserverid:
status_output['buildserver'] = {'commitId': buildserverid}
if not options.onserver:
common.write_status_json(status_output)
# hack to ensure this exits, even is some threads are still running
common.force_exit()
# Standard script entry point: only run main() when executed directly.
if __name__ == "__main__":
main()
| agpl-3.0 |
vesellov/bitdust.devel | contacts/identitydb.py | 1 | 13247 | #!/usr/bin/python
# identitydb.py
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (identitydb.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software. If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at bitdust.io@gmail.com
#
#
#
"""
.. module:: identitydb.
Here is a simple1 database for identities cache. Also keep track of
changing identities sources and maintain a several "index" dictionaries
to speed up processes.
"""
#------------------------------------------------------------------------------
from __future__ import absolute_import
import os
import time
#------------------------------------------------------------------------------
from logs import lg
from system import bpio
from main import settings
from lib import nameurl
from lib import strng
from userid import identity
#------------------------------------------------------------------------------
# Dictionary cache of identities - lookup by primary url
# global dictionary of identities in this file
# indexed with urls and contains identity objects
_IdentityCache = {}
# IDURL (bytes) -> small integer cache ID, assigned in arrival order by idset()
_IdentityCacheIDs = {}
# next numeric cache ID to hand out
_IdentityCacheCounter = 0
# IDURL -> time.time() of the last cache write
_IdentityCacheModifiedTime = {}
# contact address -> set of IDURLs advertising that contact
_Contact2IDURL = {}
# IDURL -> set of its contact addresses (reverse of the above)
_IDURL2Contacts = {}
# (host, port) -> IDURL, for reverse lookup by network address
_IPPort2IDURL = {}
# reserved for local IP bookkeeping — not populated in this part of the file
_LocalIPs = {}
# callables invoked by fire_cache_updated_callbacks() on every cache change
_IdentityCacheUpdatedCallbacks = []
#------------------------------------------------------------------------------
def cache():
    """Return the module-level dictionary of cached identities, keyed by IDURL."""
    return _IdentityCache
def cache_ids():
    """Return the module-level mapping from IDURL to its numeric cache ID."""
    return _IdentityCacheIDs
#------------------------------------------------------------------------------
def init():
    """Prepare the on-disk identity cache.

    Need to call before all other methods: makes sure the folder that
    keeps all cached identity files exists, creating it when missing.
    """
    lg.out(4, "identitydb.init")
    cache_dir = settings.IdentityCacheDir()
    if os.path.exists(cache_dir):
        return
    lg.out(8, 'identitydb.init create folder %r' % cache_dir)
    bpio._dir_make(cache_dir)
def shutdown():
    """Log shutdown of the identity database; there is no state to release."""
    lg.out(4, "identitydb.shutdown")
#------------------------------------------------------------------------------
def clear(exclude_list=None):
"""
Clear the database, indexes and cached files from disk.

``exclude_list`` may hold IDURLs whose cached files must be kept on disk
(their in-memory entries are still dropped).
"""
global _Contact2IDURL
global _IPPort2IDURL
global _IDURL2Contacts
global _IdentityCache
global _IdentityCacheIDs
global _IdentityCacheModifiedTime
lg.out(4, "identitydb.clear")
# drop the in-memory cache and every lookup index first
_IdentityCache.clear()
_IdentityCacheIDs.clear()
_IdentityCacheModifiedTime.clear()
_Contact2IDURL.clear()
_IPPort2IDURL.clear()
_IDURL2Contacts.clear()
iddir = settings.IdentityCacheDir()
if not os.path.exists(iddir):
return
# then delete writable cached identity files, honoring exclude_list
for file_name in os.listdir(iddir):
path = os.path.join(iddir, file_name)
if not os.access(path, os.W_OK):
continue
if exclude_list:
idurl = nameurl.FilenameUrl(file_name)
if idurl in exclude_list:
continue
os.remove(path)
lg.out(6, 'identitydb.clear remove ' + path)
# notify listeners once, after everything was wiped
fire_cache_updated_callbacks()
def size():
    """Return how many identities are currently cached in memory."""
    return len(_IdentityCache)
def has_idurl(idurl):
    """Return True when the given IDURL is already present in the memory cache."""
    return strng.to_bin(idurl) in _IdentityCache
def has_file(idurl):
    """Return True if a cached identity file for ``idurl`` exists on disk.

    Returns None (falsy) when the IDURL cannot be converted into a valid
    cache file name, matching the historical behavior of this function.
    """
    idurl = strng.to_bin(idurl)
    try:
        partfilename = nameurl.UrlFilename(idurl)
    except Exception:
        # was a bare "except:" before, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception, same falsy None result
        lg.out(1, "identitydb.has_file ERROR %r is not correct" % idurl)
        return None
    filename = os.path.join(settings.IdentityCacheDir(), partfilename)
    return os.path.exists(filename)
def idset(idurl, id_obj):
"""
Store ``id_obj`` in the cache under ``idurl`` and refresh every index.

Always use this instead of writing to the cache directly, otherwise the
lookup indexes (contacts, IP:port, numeric cache IDs) would get out of
sync with the cache itself.
"""
global _Contact2IDURL
global _IDURL2Contacts
global _IPPort2IDURL
global _IdentityCache
global _IdentityCacheIDs
global _IdentityCacheCounter
global _IdentityCacheModifiedTime
idurl = strng.to_bin(idurl)
if not has_idurl(idurl):
lg.out(6, 'identitydb.idset new identity: %r' % idurl)
_IdentityCache[idurl] = id_obj
_IdentityCacheModifiedTime[idurl] = time.time()
# first time this idurl is seen: assign the next numeric cache ID
identid = _IdentityCacheIDs.get(idurl, None)
if identid is None:
identid = _IdentityCacheCounter
_IdentityCacheCounter += 1
_IdentityCacheIDs[idurl] = identid
# maintain forward (contact -> IDURLs) and reverse (IDURL -> contacts) maps
for contact in id_obj.getContacts():
if contact not in _Contact2IDURL:
_Contact2IDURL[contact] = set()
# else:
# if len(_Contact2IDURL[contact]) >= 1 and idurl not in _Contact2IDURL[contact]:
# lg.warn('another user have same contact: ' + str(list(_Contact2IDURL[contact])))
_Contact2IDURL[contact].add(idurl)
if idurl not in _IDURL2Contacts:
_IDURL2Contacts[idurl] = set()
_IDURL2Contacts[idurl].add(contact)
try:
proto, host, port, fname = nameurl.UrlParse(contact)
ipport = (host, int(port))
_IPPort2IDURL[ipport] = idurl
except:
# contact may not carry a parseable host:port part — skip this index
pass
# TODO: when identity contacts changed - need to remove old items from _Contact2IDURL
fire_cache_updated_callbacks(single_item=(identid, idurl, id_obj))
def idget(idurl):
    """Look up an identity object in the memory cache; ``None`` if missing."""
    return _IdentityCache.get(strng.to_bin(idurl), None)
def idremove(idurl):
    """
    Remove identity from cache, also update indexes.
    Not remove local file.

    Returns the removed identity object, or ``None`` if it was not cached.
    """
    global _IdentityCache
    global _IdentityCacheIDs
    global _IdentityCacheModifiedTime
    global _Contact2IDURL
    global _IDURL2Contacts
    global _IPPort2IDURL
    idurl = strng.to_bin(idurl)
    idobj = _IdentityCache.pop(idurl, None)
    identid = _IdentityCacheIDs.pop(idurl, None)
    _IdentityCacheModifiedTime.pop(idurl, None)
    _IDURL2Contacts.pop(idurl, None)
    if idobj is not None:
        # Tear down the reverse indexes built from this identity's contacts.
        for contact in idobj.getContacts():
            _Contact2IDURL.pop(contact, None)
            try:
                proto, host, port, fname = nameurl.UrlParse(contact)
                ipport = (host, int(port))
                _IPPort2IDURL.pop(ipport, None)
            except:
                # Contact without a numeric port - nothing was indexed for it.
                pass
    # (identid, None, None) signals a removal to the subscribers.
    fire_cache_updated_callbacks(single_item=(identid, None, None))
    return idobj
def idcontacts(idurl):
    """Fast lookup of the contact list for ``idurl`` straight from the index."""
    return list(_IDURL2Contacts.get(strng.to_bin(idurl), set()))
def get(idurl):
    """
    A smart way to get identity from cache.
    If not cached in memory but found on disk - will cache from disk.

    Returns ``None`` when the IDURL is malformed, the file is missing,
    or the file content does not belong to this IDURL.
    """
    idurl = strng.to_bin(idurl)
    if has_idurl(idurl):
        return idget(idurl)
    else:
        try:
            partfilename = nameurl.UrlFilename(idurl)
        except:
            lg.out(1, "identitydb.get ERROR %r is incorrect" % idurl)
            return None
        filename = os.path.join(settings.IdentityCacheDir(), partfilename)
        if not os.path.exists(filename):
            lg.out(6, "identitydb.get file %r not exist" % os.path.basename(filename))
            return None
        idxml = bpio.ReadTextFile(filename)
        if idxml:
            idobj = identity.identity(xmlsrc=idxml)
            idurl_orig = idobj.getIDURL()
            # Guard against a cache file that was stored under the wrong name:
            # the identity's own IDURL must match the requested one.
            if idurl == idurl_orig:
                idset(idurl, idobj)
                return idobj
            else:
                lg.out(1, "identitydb.get ERROR idurl=%r idurl_orig=%r" % (idurl, idurl_orig))
                return None
        # File existed but was empty/unreadable.
        lg.out(6, "identitydb.get %s not found" % nameurl.GetName(idurl))
        return None
def get_filename(idurl):
    """Build the on-disk cache path for ``idurl``; ``None`` when it is malformed."""
    idurl = strng.to_bin(idurl)
    try:
        part = nameurl.UrlFilename(idurl)
    except:
        lg.out(1, "identitydb.get_filename ERROR %r is incorrect" % idurl)
        return None
    return os.path.join(settings.IdentityCacheDir(), part)
def get_idurls_by_contact(contact):
    """List every cached IDURL whose identity advertises the given contact."""
    return list(_Contact2IDURL.get(contact, set()))
def get_idurl_by_ip_port(ip, port):
    """Resolve an IDURL from a known (ip, port) contact endpoint, or ``None``."""
    return _IPPort2IDURL.get((ip, int(port)), None)
def update(idurl, xml_src):
    """
    This is a correct method to update an identity in the local cache.
    PREPRO need to check that date or version is after old one so not
    vulnerable to replay attacks.

    Returns True when the cache (and, if needed, the disk file) was updated.
    """
    idurl = strng.to_bin(idurl)
    try:
        newid = identity.identity(xmlsrc=xml_src)
    except:
        lg.exc()
        return False
    if not newid.isCorrect():
        lg.out(1, "identitydb.update ERROR incorrect identity : %r" % idurl)
        return False
    try:
        # Signature verification; may itself raise on broken input.
        if not newid.Valid():
            lg.out(1, "identitydb.update ERROR identity not Valid : %r" % idurl)
            return False
    except:
        lg.exc()
        return False
    filename = os.path.join(settings.IdentityCacheDir(), nameurl.UrlFilename(idurl))
    if os.path.exists(filename):
        oldidentityxml = bpio.ReadTextFile(filename)
        oldidentity = identity.identity(xmlsrc=oldidentityxml)
        # A changed public key would mean somebody is trying to replace
        # the identity - refuse the update outright.
        if oldidentity.publickey != newid.publickey:
            lg.out(1, "identitydb.update ERROR new publickey does not match old, SECURITY VIOLATION : %r" % idurl)
            return False
        if oldidentity.signature != newid.signature:
            lg.out(6, 'identitydb.update have new data for %r' % nameurl.GetName(idurl))
        else:
            # Same signature: identity unchanged, refresh the in-memory
            # cache only and skip the disk write.
            idset(idurl, newid)
            return True
    # publickeys match so we can update it
    bpio.WriteTextFile(filename, xml_src)
    idset(idurl, newid)
    return True
def remove(idurl):
    """
    Top-level removal: delete the cached identity file from disk (if present)
    and then purge the identity from the in-memory cache and all indexes.
    Always returns True.
    """
    idurl = strng.to_bin(idurl)
    cached_path = os.path.join(settings.IdentityCacheDir(), nameurl.UrlFilename(idurl))
    if os.path.isfile(cached_path):
        lg.out(6, "identitydb.remove file %r" % cached_path)
        try:
            os.remove(cached_path)
        except:
            lg.exc()
    idremove(idurl)
    return True
def update_local_ips_dict(local_ips_dict):
    """Merge freshly discovered local IP addresses into the index (never clears it)."""
    global _LocalIPs
    _LocalIPs.update(local_ips_dict)
def get_local_ip(idurl):
    """Return the known local IP of the given user, or ``None`` if never seen."""
    return _LocalIPs.get(strng.to_bin(idurl), None)
def has_local_ip(idurl):
    """True when a local IP address is known for the given user."""
    return strng.to_bin(idurl) in _LocalIPs
def search_local_ip(ip):
    """Reverse lookup: first cached IDURL known to use local IP ``ip``, else ``None``."""
    return next(
        (user_idurl for user_idurl, known_ip in _LocalIPs.items() if known_ip == ip),
        None)
def get_last_modified_time(idurl):
    """Return the ``time.time()`` stamp of the last cache write for ``idurl`` (or ``None``)."""
    return _IdentityCacheModifiedTime.get(strng.to_bin(idurl), None)
#------------------------------------------------------------------------------
def print_id(idurl):
    """
    For debug purposes: dump the main fields of a cached identity to the log.
    """
    idurl = strng.to_bin(idurl)
    if not has_idurl(idurl):
        return
    ident = get(idurl)
    for field in (ident.sources, ident.contacts, ident.publickey, ident.signature):
        lg.out(6, str(field))
def print_keys():
    """
    For debug purposes: log every cached IDURL with its numeric cache id.
    """
    for cached_idurl in _IdentityCache.keys():
        lg.out(6, "%d: %s" % (_IdentityCacheIDs[cached_idurl], cached_idurl))
def print_cache():
    """
    For debug purposes: dump the whole identity cache to the log.
    """
    for cached_idurl in _IdentityCache.keys():
        lg.out(6, "---------------------")
        print_id(cached_idurl)
#------------------------------------------------------------------------------
def AddCacheUpdatedCallback(cb):
    """Subscribe ``cb`` to be fired on every identity cache modification."""
    global _IdentityCacheUpdatedCallbacks
    _IdentityCacheUpdatedCallbacks.append(cb)
def RemoveCacheUpdatedCallback(cb):
    """Unsubscribe ``cb``; silently ignores callbacks that were never registered."""
    global _IdentityCacheUpdatedCallbacks
    if cb in _IdentityCacheUpdatedCallbacks:
        _IdentityCacheUpdatedCallbacks.remove(cb)
def fire_cache_updated_callbacks(single_item=None):
    """
    Invoke every subscriber with the full cache state and, optionally, the
    single (id, idurl, identity) item that just changed.
    """
    for subscriber in _IdentityCacheUpdatedCallbacks:
        subscriber(cache_ids(), cache(), single_item)
| agpl-3.0 |
asimshankar/tensorflow | tensorflow/python/data/util/convert_test.py | 22 | 6901 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with user input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import convert
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ConvertTest(test.TestCase):
  """Tests for `convert.optional_param_to_tensor` and `convert.partial_shape_to_tensor`."""

  def testInteger(self):
    self.assertEqual(
        3, self.evaluate(convert.optional_param_to_tensor("foo", 3)))

  def testIntegerDefault(self):
    # A missing (None) parameter falls back to the implicit default of 0.
    self.assertEqual(
        0, self.evaluate(convert.optional_param_to_tensor("foo", None)))

  def testStringDefault(self):
    tensor = convert.optional_param_to_tensor("bar", None, "default",
                                              dtypes.string)
    self.assertEqual(compat.as_bytes("default"), self.evaluate(tensor))

  def testString(self):
    tensor = convert.optional_param_to_tensor("bar", "value", "default",
                                              dtypes.string)
    self.assertEqual(compat.as_bytes("value"), self.evaluate(tensor))

  def testPartialShapeToTensorKnownDimension(self):
    # Every accepted representation of the shape [1] must convert identically.
    for shape in (tensor_shape.TensorShape([1]), (1,), [1],
                  constant_op.constant([1], dtype=dtypes.int64)):
      self.assertAllEqual(
          [1], self.evaluate(convert.partial_shape_to_tensor(shape)))

  @test_util.run_deprecated_v1
  def testPartialShapeToTensorUnknownDimension(self):
    # Unknown dimensions are encoded as -1 in the resulting tensor.
    for shape in (tensor_shape.TensorShape([None]), (None,), [None], [-1],
                  constant_op.constant([-1], dtype=dtypes.int64)):
      self.assertAllEqual(
          [-1], self.evaluate(convert.partial_shape_to_tensor(shape)))
    # Non-vector and non-int64 inputs are rejected with descriptive errors.
    with self.assertRaisesRegexp(
        ValueError, r"The given shape .* must be a 1-D tensor of tf.int64 "
        r"values, but the shape was \(2, 2\)."):
      convert.partial_shape_to_tensor(
          constant_op.constant([[1, 1], [1, 1]], dtype=dtypes.int64))
    with self.assertRaisesRegexp(
        TypeError, r"The given shape .* must be a 1-D tensor of tf.int64 "
        r"values, but the element type was float32."):
      convert.partial_shape_to_tensor(constant_op.constant([1., 1.]))

  def testPartialShapeToTensorMultipleDimensions(self):
    # (expected tensor value, equivalent input representations)
    cases = [
        ([3, 6], (tensor_shape.TensorShape([3, 6]), (3, 6), [3, 6],
                  constant_op.constant([3, 6], dtype=dtypes.int64))),
        ([3, -1], (tensor_shape.TensorShape([3, None]), (3, None), [3, None],
                   constant_op.constant([3, -1], dtype=dtypes.int64))),
        ([-1, -1], (tensor_shape.TensorShape([None, None]), (None, None),
                    [None, None],
                    constant_op.constant([-1, -1], dtype=dtypes.int64))),
    ]
    for expected, shapes in cases:
      for shape in shapes:
        self.assertAllEqual(
            expected, self.evaluate(convert.partial_shape_to_tensor(shape)))

  def testPartialShapeToTensorScalar(self):
    # A scalar (rank-0) shape converts to an empty 1-D tensor.
    for shape in (tensor_shape.TensorShape([]), (), [],
                  constant_op.constant([], dtype=dtypes.int64)):
      self.assertAllEqual(
          [], self.evaluate(convert.partial_shape_to_tensor(shape)))
# Standard TensorFlow test entry point: discovers and runs the methods above.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
mapycz/python-mapnik | test/python_tests/visual_test.py | 1 | 12406 | #!/usr/bin/env python3
import glob
import os
import platform
import shutil
import sys
import argparse
import mapnik
from nose.tools import eq_, raises
# mapnik.logger.set_severity(mapnik.severity_type.None)
# mapnik.logger.set_severity(mapnik.severity_type.Debug)
try:
import json
except ImportError:
import simplejson as json
# Directory where freshly rendered test images are written before comparison.
visual_output_dir = "/tmp/mapnik-visual-images"

# Per-style defaults; individual styles may override these through their
# map parameters (see render()).
defaults = {
    'status': True,
    'sizes': [(500, 100)],
    'scales': [1.0, 2.0],
    'agg': True,
    'cairo': mapnik.has_cairo(),
    'ignored_renderers': []
}

# Maximum number of differing pixels tolerated per renderer.
cairo_threshold = 10
agg_threshold = 0
if 'Linux' == platform.uname()[0]:
    # we assume if linux then you are running packaged cairo
    # which is older than the 1.12.14 version we used on OS X
    # to generate the expected images, so we'll ratchet back the threshold
    # https://github.com/mapnik/mapnik/issues/1868
    cairo_threshold = 230
    agg_threshold = 12
def render_cairo(m, output, scale_factor):
    """Render map ``m`` via the cairo backend, then re-encode the file as png32."""
    mapnik.render_to_file(m, output, 'ARGB32', scale_factor)
    reloaded = mapnik.Image.open(output)
    reloaded.save(output, 'png32')
def render_agg(m, output, scale_factor):
    """Render map ``m`` straight to ``output`` as png32 using the AGG backend."""
    # The original line ended with a stray comma, turning the statement into a
    # discarded one-element tuple; harmless but misleading, so it is removed.
    mapnik.render_to_file(m, output, 'png32', scale_factor)
# Renderer registry: each entry says how to render a map, how to compare the
# result against the reference image, and how many differing pixels to allow.
renderers = [
    {'name': 'agg',
     'render': render_agg,
     'compare': lambda actual, reference: compare(actual, reference, alpha=True),
     'threshold': agg_threshold,
     'filetype': 'png',
     'dir': 'images'
     },
    {'name': 'cairo',
     'render': render_cairo,
     # cairo output is compared ignoring the alpha channel.
     'compare': lambda actual, reference: compare(actual, reference, alpha=False),
     'threshold': cairo_threshold,
     'filetype': 'png',
     'dir': 'images'
     }
]
# Per-pixel channel delta below which two pixels are treated as equal.
COMPUTE_THRESHOLD = 16

# testcase images are generated on OS X
# so they should exactly match
if platform.uname()[0] == 'Darwin':
    COMPUTE_THRESHOLD = 2
def compare(actual, expected, alpha=True):
    """Return the number of pixels that differ between two image files."""
    rendered = mapnik.Image.open(actual)
    reference = mapnik.Image.open(expected)
    return rendered.compare(reference, COMPUTE_THRESHOLD, alpha)
class Reporting:
    """
    Collects per-test results, prints colored progress to the console and
    writes an HTML report of image diffs under ``visual_output_dir``.

    Each recorded error is a tuple ``(type, actual, expected, diff, message)``.
    """
    # Error type codes stored in the first element of each error tuple.
    DIFF = 1        # rendered image differs from the reference
    NOT_FOUND = 2   # reference image missing; it was created from the output
    OTHER = 3       # test failed to run at all
    REPLACE = 4     # reference was overwritten with the new rendering

    def __init__(self, quiet, overwrite_failures, only_errors):
        self.quiet = quiet
        self.passed = 0
        self.failed = 0
        self.overwrite_failures = overwrite_failures
        self.only_errors = only_errors
        self.errors = [  # (type, actual, expected, diff, message)
        ]

    def result_fail(self, actual, expected, diff):
        """Record a pixel-diff failure; optionally overwrite the reference image."""
        self.failed += 1
        if self.quiet:
            # Plain dot on Windows consoles that may not support ANSI colors.
            if platform.uname()[0] == 'Windows':
                sys.stderr.write('.')
            else:
                sys.stderr.write('\x1b[31m.\x1b[0m')
        else:
            print('\x1b[31m✘\x1b[0m (\x1b[34m%u different pixels\x1b[0m)' % diff)
        if self.overwrite_failures:
            self.errors.append((self.REPLACE, actual, expected, diff, None))
            # Copy the freshly rendered image over the reference.
            with open(actual, 'rb') as f:
                contents = f.read()
            with open(expected, 'wb') as f:
                f.write(contents)
        else:
            self.errors.append((self.DIFF, actual, expected, diff, None))

    def result_pass(self, actual, expected, diff):
        """Record a passing comparison."""
        self.passed += 1
        if self.quiet:
            if platform.uname()[0] == 'Windows':
                sys.stderr.write('.')
            else:
                sys.stderr.write('\x1b[32m.\x1b[0m')
        else:
            if platform.uname()[0] == 'Windows':
                print('\x1b[32m✓\x1b[0m')
            else:
                print('✓')

    def not_found(self, actual, expected):
        """Reference image missing: record it and create the reference from the output."""
        self.failed += 1
        self.errors.append((self.NOT_FOUND, actual, expected, 0, None))
        if self.quiet:
            sys.stderr.write('\x1b[33m.\x1b[0m')
        else:
            print('\x1b[33m?\x1b[0m (\x1b[34mReference file not found, creating\x1b[0m)')
        with open(actual, 'rb') as f:
            contents = f.read()
        with open(expected, 'wb') as f:
            f.write(contents)

    def other_error(self, expected, message):
        """Record a test that failed to run at all (load error, render crash, ...)."""
        self.failed += 1
        self.errors.append((self.OTHER, None, expected, 0, message))
        if self.quiet:
            sys.stderr.write('\x1b[31m.\x1b[0m')
        else:
            print('\x1b[31m✘\x1b[0m (\x1b[34m%s\x1b[0m)' % message)

    def make_html_item(self, actual, expected, diff):
        """Build one expected/actual side-by-side HTML fragment for the report."""
        item = '''
        <div class="expected">
          <a href="%s">
            <img src="%s" width="100%s">
          </a>
        </div>
        ''' % (expected, expected, '%')
        item += '<div class="text">%s</div>' % (diff)
        item += '''
        <div class="actual">
          <a href="%s">
            <img src="%s" width="100%s">
          </a>
        </div>
        ''' % (actual, actual, '%')
        return item

    def summary(self, data_dir):
        """Print the final report, write the HTML diff page, return the error count."""
        if len(self.errors) == 0:
            print('\nAll %s visual tests passed: \x1b[1;32m✓ \x1b[0m' % self.passed)
            return 0
        sortable_errors = []
        error_count = 0
        print("\nVisual rendering: %s failed / %s passed" %
              (len(self.errors), self.passed))
        for idx, error in enumerate(self.errors):
            if error[0] == self.OTHER:
                error_count = error_count + 1
                print(str(idx + 1) +
                      ") \x1b[31mfailure to run test:\x1b[0m %s (\x1b[34m%s\x1b[0m)" %
                      (error[2], error[4]))
            elif self.only_errors:
                # In only-errors mode, diffs and missing references are skipped.
                continue
            elif error[0] == self.NOT_FOUND:
                error_count = error_count + 1
                print(str(idx + 1) + ") Generating reference image: '%s'" %
                      error[2])
                continue
            elif error[0] == self.DIFF:
                error_count = error_count + 1
                print(str(idx + 1) +
                      ") \x1b[34m%s different pixels\x1b[0m:\n\t%s (\x1b[31mactual\x1b[0m)\n\t%s (\x1b[32mexpected\x1b[0m)" %
                      (error[3], error[1], error[2]))
                # Kept for the HTML report, sorted by diff size below.
                sortable_errors.append((error[3], error))
            elif error[0] == self.REPLACE:
                print(str(idx + 1) +
                      ") \x1b[31mreplaced reference with new version:\x1b[0m %s" %
                      error[2])
        if len(sortable_errors):
            # drop failure results in folder
            vdir = os.path.join(visual_output_dir, 'visual-test-results')
            if not os.path.exists(vdir):
                os.makedirs(vdir)
            with open(os.path.join(data_dir, 'index.html'), 'r') as f:
                html_template = f.read()
            name = 'index.html'
            failures_realpath = os.path.join(vdir, name)
            with open(failures_realpath, 'w+') as html_out:
                # Largest pixel differences first.
                sortable_errors.sort(reverse=True)
                html_body = ''
                for item in sortable_errors:
                    # copy images into single directory
                    actual = item[1][1]
                    expected = item[1][2]
                    diff = item[0]
                    actual_new = os.path.join(vdir, os.path.basename(actual))
                    shutil.copy(actual, actual_new)
                    expected_new = os.path.join(vdir, os.path.basename(expected))
                    shutil.copy(expected, expected_new)
                    html_body += self.make_html_item(
                        os.path.relpath(actual_new, vdir),
                        os.path.relpath(expected_new, vdir), diff)
                html_out.write(html_template.replace('{{RESULTS}}', html_body))
            print('View failures by opening %s' % failures_realpath)
        return error_count
def render(data_dir, filename, config, scale_factor, reporting):
    """
    Load one style file, render it with every configured renderer at every
    requested size, and compare the output to reference images, recording
    outcomes in ``reporting``.  Returns the mapnik.Map (or None when the
    style is disabled via its 'status' parameter).
    """
    m = mapnik.Map(*config['sizes'][0])
    try:
        mapnik.load_map(m, os.path.join(data_dir, "styles", filename), True)
        # A style can disable itself through its 'status' map parameter.
        if not (m.parameters['status'] if (
                'status' in m.parameters) else config['status']):
            return
    except Exception as e:
        # Missing datasources/database are treated as "skip", not failure.
        if 'Could not create datasource' in str(e) \
                or 'Bad connection' in str(e) \
                or 'Postgis Plugin: could not connect to server' in str(e):
            return m
        reporting.other_error(filename, repr(e))
        return m
    sizes = config['sizes']
    # Styles may override sizes via a 'sizes' parameter: "w,h;w,h;..."
    if 'sizes' in m.parameters:
        sizes = [[int(i) for i in size.split(',')]
                 for size in m.parameters['sizes'].split(';')]
    ignored_renderers = config['ignored_renderers']
    if 'ignored_renderers' in m.parameters:
        ignored_renderers = m.parameters['ignored_renderers'].split(',')
    for size in sizes:
        m.width, m.height = size
        if 'bbox' in m.parameters:
            bbox = mapnik.Box2d.from_string(str(m.parameters['bbox']))
            m.zoom_to_box(bbox)
        else:
            m.zoom_all()
        # Strip the ".xml" extension to build the image name.
        name = filename[0:-4]
        postfix = "%s-%d-%d-%s" % (name, m.width, m.height, scale_factor)
        for renderer in renderers:
            if renderer['name'] in ignored_renderers:
                continue
            if config.get(renderer['name'], True):
                expected = os.path.join(data_dir, renderer['dir'], '%s-%s-reference.%s' %
                                        (postfix, renderer['name'], renderer['filetype']))
                actual = os.path.join(visual_output_dir, '%s-%s.%s' %
                                      (postfix, renderer['name'], renderer['filetype']))
                if not reporting.quiet:
                    print("\"%s\" with %s..." % (postfix, renderer['name']))
                try:
                    renderer['render'](m, actual, scale_factor)
                    if not os.path.exists(expected):
                        reporting.not_found(actual, expected)
                    else:
                        diff = renderer['compare'](actual, expected)
                        if diff > renderer['threshold']:
                            reporting.result_fail(actual, expected, diff)
                        else:
                            reporting.result_pass(actual, expected, diff)
                except Exception as e:
                    reporting.other_error(expected, repr(e))
    return m
def run(data_dir, styles, quiet, only_errors, overwrite_failures):
    """
    Render the requested styles (all styles under ``data_dir/styles`` when
    ``styles`` is empty) at every configured scale and report the results.

    Returns the number of failing tests (0 on full success).
    """
    # Cleanup: dropped a dead `files = None` initializer and the unused
    # binding of render()'s return value.
    if styles:
        files = [name + ".xml" for name in styles]
    else:
        files = [
            os.path.basename(path) for path in glob.glob(
                os.path.join(data_dir, "styles/*.xml"))]
    if not os.path.exists(visual_output_dir):
        os.makedirs(visual_output_dir)
    reporting = Reporting(quiet, overwrite_failures, only_errors)
    try:
        for filename in files:
            config = dict(defaults)
            for scale_factor in config['scales']:
                render(data_dir, filename, config, scale_factor, reporting)
    except KeyboardInterrupt:
        # Ctrl-C stops rendering but still produces a summary of what ran.
        pass
    return reporting.summary(data_dir)
def setup():
    """Nose setup hook: chdir into the test directory so relative paths resolve."""
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    from .utilities import execution_path
    os.chdir(execution_path('.'))
# Include visual tests to the suite
def test_visual():
    """Nose entry point: run the whole visual suite unless disabled by env var."""
    if os.environ.get("MAPNIK_VISUAL_TESTS", "true") != "true":
        return
    setup()
    eq_(run('../data-visual', [], True, True, False), 0)
if __name__ == "__main__":
    # Command-line entry point: render the selected styles (or all of them)
    # and exit with the number of failing tests as the process status code.
    parser = argparse.ArgumentParser(description='Visual test runner')
    parser.add_argument('data_dir')
    parser.add_argument('-q', action='store_true')
    parser.add_argument('--only-errors', action='store_true', help='Report only errors, ignore diffs')
    parser.add_argument('--overwrite', action='store_true')
    parser.add_argument('styles', nargs='*')
    args = parser.parse_args()
    error_count = run(args.data_dir, args.styles, args.q, args.only_errors, args.overwrite)
    sys.exit(error_count)
| lgpl-2.1 |
fast90/youtube-dl | youtube_dl/extractor/rtl2.py | 20 | 2993 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class RTL2IE(InfoExtractor):
    """Extractor for videos hosted on rtl2.de.

    Fix: the URL pattern previously started with ``http?://`` which matches
    only "htt"/"http" and therefore never matched an https:// URL; changed
    to the conventional ``https?://``.
    """
    _VALID_URL = r'https?://(?:www\.)?rtl2\.de/[^?#]*?/(?P<id>[^?#/]*?)(?:$|/(?:$|[?#]))'
    _TESTS = [{
        'url': 'http://www.rtl2.de/sendung/grip-das-motormagazin/folge/folge-203-0',
        'info_dict': {
            'id': 'folge-203-0',
            'ext': 'f4v',
            'title': 'GRIP sucht den Sommerkönig',
            'description': 'Matthias, Det und Helge treten gegeneinander an.'
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.rtl2.de/sendung/koeln-50667/video/5512-anna/21040-anna-erwischt-alex/',
        'info_dict': {
            'id': '21040-anna-erwischt-alex',
            'ext': 'mp4',
            'title': 'Anna erwischt Alex!',
            'description': 'Anna ist Alex\' Tochter bei Köln 50667.'
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        # Some rtl2 urls have no slash at the end, so append it.
        if not url.endswith('/'):
            url += '/'
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Newer pages embed the collection/video ids as data attributes;
        # older pages expose them as JS variables.
        mobj = re.search(
            r'<div[^>]+data-collection="(?P<vico_id>\d+)"[^>]+data-video="(?P<vivi_id>\d+)"',
            webpage)
        if mobj:
            vico_id = mobj.group('vico_id')
            vivi_id = mobj.group('vivi_id')
        else:
            vico_id = self._html_search_regex(
                r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id')
            vivi_id = self._html_search_regex(
                r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id')
        info_url = 'http://www.rtl2.de/video/php/get_video.php?vico_id=' + vico_id + '&vivi_id=' + vivi_id
        info = self._download_json(info_url, video_id)
        video_info = info['video']
        title = video_info['titel']
        description = video_info.get('beschreibung')
        thumbnail = video_info.get('image')
        download_url = video_info['streamurl']
        download_url = download_url.replace('\\', '')
        stream_url = 'mp4:' + self._html_search_regex(r'ondemand/(.*)', download_url, 'stream URL')
        rtmp_conn = ['S:connect', 'O:1', 'NS:pageUrl:' + url, 'NB:fpad:0', 'NN:videoFunction:1', 'O:0']
        formats = [{
            'url': download_url,
            'play_path': stream_url,
            'player_url': 'http://www.rtl2.de/flashplayer/vipo_player.swf',
            'page_url': url,
            'flash_version': 'LNX 11,2,202,429',
            'rtmp_conn': rtmp_conn,
            'no_resume': True,
        }]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'description': description,
            'formats': formats,
        }
| unlicense |
sigma-random/avmplus | utils/hooks/tamarin-commit-hook.py | 8 | 15863 | #! /usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Hook script used by tamarin team on tamarin-redux and tamarin-central.
#
# For documentation on hook scripts see:
# http://hgbook.red-bean.com/read/handling-repository-events-with-hooks.html
# http://mercurial.selenic.com/wiki/MercurialApi
# This file is to be run using a pretxncommit hook
# (and, at your option, a preoutgoing hook).
# Place this in your .hg/hgrc file in the repo:
#
# [hooks]
# pretxncommit.master = python:/path/to/tamarin-commit-hook.py:master_hook
# preoutgoing.checklog = python:/path/to/tamarin-commit-hook.py:preoutgoing_hook
#
# ====
#
# OVERVIEW
#
# These hooks check that the changesets you commit and push adhere to
# some fairly loose formatting and information-content rules.
#
# Ground rules:
# - Neither hook prevents the commit or push of any changeset (as long
# as you respond affirmatively when prompted for confirmation).
# - Neither hook should significantly impede your personal workflow.
# Mercurial Queues and other extensions should work without
# introducing a new prompt in the common case*.
#
# Counter-examples to either of these two ground rules should be filed as
# bug against the hooks. (The objective is to reduce pollution in the
# central repository, not impose a strait-jacket on your development
# style.)
#
#
# THE PRETXNCOMMIT HOOK
#
# The pretxncommit hook, named master_hook below, fires whenever the
# user runs a transaction to create a changeset in the working
# repository via a command such as 'hg commit' or 'hg import <patch>'.
# The master_hook checks that all changesets introduced by the
# transaction satisfy two rules:
#
# - Source code changes (for a standard set of languages including
# C++, AS3, and Python) should have clean whitespace**; this means
# they contain no new occurrences of: (1.) tab characters, (2.)
# Microsoft Windows line-endings ('\r'), or (3.) trailing whitespace
# on a line.
#
# - Source code changes should not contain the security change marker
# used to demarcate code that is not meant for release to public
# repositories.
#
# If any of the above checks fail, the user is prompted with a
# description of the failing checks and a request to confirm that they
# still want the transaction (i.e. commit or import) to proceed.
#
#
# THE PREOUTGOING HOOK
#
# The preoutgoing_hook fires whenever the user transfers changesets
# between repositories, via a command such a 'hg push' or 'hg pull'.
# The preoutgoing_hook performs a heuristic*** scan of a changeset to
# check:
# - the changeset's log message has a Bugzilla ticket number,
# - the associated Bugzilla ticket is not tagged as a security bug,
# - the changeset's log message has a parenthesized list of reviewers, and
# - the changeset's user has a validly formatted email address.
#
# If any of the above checks fail, the user is prompted with a
# description of the failing checks and a request to confirm that they
# still want the transfer (i.e. push or pull) to proceed. One can
# obviously still push to security bugs (or push without reviews,
# without an associated ticket, etc); this is just meant as an extra
# hurdle providing a moment for the user to ask reflectively "Am I
# pushing to the right repository? Am I doing the right thing?"
#
#
# FOOTNOTES
#
# * (Yes, "in the common case" is deliberate weasel wording, as is
# "significantly impede". For example, the hooks may prompt
# occasionally, e.g. when pushing between your own private
# repositories.)
#
# ** See utils/fixtabs for a clean-up utility if you feel like fixing
# a whole file rather than remove just your own violations of the
# whitespace rules.
#
# *** The preoutgoing_hook's scan is considered heuristic because it
# only scans the tip changeset. It restricts its attention to the
# tip (and not to other changesets that may be included in the push
# or pull) for technical reasons documented on Bugzilla 630416 and
# in the source below. Scanning the tip alone is a sufficient
# compromise because it captures the common case where the tip
# changeset is the sole change being pushed to our central
# repository.
import sys, re, os
from mercurial import hg, ui, commands, patch
from mercurial.node import hex, short
from HTMLParser import HTMLParser
from urllib2 import urlopen
# `matchfiles` moved between Mercurial modules across releases; resolve its
# current home at import time so the hook works on both old and new versions.
try:
    # Mercurial 1.9
    from mercurial import scmutil
    matchfiles = scmutil.matchfiles
except ImportError:
    from mercurial import cmdutil
    matchfiles = cmdutil.matchfiles
class BugType:
    '''Classification of a Bugzilla ticket as seen by check_bug_type().'''
    NORMAL = 1    # publicly visible ticket
    SECURITY = 2  # access-restricted (security) ticket
    INVALID = 3   # ticket id does not exist
class TitleParser(HTMLParser):
    '''Minimal HTML parser that captures the text of the first <title> element.'''
    in_title = False
    title = ''

    def handle_starttag(self, tag, attrs):
        # Arm capture as soon as a <title> start tag is seen.
        if tag == 'title':
            self.in_title = True

    def handle_data(self, data):
        if not self.in_title:
            return
        # Record the first data chunk inside <title> and disarm.
        self.title = data
        self.in_title = False
def master_hook(ui, repo, **kwargs):
    """
    pretxncommit hook: run the whitespace/security-marker checks over every
    changeset created by the pending transaction.
    """
    ui.debug('running tamarin master_hook\n')
    ui.debug('kwargs: %s\n' % kwargs)
    # The mercurial hook script expects the equivalent of an exit code back from
    # this call:
    #   False = 0 = No Error : allow push
    #   True  = 1 = Error    : abort push
    error = False
    error = error or diff_check(ui, repo, **kwargs)
    if error:
        # Save the commit message so it can be reused by user
        desc = repo[repo[kwargs['node']].rev()].description()
        ui.debug('Description: %s\n' % desc)
        try:
            f = open('%s/.hg/commit.save' % repo.root, 'w')
            f.write(desc)
            f.close()
            ui.warn('Commit message saved to .hg/commit.save\nSaved message can be recommitted using -l .hg/commit.save\n')
        except IOError:
            ui.warn('Error writing .hg/commit.save file')
    return error
def preoutgoing_hook(ui, repo, **kwargs):
    '''preoutgoing hook entry point: sanity-check the tip changeset's log/user.'''
    ui.debug('running tamarin preoutgoing_hook\n')
    ui.debug('kwargs: %s\n' % kwargs)
    operation = kwargs['source']
    # Like master_hook, return code False implies No Error, allow push.
    return heuristic_log_check(ui, repo, operation, **kwargs)
def heuristic_log_check(ui, repo, operation, **kwargs):
    """
    Check the *tip* changeset's log message and user field before changesets
    leave this repository; returns True to abort the transfer.
    """
    # Bug 630416: Unlike master_hook, the hg preoutgoing hook (as of
    # Mercurial version 1.7) has very little to work with: no
    # reference to targeted repo, no description of changesets being
    # gathered to propagate, etc.
    #
    # We just want to catch log entry oversights before pushing to
    # other repositories.  As a heuristic, assume tip changeset is the
    # (only) revision being pushed; heuristic can misfire, but should
    # catch the common cases (a more formal guard would belong
    # server-side anyway).
    #
    # If future Mercurial versions address this problem with
    # preoutgoing, then could drop heuristic and apply description
    # check across all outgoing changesets; then we should print all
    # warnings in one pass and prompt for confirmation at most once.

    # Don't do format-check on hg-strip
    if (operation in ["strip"]):
        return False
    tip_id = repo.changelog.tip()
    tip_changeset = repo[tip_id]
    # See master_hook for interpretation of error flag.
    error = check_desc_for_bugnum_and_reviews(ui, tip_changeset, operation)
    error = error or check_user_for_valid_email(ui, tip_changeset, operation)
    return error
def prompt_yesno(ui, operation):
    '''Ask whether to continue *operation*; returns 0 for No (default), 1 for Yes.'''
    choices = (('&No'), ('&Yes'))
    return ui.promptchoice('Continue %s (n)o, (y)es? [n]' % operation, choices, 0)
def bugzilla_reference(line):
    '''
    Extract a Bugzilla ticket number from *line*, or None.

    A reference is a run of 6+ digits prefixed (case-insensitively) by
    "Bug", "For" or "Fix".
    '''
    match = re.match(r'.*(Bug|For|Fix)\s*([0-9]{6,})', line, re.IGNORECASE)
    return match.group(2) if match else None
def has_reviewer_notes(line):
    '''Truthy (match object) when line carries "r=<name>" or "r+<name>" credits.'''
    reviewer_pattern = r'.*r(=|\+)[a-zA-Z0-9]+'
    return re.match(reviewer_pattern, line)
def check_bug_type(bug):
    '''Classify a Bugzilla ticket by scraping its page title (requires network).'''
    parser = TitleParser()
    page = urlopen('https://bugzilla.mozilla.org/show_bug.cgi?id=%s' % bug)
    parser.feed(page.read().decode(page.info().getparam('charset')))
    parser.close()
    if parser.title == 'Access Denied':
        return BugType.SECURITY
    if parser.title == 'Invalid Bug ID':
        return BugType.INVALID
    return BugType.NORMAL
def has_email_in_brackets(user):
    '''Truthy when the user string carries an angle-bracketed address, e.g. "Name <x>".'''
    bracketed = r'.*<.+>'
    return re.match(bracketed, user)
def has_email_with_domain(user):
    '''Truthy when the bracketed address also contains an @-separated domain.'''
    with_domain = r'.*<.+@.+>'
    return re.match(with_domain, user)
def check_user_for_valid_email(ui, changeset, operation):
    '''
    Warn (and let the user abort) when the changeset's user field lacks a
    proper "Name <address@domain>" email.  Returns True to abort *operation*.
    '''
    user = changeset.user()
    ui.debug('\ncheck_user_for_valid_email: %s' % user)
    if not has_email_in_brackets(user):
        ui.warn('\nUser missing email address for changeset %s: \n %s\n'
                % (changeset, user))
        if prompt_yesno(ui, operation) == 0:
            ui.warn('Aborting %s due to user missing email.\n' % operation)
            return True
    elif not has_email_with_domain(user):
        ui.warn('\nUser email missing domain for changeset %s: \n %s\n'
                % (changeset, user))
        if prompt_yesno(ui, operation) == 0:
            ui.warn('Aborting %s due to email without domain.\n' % operation)
            return True
    return False
def check_desc_for_bugnum_and_reviews(ui, changeset, operation):
    """Check the first line of the changeset description for a bug number
    and reviewer notes, prompting the user when either looks questionable.

    When a bug number is present, it is also classified via Bugzilla:
    security bugs and unknown bug ids each trigger a confirmation prompt.
    Returns True when the user chose to abort *operation*, False otherwise.
    """
    # Check first line of log of tip changeset; if it appears questionable,
    # prompt the user to confirm that they want to continue the operation.
    desc = changeset.description()
    lines = desc.split('\n')
    first_line = lines[0]
    bug_num = bugzilla_reference(first_line)
    has_review = has_reviewer_notes(first_line)
    if not bug_num or not has_review:
        ui.warn('\nQuestionable log for changeset %s:\n %s\n'
                % (changeset, first_line))
    if not bug_num:
        ui.warn('Missing bug number, e.g. "Bug NNNNNN: ..."\n')
        response = prompt_yesno(ui, operation)
        if response == 0:
            ui.warn('Aborting %s due to missing bug number.\n' % operation)
            return True
    else:
        bug_type = check_bug_type(bug_num)
        if bug_type == BugType.SECURITY:
            # Message fix: terminate with a newline like every other warning.
            ui.warn('Bug %s is a security bug.\n' % bug_num)
            response = prompt_yesno(ui, operation)
            if response == 0:
                ui.warn('Aborting %s due to security bug.\n' % operation)
                return True
        elif bug_type == BugType.INVALID:
            # Message fix: "is a not defined" -> "is not defined", plus newline.
            ui.warn('Bug %s is not defined in bugzilla.\n' % bug_num)
            response = prompt_yesno(ui, operation)
            if response == 0:
                ui.warn('Aborting %s due to invalid bug number.\n' % operation)
                return True
    if not has_review:
        ui.warn('Missing review notes, e.g. "... (r=<name>,sr=<name>)"\n')
        response = prompt_yesno(ui, operation)
        if response == 0:
            ui.warn('Aborting %s due to missing review notes.\n' % operation)
            return True
    return False
def diff_check(ui, repo, **kwargs):
    """Commit hook: scan the incoming changesets' diffs for tabs, Windows
    line endings, trailing whitespace and security-change markers.

    Returns True to signal the commit should be aborted, False otherwise.
    """
    ui.debug('running diff_check\n')
    # get all the change contexts for this commit
    # kwargs['node'] returns the first changecontext nodeid
    changecontexts = [repo[i] for i in range(repo[kwargs['node']].rev(), len(repo))]

    # File extensions subject to the formatting checks (previously this
    # tuple was duplicated in each of the four calls below).
    checked_extensions = ('.cpp', '.c', '.h', '.as', '.abs', '.py')

    def tabCheck(line):
        """Flag a literal tab character; returns (found, column)."""
        tab = line.find('\t')
        if tab >= 0:  # find returns -1 if not found
            return True, tab
        return False, tab

    def windowsLineendingsCheck(line):
        """Flag a trailing carriage return; returns (found, column)."""
        if line.endswith('\r'):
            return True, len(line) - 1
        return False, 0

    def trailingWhitespaceCheck(line):
        """Flag whitespace at end of an added line; returns (found, column)."""
        if len(line.strip()) > 1:  # skip empty lines (will have a +) see bug 600536
            m = re.match(r'\+.*?(\s+$)', line)
            if m:
                return True, m.start(1)
        return False, 0

    def securityCheck(line):
        """Flag security-change markers; returns (found, column)."""
        loc = line.find('MARK_SECURITY_CHANGE')
        if loc != -1:
            # found security change ifdef
            return True, loc
        loc = line.find('SECURITYFIX_')
        if loc != -1:
            # found security change ifdef
            return True, loc
        return False, 0

    # Run the checks in order; abort as soon as the user chooses to.
    checks = [
        (tabCheck, 'Tab'),
        (windowsLineendingsCheck, 'Windows line ending'),
        (trailingWhitespaceCheck, 'Trailing Whitespace'),
        (securityCheck, 'Security Check'),
    ]
    for testFunc, testDesc in checks:
        if checkChangeCtxDiff(ui, repo, changecontexts, testFunc, testDesc,
                              checked_extensions):
            return True
    return False
def checkChangeCtxDiff(ui, repo, changecontexts, testFunc, testDesc, fileEndings):
    '''Loop through each diff for each change and run the testFunc against each line

    testFunc takes one diff line and returns (found, column); fileEndings is a
    tuple of file extensions to check.  On a hit the user is prompted:
    No (abort, returns True), Yes (keep checking this file), or Allow
    (skip the rest of this file).  Returns False when nothing caused an abort.
    '''
    ui.debug('Checking %s\n' % testDesc)
    for ctx in changecontexts:
        # Get the diff for each change and file
        for file in [f for f in ctx.files() if f.endswith(fileEndings)]:
            ui.debug('checking change: %s, file: %s\n' % (short(ctx.node()), file))
            fmatch = matchfiles(repo,[file])
            # diff from this nodes parent to current node
            diff = ''.join(patch.diff(repo, ctx.parents()[0].node(), ctx.node(), fmatch)).split('\n')
            for i in range(3, len(diff)): # start checking after diff header
                line = diff[i]
                # Remember the most recent hunk header for the error report.
                # NOTE(review): diffLocation is only bound after an '@@' line is
                # seen; a '+' hit before any hunk header would raise NameError —
                # presumably diffs always start with a hunk header right after
                # the 3-line file header.  Confirm against patch.diff output.
                if line.startswith('@@'):
                    diffLocation = line
                # only check new lines added/modified in the file
                if line.startswith('+'):
                    ui.debug('\nchecking line for %s: %s\n\n' % (testDesc, line))
                    testResult, errorLocation = testFunc(line)
                    if testResult:
                        ui.warn('\n%s(s) found in %s for rev %s (change %s):\n' %
                            (testDesc, file, ctx.rev(), short(ctx.node())))
                        ui.warn('%s\n' % diffLocation)
                        ui.warn('%s\n' % line)
                        ui.warn('%s^\n' % (' '*errorLocation,)) # show a pointer to error
                        try:
                            response = ui.promptchoice('(n)o, (y)es, (a)llow %ss for current file\n' % testDesc +
                                        'Are you sure you want to commit this change? [n]: ' ,
                                        (('&No'), ('&Yes'), ('&Allow')), 0)
                        except AttributeError:
                            # Older hg lacks ui.promptchoice; fall back to abort.
                            ui.warn('This commit hook requires that you have mercurial 1.4+ installed. Please upgrade your hg installation.')
                            response = 0
                        if response == 1:
                            # next occurance in file
                            continue
                        elif response == 2:
                            # next file
                            break
                        else:
                            ui.warn('Aborting commit due to %s.\n' % testDesc)
                            # error = True
                            return True
    return False
| mpl-2.0 |
wger-project/wger | wger/manager/tests/test_workout_canonical.py | 1 | 10117 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Django
from django.core.cache import cache
# wger
from wger.core.models import DaysOfWeek
from wger.core.tests.base_testcase import WgerTestCase
from wger.exercises.models import (
Exercise,
Muscle,
)
from wger.manager.models import (
Day,
Set,
Setting,
Workout,
)
from wger.utils.cache import cache_mapper
class WorkoutCanonicalFormTestCase(WgerTestCase):
    """
    Tests the canonical form for a workout
    """

    # Show full diffs when the large dictionary comparisons below fail.
    maxDiff = None

    def test_canonical_form(self):
        """
        Tests the canonical form for a workout
        """
        # Fixture objects — pks refer to the test fixtures loaded by WgerTestCase.
        workout = Workout.objects.get(pk=1)
        setting_1 = Setting.objects.get(pk=1)
        setting_2 = Setting.objects.get(pk=2)
        muscle1 = Muscle.objects.get(pk=1)
        muscle2 = Muscle.objects.get(pk=2)
        # NOTE(review): setting1/setting2 fetch the same rows as
        # setting_1/setting_2 above; the duplicates could be collapsed.
        setting1 = Setting.objects.get(pk=1)
        setting2 = Setting.objects.get(pk=2)
        image1 = '/media/exercise-images/1/protestschwein.jpg'
        image2 = '/media/exercise-images/1/wildschwein.jpg'
        self.assertEqual(
            workout.canonical_representation['muscles'], {
                'back': [muscle2],
                'frontsecondary': [muscle1],
                'backsecondary': [muscle1],
                'front': [muscle1]
            }
        )
        self.assertEqual(workout.canonical_representation['obj'], workout)

        # Expected canonical form of the workout's first day.
        # '\xd7' is the multiplication sign '×' used in the setting text.
        canonical_form = {
            'days_of_week': {
                'day_list': [DaysOfWeek.objects.get(pk=2)],
                'text': 'Tuesday'
            },
            'muscles': {
                'back': [muscle2],
                'frontsecondary': [],
                'backsecondary': [],
                'front': [muscle1]
            },
            'obj':
                Day.objects.get(pk=1),
            'set_list': [
                {
                    'exercise_list': [
                        {
                            'obj':
                                Exercise.objects.get(pk=1),
                            'image_list': [
                                {
                                    'image': image1,
                                    'is_main': True
                                }, {
                                    'image': image2,
                                    'is_main': False
                                }
                            ],
                            'comment_list': ['test 123'],
                            'has_weight':
                                False,
                            'setting_obj_list': [setting_1],
                            'setting_text':
                                '2 \xd7 8 (3 RiR)',
                        }
                    ],
                    'is_superset':
                        False,
                    'muscles': {
                        'back': [muscle2],
                        'frontsecondary': [],
                        'backsecondary': [],
                        'front': [muscle1]
                    },
                    'obj':
                        Set.objects.get(pk=1),
                    'settings_computed': [setting1] * 2
                }
            ]
        }
        days_test_data = workout.canonical_representation['day_list'][0]
        self.assertEqual(days_test_data['days_of_week'], canonical_form['days_of_week'])
        self.assertEqual(days_test_data['muscles'], canonical_form['muscles'])
        self.assertEqual(days_test_data['obj'], canonical_form['obj'])

        # Check that the content is the same
        for key in days_test_data['set_list'][0].keys():
            self.assertEqual(days_test_data['set_list'][0][key], canonical_form['set_list'][0][key])

        # Expected canonical form of the workout's second day.
        canonical_form = {
            'days_of_week': {
                'day_list': [DaysOfWeek.objects.get(pk=4)],
                'text': 'Thursday'
            },
            'obj':
                Day.objects.get(pk=2),
            'muscles': {
                'back': [muscle2],
                'frontsecondary': [muscle1],
                'backsecondary': [muscle1],
                'front': []
            },
            'set_list': [
                {
                    'exercise_list': [
                        {
                            'obj': Exercise.objects.get(pk=2),
                            'image_list': [{
                                'image': image2,
                                'is_main': False
                            }],
                            'comment_list': ['Foobar'],
                            'has_weight': True,
                            'setting_obj_list': [setting_2],
                            'setting_text': '4 \xd7 10 (15 kg)',
                        }
                    ],
                    'is_superset':
                        False,
                    'muscles': {
                        'back': [muscle2],
                        'frontsecondary': [muscle1],
                        'backsecondary': [muscle1],
                        'front': []
                    },
                    'obj':
                        Set.objects.get(pk=2),
                    'settings_computed': [setting2] * 4
                }
            ]
        }
        days_test_data = workout.canonical_representation['day_list'][1]
        self.assertEqual(days_test_data['days_of_week'], canonical_form['days_of_week'])
        self.assertEqual(days_test_data['muscles'], canonical_form['muscles'])
        self.assertEqual(days_test_data['obj'], canonical_form['obj'])
        for key in days_test_data['set_list'][0].keys():
            self.assertEqual(days_test_data['set_list'][0][key], canonical_form['set_list'][0][key])

        # Check that the content is the same
        # Expected canonical form of the third (empty) day.
        canonical_form = {
            'days_of_week': {
                'day_list': [DaysOfWeek.objects.get(pk=5)],
                'text': 'Friday'
            },
            'obj': Day.objects.get(pk=4),
            'muscles': {
                'back': [],
                'front': [],
                'frontsecondary': [],
                'backsecondary': []
            },
            'set_list': [],
        }
        self.assertEqual(workout.canonical_representation['day_list'][2], canonical_form)

    def test_canonical_form_day(self):
        """
        Tests the canonical form for a day
        """
        day = Day.objects.get(pk=5)
        weekday1 = DaysOfWeek.objects.get(pk=3)
        weekday2 = DaysOfWeek.objects.get(pk=5)
        muscle1 = Muscle.objects.get(pk=1)
        muscle2 = Muscle.objects.get(pk=2)
        setting = Setting.objects.get(pk=3)
        image2 = '/media/exercise-images/1/wildschwein.jpg'
        self.assertEqual(
            day.canonical_representation['days_of_week'], {
                'day_list': [weekday1, weekday2],
                'text': 'Wednesday, Friday'
            }
        )
        self.assertEqual(
            day.canonical_representation['muscles'], {
                'back': [muscle2],
                'frontsecondary': [muscle1],
                'backsecondary': [muscle1],
                'front': []
            }
        )
        self.assertEqual(day.canonical_representation['obj'], day)

        # Expected canonical set list for the day.
        canonical_form = [
            {
                'exercise_list': [
                    {
                        'obj': Exercise.objects.get(pk=2),
                        'image_list': [{
                            'image': image2,
                            'is_main': False
                        }],
                        'comment_list': ['Foobar'],
                        'has_weight': False,
                        'setting_obj_list': [Setting.objects.get(pk=3)],
                        'setting_text': '4 \xd7 10',
                    }
                ],
                'is_superset':
                    False,
                'muscles': {
                    'back': [muscle2],
                    'frontsecondary': [muscle1],
                    'backsecondary': [muscle1],
                    'front': []
                },
                'obj':
                    Set.objects.get(pk=3),
                'settings_computed': [setting] * 4
            }
        ]
        self.assertEqual(day.canonical_representation['set_list'], canonical_form)
class WorkoutCacheTestCase(WgerTestCase):
    """
    Test case for the workout canonical representation
    """

    def test_canonical_form_cache(self):
        """
        Tests that the workout cache of the canonical form is correctly generated
        """
        self.assertFalse(cache.get(cache_mapper.get_workout_canonical(1)))
        workout = Workout.objects.get(pk=1)
        # Accessing the property is what populates the cache entry.
        workout.canonical_representation
        self.assertTrue(cache.get(cache_mapper.get_workout_canonical(1)))

    def test_canonical_form_cache_save(self):
        """
        Tests the workout cache when saving
        """
        workout = Workout.objects.get(pk=1)
        # Prime the cache by accessing the property.
        workout.canonical_representation
        self.assertTrue(cache.get(cache_mapper.get_workout_canonical(1)))

        # Saving the workout must invalidate the cached representation.
        workout.save()
        self.assertFalse(cache.get(cache_mapper.get_workout_canonical(1)))

    def test_canonical_form_cache_delete(self):
        """
        Tests the workout cache when deleting
        """
        workout = Workout.objects.get(pk=1)
        # Prime the cache by accessing the property.
        workout.canonical_representation
        self.assertTrue(cache.get(cache_mapper.get_workout_canonical(1)))

        # Deleting the workout must invalidate the cached representation.
        workout.delete()
        self.assertFalse(cache.get(cache_mapper.get_workout_canonical(1)))
| agpl-3.0 |
csrocha/OpenUpgrade | addons/point_of_sale/report/pos_users_product.py | 380 | 3336 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class pos_user_product(report_sxw.rml_parse):
    """RML parser for the POS "users product" report.

    Exposes get_data/get_user/get_total helpers to the report template and
    accumulates the grand total while iterating the data.
    """

    def __init__(self, cr, uid, name, context):
        super(pos_user_product, self).__init__(cr, uid, name, context)
        self.localcontext.update({
            'time': time,
            'get_data': self._get_data,
            'get_user': self._get_user,
            'get_total': self._get_total,
        })

    def _get_data(self, o):
        """Return per-product sale rows for the paid POS orders linked to
        bank statement *o*, accumulating the amounts into ``self.total``.
        """
        self.total = 0.0
        data = {}
        # Parameterized queries instead of %-interpolation: avoids SQL
        # injection and quoting issues (the cursor handles escaping).
        sql1 = """ SELECT distinct(o.id) from account_bank_statement s, account_bank_statement_line l,pos_order o,pos_order_line i where i.order_id=o.id and o.state='paid' and l.statement_id=s.id and l.pos_statement_id=o.id and s.id=%s"""
        self.cr.execute(sql1, (o.id,))
        data = self.cr.dictfetchall()
        a_l = [r['id'] for r in data]
        if len(a_l):
            sql2 = """SELECT sum(qty) as qty,l.price_unit*sum(l.qty) as amt,t.name as name, p.default_code as code, pu.name as uom from product_product p, product_template t,product_uom pu,pos_order_line l where order_id = %s and p.product_tmpl_id=t.id and l.product_id=p.id and pu.id=t.uom_id group by t.name,p.default_code,pu.name,l.price_unit"""
            self.cr.execute(sql2, (o.id,))
            data = self.cr.dictfetchall()
            for d in data:
                self.total += d['amt']
        return data

    def _get_user(self, object):
        """Return the distinct partner names of the users who own the bank
        statements in *object*."""
        names = []
        users_obj = self.pool['res.users']
        for o in object:
            sql = """select ru.id from account_bank_statement as abs,res_users ru
                                where abs.user_id = ru.id
                                and abs.id = %s"""
            self.cr.execute(sql, (o.id,))
            data = self.cr.fetchone()
            if data:
                user = users_obj.browse(self.cr, self.uid, data[0])
                names.append(user.partner_id.name)
        return list(set(names))

    def _get_total(self, o):
        """Return the total accumulated by the last ``_get_data`` call."""
        return self.total
class report_pos_user_product(osv.AbstractModel):
    # Glue model registering the QWeb report template and binding it to the
    # pos_user_product parser above.
    _name = 'report.point_of_sale.report_usersproduct'
    _inherit = 'report.abstract_report'
    _template = 'point_of_sale.report_usersproduct'
    _wrapped_report_class = pos_user_product
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/thirdparty/pubsub/core/weakmethod.py | 1 | 4104 | '''
This module provides a basic "weak method" implementation. It is necessary
because the weakref module does not support weak methods (in the sense that,
counter-intuitively, a user who creates a weakref.ref(obj.method), a reasonable
action, get a weak ref that is None.
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE.txt for details.
'''
# for function and method parameter counting:
from inspect import ismethod
# for weakly bound methods:
from new import instancemethod as InstanceMethod
from weakref import ref as WeakRef
class WeakMethod:
    """Represent a weak bound method, i.e. a method which doesn't keep alive the
    object that it is bound to. It uses WeakRef which, used on its own,
    produces weak methods that are dead on creation, not very useful.
    Typically, you will use the getWeakRef() module function instead of using
    this class directly. """

    def __init__(self, method, notifyDead=None):
        """The method must be bound. notifyDead will be called when
        object that method is bound to dies. """
        assert ismethod(method)
        if method.im_self is None:
            raise ValueError('Unbound methods cannot be weak-referenced.')

        self.notifyDead = None
        if notifyDead is None:
            self.objRef = WeakRef(method.im_self)
        else:
            self.notifyDead = notifyDead
            self.objRef = WeakRef(method.im_self, self.__onNotifyDeadObj)
        # Keep the (immortal) function and class; only the instance is weak.
        self.fun = method.im_func
        self.cls = method.im_class

    def __onNotifyDeadObj(self, ref):
        # Forward the death notification, but never let an error in the
        # user callback propagate out of the weakref machinery.
        if self.notifyDead:
            try:
                self.notifyDead(self)
            except Exception:
                import traceback
                traceback.print_exc()

    def __call__(self):
        """Returns a new.instancemethod if object for method still alive.
        Otherwise return None. Note that instancemethod causes a
        strong reference to object to be created, so shouldn't save
        the return value of this call. Note also that this __call__
        is required only for compatibility with WeakRef.ref(), otherwise
        there would be more efficient ways of providing this functionality."""
        # Bug fix: dereference the weakref exactly once.  The original code
        # called self.objRef() twice (once to test for None, once to build
        # the instancemethod); the referent could be collected between the
        # two calls, handing InstanceMethod a None instance.
        obj = self.objRef()
        if obj is None:
            return None
        return InstanceMethod(self.fun, obj, self.cls)

    def __eq__(self, method2):
        """Two WeakMethod objects compare equal if they refer to the same method
        of the same instance. Thanks to Josiah Carlson for patch and clarifications
        on how dict uses eq/cmp and hashing. """
        if not isinstance(method2, WeakMethod):
            return False
        # Same single-dereference discipline as in __call__.
        obj = self.objRef()
        return (self.fun is method2.fun
                and obj is method2.objRef()
                and obj is not None)

    def __hash__(self):
        """Hash is an optimization for dict searches, it need not
        return different numbers for every different object. Some objects
        are not hashable (eg objects of classes derived from dict) so no
        hash(objRef()) in there, and hash(self.cls) would only be useful
        in the rare case where instance method was rebound. """
        return hash(self.fun)

    def __repr__(self):
        dead = ''
        if self.objRef() is None:
            dead = '; DEAD'
        obj = '<%s at %s%s>' % (self.__class__, id(self), dead)
        return obj

    def refs(self, weakRef):
        """Return true if we are storing same object referred to by weakRef."""
        return self.objRef == weakRef
def getWeakRef(obj, notifyDead=None):
    """Get a weak reference to obj. If obj is a bound method, a WeakMethod
    object (which behaves like a WeakRef) is returned; for anything else a
    plain WeakRef is returned. An unbound method raises ValueError (inside
    WeakMethod). notifyDead, when given, is called when the referent dies."""
    factory = WeakMethod if ismethod(obj) else WeakRef
    return factory(obj, notifyDead)
| gpl-3.0 |
hydrospanner/DForurm | DForurm/env/Lib/site-packages/django/contrib/gis/db/backends/oracle/introspection.py | 44 | 1975 | import sys
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
    # Associating any OBJECTVAR instances with GeometryField. Of course,
    # this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
    # but it is the only object type supported within Django anyways.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'

    def get_geometry_type(self, table_name, geo_col):
        """Return ``(field_type, field_params)`` for the geometry column
        *geo_col* of *table_name*, read from Oracle's USER_SDO_GEOM_METADATA.

        ``field_type`` is always ``'GeometryField'``; ``field_params`` carries
        ``srid`` and/or ``dim`` only when they differ from the Django
        defaults (4326 and 2, respectively).  Raises a wrapped Exception when
        no metadata entry exists for the column.
        """
        cursor = self.connection.cursor()
        try:
            # Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
            try:
                cursor.execute(
                    'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
                    'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
                    (table_name.upper(), geo_col.upper())
                )
                row = cursor.fetchone()
            except Exception as msg:
                # Re-raise with context while preserving the original traceback.
                new_msg = (
                    'Could not find entry in USER_SDO_GEOM_METADATA '
                    'corresponding to "%s"."%s"\n'
                    'Error message: %s.') % (table_name, geo_col, msg)
                six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])

            # TODO: Research way to find a more specific geometry field type for
            # the column's contents.
            field_type = 'GeometryField'

            # Getting the field parameters.
            field_params = {}
            dim, srid = row
            if srid != 4326:
                field_params['srid'] = srid
            # Size of object array (SDO_DIM_ARRAY) is number of dimensions.
            dim = dim.size()
            if dim != 2:
                field_params['dim'] = dim
        finally:
            # Always release the cursor, even when the query failed.
            cursor.close()
        return field_type, field_params
| mit |
davidyezsetz/kuma | vendor/lib/python/debug_toolbar/settings.py | 8 | 4870 | from __future__ import absolute_import, unicode_literals
import warnings
from django.conf import settings
from django.utils import six
# Always import this module as follows:
# from debug_toolbar import settings [as dt_settings]
# Don't import directly CONFIG or PANELs, or you will miss changes performed
# with override_settings in tests.
# Default values for every supported DEBUG_TOOLBAR_CONFIG key; user values
# from settings are merged on top of these below.
CONFIG_DEFAULTS = {
    # Toolbar options
    'INSERT_BEFORE': '</body>',
    'RENDER_PANELS': None,
    'RESULTS_STORE_SIZE': 10,
    'ROOT_TAG_EXTRA_ATTRS': '',
    'SHOW_COLLAPSED': False,
    'SHOW_TOOLBAR_CALLBACK': 'debug_toolbar.middleware.show_toolbar',
    # Panel options
    'EXTRA_SIGNALS': [],
    'ENABLE_STACKTRACES': True,
    'HIDE_IN_STACKTRACES': (
        'socketserver' if six.PY3 else 'SocketServer',
        'threading',
        'wsgiref',
        'debug_toolbar',
        'django',
    ),
    'INTERCEPT_REDIRECTS': False,
    'SHOW_TEMPLATE_CONTEXT': True,
    'SQL_WARNING_THRESHOLD': 500,   # milliseconds
}

USER_CONFIG = getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {})

# Backward-compatibility for 1.0, remove in 2.0.
# Keys renamed between releases: warn and silently migrate the user's value.
_RENAMED_CONFIG = {
    'RESULTS_STORE_SIZE': 'RESULTS_CACHE_SIZE',
    'ROOT_TAG_ATTRS': 'ROOT_TAG_EXTRA_ATTRS',
    'HIDDEN_STACKTRACE_MODULES': 'HIDE_IN_STACKTRACES'
}
for old_name, new_name in _RENAMED_CONFIG.items():
    if old_name in USER_CONFIG:
        warnings.warn(
            "%r was renamed to %r. Update your DEBUG_TOOLBAR_CONFIG "
            "setting." % (old_name, new_name), DeprecationWarning)
        USER_CONFIG[new_name] = USER_CONFIG.pop(old_name)

# Keys removed or replaced outright: warn and drop/convert them.
if 'HIDE_DJANGO_SQL' in USER_CONFIG:
    warnings.warn(
        "HIDE_DJANGO_SQL was removed. Update your "
        "DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
    USER_CONFIG.pop('HIDE_DJANGO_SQL')

if 'TAG' in USER_CONFIG:
    warnings.warn(
        "TAG was replaced by INSERT_BEFORE. Update your "
        "DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
    USER_CONFIG['INSERT_BEFORE'] = '</%s>' % USER_CONFIG.pop('TAG')

# Effective configuration: defaults overridden by user-supplied values.
CONFIG = CONFIG_DEFAULTS.copy()
CONFIG.update(USER_CONFIG)

if not isinstance(CONFIG['SHOW_TOOLBAR_CALLBACK'], six.string_types):
    warnings.warn(
        "SHOW_TOOLBAR_CALLBACK is now a dotted path. Update your "
        "DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
# Default panel classes, in display order.
PANELS_DEFAULTS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
]

try:
    # EAFP: fall back to the defaults when the setting is absent.
    PANELS = list(settings.DEBUG_TOOLBAR_PANELS)
except AttributeError:
    PANELS = PANELS_DEFAULTS
else:
    # Backward-compatibility for 1.0, remove in 2.0.
    # Old "DebugPanel" dotted paths are rewritten to their new names, with
    # a deprecation warning per occurrence.
    _RENAMED_PANELS = {
        'debug_toolbar.panels.version.VersionDebugPanel':
        'debug_toolbar.panels.versions.VersionsPanel',
        'debug_toolbar.panels.timer.TimerDebugPanel':
        'debug_toolbar.panels.timer.TimerPanel',
        'debug_toolbar.panels.settings_vars.SettingsDebugPanel':
        'debug_toolbar.panels.settings.SettingsPanel',
        'debug_toolbar.panels.headers.HeaderDebugPanel':
        'debug_toolbar.panels.headers.HeadersPanel',
        'debug_toolbar.panels.request_vars.RequestVarsDebugPanel':
        'debug_toolbar.panels.request.RequestPanel',
        'debug_toolbar.panels.sql.SQLDebugPanel':
        'debug_toolbar.panels.sql.SQLPanel',
        'debug_toolbar.panels.template.TemplateDebugPanel':
        'debug_toolbar.panels.templates.TemplatesPanel',
        'debug_toolbar.panels.cache.CacheDebugPanel':
        'debug_toolbar.panels.cache.CachePanel',
        'debug_toolbar.panels.signals.SignalDebugPanel':
        'debug_toolbar.panels.signals.SignalsPanel',
        'debug_toolbar.panels.logger.LoggingDebugPanel':
        'debug_toolbar.panels.logging.LoggingPanel',
        'debug_toolbar.panels.redirects.InterceptRedirectsDebugPanel':
        'debug_toolbar.panels.redirects.RedirectsPanel',
        'debug_toolbar.panels.profiling.ProfilingDebugPanel':
        'debug_toolbar.panels.profiling.ProfilingPanel',
    }
    for index, old_panel in enumerate(PANELS):
        new_panel = _RENAMED_PANELS.get(old_panel)
        if new_panel is not None:
            warnings.warn(
                "%r was renamed to %r. Update your DEBUG_TOOLBAR_PANELS "
                "setting." % (old_panel, new_panel), DeprecationWarning)
            PANELS[index] = new_panel

# Whether to monkey-patch settings; defaults to settings.DEBUG.
PATCH_SETTINGS = getattr(settings, 'DEBUG_TOOLBAR_PATCH_SETTINGS', settings.DEBUG)
| mpl-2.0 |
nanolearningllc/edx-platform-cypress | common/lib/sandbox-packages/verifiers/tests_draganddrop.py | 173 | 32788 | import unittest
import draganddrop
from .draganddrop import PositionsCompare
import json
class Test_PositionsCompare(unittest.TestCase):
    """Tests for PositionsCompare equality.

    PositionsCompare wraps a draggable position — a string id, an [x, y]
    coordinate pair, or [[x, y], radius] — and the tests below exercise its
    equality semantics: strings compare exactly, while coordinates appear
    to compare within a tolerance (explicit radius or a default one) —
    see the individual cases for the boundaries exercised.
    """

    def test_nested_list_and_list1(self):
        # [[x, y], radius] form: [1, 3] lies within radius 40 of (1, 2).
        self.assertEqual(PositionsCompare([[1, 2], 40]), PositionsCompare([1, 3]))

    def test_nested_list_and_list2(self):
        # (1, 12) vs (1, 1): outside the default tolerance.
        self.assertNotEqual(PositionsCompare([1, 12]), PositionsCompare([1, 1]))

    def test_list_and_list1(self):
        # (1, 15) lies outside radius 12 of (1, 2).
        self.assertNotEqual(PositionsCompare([[1, 2], 12]), PositionsCompare([1, 15]))

    def test_list_and_list2(self):
        # (1, 11) vs (1, 1): inside the default tolerance.
        self.assertEqual(PositionsCompare([1, 11]), PositionsCompare([1, 1]))

    def test_numerical_list_and_string_list(self):
        # Coordinates never equal string ids.
        self.assertNotEqual(PositionsCompare([1, 2]), PositionsCompare(["1"]))

    def test_string_and_string_list1(self):
        self.assertEqual(PositionsCompare("1"), PositionsCompare(["1"]))

    def test_string_and_string_list2(self):
        self.assertEqual(PositionsCompare("abc"), PositionsCompare("abc"))

    def test_string_and_string_list3(self):
        self.assertNotEqual(PositionsCompare("abd"), PositionsCompare("abe"))

    def test_float_and_string(self):
        self.assertNotEqual(PositionsCompare([3.5, 5.7]), PositionsCompare(["1"]))

    def test_floats_and_ints(self):
        # Mixed float/int coordinates still compare within tolerance.
        self.assertEqual(PositionsCompare([3.5, 4.5]), PositionsCompare([5, 7]))
class Test_DragAndDrop_Grade(unittest.TestCase):
def test_targets_are_draggable_1(self):
user_input = json.dumps([
{'p': 'p_l'},
{'up': {'first': {'p': 'p_l'}}}
])
correct_answer = [
{
'draggables': ['p'],
'targets': ['p_l', 'p_r'],
'rule': 'anyof'
},
{
'draggables': ['up'],
'targets': [
'p_l[p][first]'
],
'rule': 'anyof'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_2(self):
user_input = json.dumps([
{'p': 'p_l'},
{'p': 'p_r'},
{'s': 's_l'},
{'s': 's_r'},
{'up': {'1': {'p': 'p_l'}}},
{'up': {'3': {'p': 'p_l'}}},
{'up': {'1': {'p': 'p_r'}}},
{'up': {'3': {'p': 'p_r'}}},
{'up_and_down': {'1': {'s': 's_l'}}},
{'up_and_down': {'1': {'s': 's_r'}}}
])
correct_answer = [
{
'draggables': ['p'],
'targets': ['p_l', 'p_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['s'],
'targets': ['s_l', 's_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': ['s_l[s][1]', 's_r[s][1]'],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'p_l[p][1]',
'p_l[p][3]',
'p_r[p][1]',
'p_r[p][3]',
],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_2_manual_parsing(self):
user_input = json.dumps([
{'up': 'p_l[p][1]'},
{'p': 'p_l'},
{'up': 'p_l[p][3]'},
{'up': 'p_r[p][1]'},
{'p': 'p_r'},
{'up': 'p_r[p][3]'},
{'up_and_down': 's_l[s][1]'},
{'s': 's_l'},
{'up_and_down': 's_r[s][1]'},
{'s': 's_r'}
])
correct_answer = [
{
'draggables': ['p'],
'targets': ['p_l', 'p_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['s'],
'targets': ['s_l', 's_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': ['s_l[s][1]', 's_r[s][1]'],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'p_l[p][1]',
'p_l[p][3]',
'p_r[p][1]',
'p_r[p][3]',
],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_3_nested(self):
user_input = json.dumps([
{'molecule': 'left_side_tagret'},
{'molecule': 'right_side_tagret'},
{'p': {'p_target': {'molecule': 'left_side_tagret'}}},
{'p': {'p_target': {'molecule': 'right_side_tagret'}}},
{'s': {'s_target': {'molecule': 'left_side_tagret'}}},
{'s': {'s_target': {'molecule': 'right_side_tagret'}}},
{'up': {'1': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}},
{'up': {'3': {'p': {'p_target': {'molecule': 'left_side_tagret'}}}}},
{'up': {'1': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}},
{'up': {'3': {'p': {'p_target': {'molecule': 'right_side_tagret'}}}}},
{'up_and_down': {'1': {'s': {'s_target': {'molecule': 'left_side_tagret'}}}}},
{'up_and_down': {'1': {'s': {'s_target': {'molecule': 'right_side_tagret'}}}}}
])
correct_answer = [
{
'draggables': ['molecule'],
'targets': ['left_side_tagret', 'right_side_tagret'],
'rule': 'unordered_equal'
},
{
'draggables': ['p'],
'targets': [
'left_side_tagret[molecule][p_target]',
'right_side_tagret[molecule][p_target]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['s'],
'targets': [
'left_side_tagret[molecule][s_target]',
'right_side_tagret[molecule][s_target]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': [
'left_side_tagret[molecule][s_target][s][1]',
'right_side_tagret[molecule][s_target][s][1]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'left_side_tagret[molecule][p_target][p][1]',
'left_side_tagret[molecule][p_target][p][3]',
'right_side_tagret[molecule][p_target][p][1]',
'right_side_tagret[molecule][p_target][p][3]',
],
'rule': 'unordered_equal'
}
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
def test_targets_are_draggable_4_real_example(self):
user_input = json.dumps([
{'single_draggable': 's_l'},
{'single_draggable': 's_r'},
{'single_draggable': 'p_sigma'},
{'single_draggable': 'p_sigma*'},
{'single_draggable': 's_sigma'},
{'single_draggable': 's_sigma*'},
{'double_draggable': 'p_pi*'},
{'double_draggable': 'p_pi'},
{'triple_draggable': 'p_l'},
{'triple_draggable': 'p_r'},
{'up': {'1': {'triple_draggable': 'p_l'}}},
{'up': {'2': {'triple_draggable': 'p_l'}}},
{'up': {'2': {'triple_draggable': 'p_r'}}},
{'up': {'3': {'triple_draggable': 'p_r'}}},
{'up_and_down': {'1': {'single_draggable': 's_l'}}},
{'up_and_down': {'1': {'single_draggable': 's_r'}}},
{'up_and_down': {'1': {'single_draggable': 's_sigma'}}},
{'up_and_down': {'1': {'single_draggable': 's_sigma*'}}},
{'up_and_down': {'1': {'double_draggable': 'p_pi'}}},
{'up_and_down': {'2': {'double_draggable': 'p_pi'}}}
])
# 10 targets:
# s_l, s_r, p_l, p_r, s_sigma, s_sigma*, p_pi, p_sigma, p_pi*, p_sigma*
#
# 3 draggable objects, which have targets (internal target ids - 1, 2, 3):
# single_draggable, double_draggable, triple_draggable
#
# 2 draggable objects:
# up, up_and_down
correct_answer = [
{
'draggables': ['triple_draggable'],
'targets': ['p_l', 'p_r'],
'rule': 'unordered_equal'
},
{
'draggables': ['double_draggable'],
'targets': ['p_pi', 'p_pi*'],
'rule': 'unordered_equal'
},
{
'draggables': ['single_draggable'],
'targets': ['s_l', 's_r', 's_sigma', 's_sigma*', 'p_sigma', 'p_sigma*'],
'rule': 'unordered_equal'
},
{
'draggables': ['up'],
'targets': [
'p_l[triple_draggable][1]',
'p_l[triple_draggable][2]',
'p_r[triple_draggable][2]',
'p_r[triple_draggable][3]',
],
'rule': 'unordered_equal'
},
{
'draggables': ['up_and_down'],
'targets': [
's_l[single_draggable][1]',
's_r[single_draggable][1]',
's_sigma[single_draggable][1]',
's_sigma*[single_draggable][1]',
'p_pi[double_draggable][1]',
'p_pi[double_draggable][2]',
],
'rule': 'unordered_equal'
},
]
self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_targets_true(self):
        """Every draggable dropped on its expected named target grades True."""
        user_input = '[{"1": "t1"}, \
            {"name_with_icon": "t2"}]'
        correct_answer = {'1': 't1', 'name_with_icon': 't2'}
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_expect_no_actions_wrong(self):
        """An empty correct answer fails when the user placed draggables."""
        user_input = '[{"1": "t1"}, \
            {"name_with_icon": "t2"}]'
        correct_answer = []
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_expect_no_actions_right(self):
        """An empty correct answer passes when the user placed nothing."""
        user_input = '[]'
        correct_answer = []
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_targets_false(self):
        """A draggable dropped on a wrong named target grades False."""
        user_input = '[{"1": "t1"}, \
            {"name_with_icon": "t2"}]'
        correct_answer = {'1': 't3', 'name_with_icon': 't2'}
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_multiple_images_per_target_true(self):
        """Two draggables may share one target when the answer maps both to it."""
        user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \
            {"2": "t1"}]'
        correct_answer = {'1': 't1', 'name_with_icon': 't2', '2': 't1'}
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_multiple_images_per_target_false(self):
        """Shared-target answers still fail when one draggable is misplaced."""
        user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}, \
            {"2": "t1"}]'
        correct_answer = {'1': 't2', 'name_with_icon': 't2', '2': 't1'}
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_targets_and_positions(self):
        """Plain [x, y] answers and [centre, radius] answers can be mixed."""
        user_input = '[{"1": [10,10]}, \
            {"name_with_icon": [[10,10],4]}]'
        correct_answer = {'1': [10, 10], 'name_with_icon': [[10, 10], 4]}
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_position_and_targets(self):
        """Named-target answers grade True when all placements match."""
        user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]'
        correct_answer = {'1': 't1', 'name_with_icon': 't2'}
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_positions_exact(self):
        """Exact coordinate matches grade True."""
        user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
        correct_answer = {'1': [10, 10], 'name_with_icon': [20, 20]}
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_positions_false(self):
        """Coordinates outside the default tolerance grade False."""
        user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
        correct_answer = {'1': [25, 25], 'name_with_icon': [20, 20]}
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_positions_true_in_radius(self):
        """Coordinates within the default radius around the answer grade True."""
        user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
        correct_answer = {'1': [14, 14], 'name_with_icon': [20, 20]}
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_positions_true_in_manual_radius(self):
        """An explicit [centre, radius] answer passes a point inside it."""
        user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
        correct_answer = {'1': [[40, 10], 30], 'name_with_icon': [20, 20]}
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_positions_false_in_manual_radius(self):
        """An explicit [centre, radius] answer fails a point just outside it."""
        user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
        correct_answer = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]}
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_correct_answer_not_has_key_from_user_answer(self):
        """Grading fails when the answer names a draggable the user never placed."""
        user_input = '[{"1": "t1"}, {"name_with_icon": "t2"}]'
        correct_answer = {'3': 't3', 'name_with_icon': 't2'}
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_anywhere(self):
        """Draggables can be placed anywhere on base image.
        Place grass in the middle of the image and ant in the
        right upper corner."""
        user_input = '[{"ant":[610.5,57.449951171875]},\
            {"grass":[322.5,199.449951171875]}]'
        correct_answer = {'grass': [[300, 200], 200], 'ant': [[500, 0], 200]}
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_lcao_correct(self):
        """Describe carbon molecule in LCAO-MO: a fully correct filling grades True."""
        user_input = '[{"1":"s_left"}, \
        {"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \
        {"8":"p_left_2"},{"10":"p_right_1"},{"9":"p_right_2"}, \
        {"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \
        {"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \
        {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]'
        correct_answer = [{
            'draggables': ['1', '2', '3', '4', '5', '6'],
            'targets': [
                's_left', 's_right', 's_sigma', 's_sigma_star', 'p_pi_1', 'p_pi_2'
            ],
            'rule': 'anyof'
        }, {
            'draggables': ['7', '8', '9', '10'],
            'targets': ['p_left_1', 'p_left_2', 'p_right_1', 'p_right_2'],
            'rule': 'anyof'
        }, {
            'draggables': ['11', '12'],
            'targets': ['s_sigma_name', 'p_sigma_name'],
            'rule': 'anyof'
        }, {
            'draggables': ['13', '14'],
            'targets': ['s_sigma_star_name', 'p_sigma_star_name'],
            'rule': 'anyof'
        }, {
            'draggables': ['15'],
            'targets': ['p_pi_name'],
            'rule': 'anyof'
        }, {
            'draggables': ['16'],
            'targets': ['p_pi_star_name'],
            'rule': 'anyof'
        }]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_lcao_extra_element_incorrect(self):
        """Describe carbon molecule in LCAO-MO: an extra draggable ("17") must fail."""
        user_input = '[{"1":"s_left"}, \
        {"5":"s_right"},{"4":"s_sigma"},{"6":"s_sigma_star"},{"7":"p_left_1"}, \
        {"8":"p_left_2"},{"17":"p_left_3"},{"10":"p_right_1"},{"9":"p_right_2"}, \
        {"2":"p_pi_1"},{"3":"p_pi_2"},{"11":"s_sigma_name"}, \
        {"13":"s_sigma_star_name"},{"15":"p_pi_name"},{"16":"p_pi_star_name"}, \
        {"12":"p_sigma_name"},{"14":"p_sigma_star_name"}]'
        correct_answer = [{
            'draggables': ['1', '2', '3', '4', '5', '6'],
            'targets': [
                's_left', 's_right', 's_sigma', 's_sigma_star', 'p_pi_1', 'p_pi_2'
            ],
            'rule': 'anyof'
        }, {
            'draggables': ['7', '8', '9', '10'],
            'targets': ['p_left_1', 'p_left_2', 'p_right_1', 'p_right_2'],
            'rule': 'anyof'
        }, {
            'draggables': ['11', '12'],
            'targets': ['s_sigma_name', 'p_sigma_name'],
            'rule': 'anyof'
        }, {
            'draggables': ['13', '14'],
            'targets': ['s_sigma_star_name', 'p_sigma_star_name'],
            'rule': 'anyof'
        }, {
            'draggables': ['15'],
            'targets': ['p_pi_name'],
            'rule': 'anyof'
        }, {
            'draggables': ['16'],
            'targets': ['p_pi_star_name'],
            'rule': 'anyof'
        }]
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_reuse_draggable_no_mupliples(self):
        """Test reusable draggables (no multiple draggables per target)."""
        user_input = '[{"1":"target1"}, \
        {"2":"target2"},{"1":"target3"},{"2":"target4"},{"2":"target5"}, \
        {"3":"target6"}]'
        correct_answer = [
            {
                'draggables': ['1'],
                'targets': ['target1', 'target3'],
                'rule': 'anyof'
            },
            {
                'draggables': ['2'],
                'targets': ['target2', 'target4', 'target5'],
                'rule': 'anyof'
            },
            {
                'draggables': ['3'],
                'targets': ['target6'],
                'rule': 'anyof'
            }
        ]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_reuse_draggable_with_mupliples(self):
        """Test reusable draggables with multiple draggables per target."""
        user_input = '[{"1":"target1"}, \
        {"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \
        {"3":"target6"}]'
        correct_answer = [
            {
                'draggables': ['1'],
                'targets': ['target1', 'target3'],
                'rule': 'anyof'
            },
            {
                'draggables': ['2'],
                'targets': ['target2', 'target4'],
                'rule': 'anyof'
            },
            {
                'draggables': ['3'],
                'targets': ['target6'],
                'rule': 'anyof'
            }
        ]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_reuse_many_draggable_with_mupliples(self):
        """Test many reusable draggables with multiple draggables per target."""
        user_input = '[{"1":"target1"}, \
        {"2":"target2"},{"1":"target1"},{"2":"target4"},{"2":"target4"}, \
        {"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \
        {"5": "target5"}, {"6": "target2"}]'
        correct_answer = [
            {
                'draggables': ['1', '4'],
                'targets': ['target1', 'target3'],
                'rule': 'anyof'
            },
            {
                'draggables': ['2', '6'],
                'targets': ['target2', 'target4'],
                'rule': 'anyof'
            },
            {
                'draggables': ['5'],
                'targets': ['target4', 'target5'],
                'rule': 'anyof'
            },
            {
                'draggables': ['3'],
                'targets': ['target6'],
                'rule': 'anyof'
            }
        ]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_reuse_many_draggable_with_mupliples_wrong(self):
        """Test many reusable draggables with multiple draggables per target (wrong)."""
        user_input = '[{"1":"target1"}, \
        {"2":"target2"},{"1":"target1"}, \
        {"2":"target3"}, \
        {"2":"target4"}, \
        {"3":"target6"}, {"4": "target3"}, {"5": "target4"}, \
        {"5": "target5"}, {"6": "target2"}]'
        correct_answer = [
            {
                'draggables': ['1', '4'],
                'targets': ['target1', 'target3'],
                'rule': 'anyof'
            },
            {
                'draggables': ['2', '6'],
                'targets': ['target2', 'target4'],
                'rule': 'anyof'
            },
            {
                'draggables': ['5'],
                'targets': ['target4', 'target5'],
                'rule': 'anyof'
            },
            {
                'draggables': ['3'],
                'targets': ['target6'],
                'rule': 'anyof'
            }]
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_label_10_targets_with_a_b_c_false(self):
        """Labelling ten targets with a/b/c: one repeated target placement fails."""
        user_input = '[{"a":"target1"}, \
        {"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \
        {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
        {"a":"target1"}]'
        correct_answer = [
            {
                'draggables': ['a'],
                'targets': ['target1', 'target4', 'target7', 'target10'],
                'rule': 'unordered_equal'
            },
            {
                'draggables': ['b'],
                'targets': ['target2', 'target5', 'target8'],
                'rule': 'unordered_equal'
            },
            {
                'draggables': ['c'],
                'targets': ['target3', 'target6', 'target9'],
                'rule': 'unordered_equal'
            }
        ]
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_label_10_targets_with_a_b_c_(self):
        """Labelling ten targets with a/b/c: a complete correct labelling passes."""
        user_input = '[{"a":"target1"}, \
        {"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \
        {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
        {"a":"target10"}]'
        correct_answer = [
            {
                'draggables': ['a'],
                'targets': ['target1', 'target4', 'target7', 'target10'],
                'rule': 'unordered_equal'
            },
            {
                'draggables': ['b'],
                'targets': ['target2', 'target5', 'target8'],
                'rule': 'unordered_equal'
            },
            {
                'draggables': ['c'],
                'targets': ['target3', 'target6', 'target9'],
                'rule': 'unordered_equal'
            }
        ]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_label_10_targets_with_a_b_c_multiple(self):
        """Reusable draggables with 'anyof+number': counted placements pass."""
        user_input = '[{"a":"target1"}, \
        {"b":"target2"},{"c":"target3"},{"b":"target5"}, \
        {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
        {"a":"target1"}]'
        correct_answer = [
            {
                'draggables': ['a', 'a', 'a'],
                'targets': ['target1', 'target4', 'target7', 'target10'],
                'rule': 'anyof+number'
            },
            {
                'draggables': ['b', 'b', 'b'],
                'targets': ['target2', 'target5', 'target8'],
                'rule': 'anyof+number'
            },
            {
                'draggables': ['c', 'c', 'c'],
                'targets': ['target3', 'target6', 'target9'],
                'rule': 'anyof+number'
            }
        ]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_label_10_targets_with_a_b_c_multiple_false(self):
        """Reusable draggables with 'anyof+number': a fourth 'a' placement fails."""
        user_input = '[{"a":"target1"}, \
        {"b":"target2"},{"c":"target3"},{"a":"target4"},{"b":"target5"}, \
        {"c":"target6"}, {"a":"target7"},{"b":"target8"},{"c":"target9"}, \
        {"a":"target1"}]'
        correct_answer = [
            {
                'draggables': ['a', 'a', 'a'],
                'targets': ['target1', 'target4', 'target7', 'target10'],
                'rule': 'anyof+number'
            },
            {
                'draggables': ['b', 'b', 'b'],
                'targets': ['target2', 'target5', 'target8'],
                'rule': 'anyof+number'
            },
            {
                'draggables': ['c', 'c', 'c'],
                'targets': ['target3', 'target6', 'target9'],
                'rule': 'anyof+number'
            }
        ]
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_label_10_targets_with_a_b_c_reused(self):
        """Test a b c in 10 labels reused with 'unordered_equal+number'."""
        user_input = '[{"a":"target1"}, \
        {"b":"target2"},{"c":"target3"},{"b":"target5"}, \
        {"c":"target6"}, {"b":"target8"},{"c":"target9"}, \
        {"a":"target10"}]'
        correct_answer = [
            {
                'draggables': ['a', 'a'],
                'targets': ['target1', 'target10'],
                'rule': 'unordered_equal+number'
            },
            {
                'draggables': ['b', 'b', 'b'],
                'targets': ['target2', 'target5', 'target8'],
                'rule': 'unordered_equal+number'
            },
            {
                'draggables': ['c', 'c', 'c'],
                'targets': ['target3', 'target6', 'target9'],
                'rule': 'unordered_equal+number'
            }
        ]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_label_10_targets_with_a_b_c_reused_false(self):
        """Test a b c in 10 labels reused: an extra 'a' on target8 fails."""
        user_input = '[{"a":"target1"}, \
        {"b":"target2"},{"c":"target3"},{"b":"target5"}, {"a":"target8"},\
        {"c":"target6"}, {"b":"target8"},{"c":"target9"}, \
        {"a":"target10"}]'
        correct_answer = [
            {
                'draggables': ['a', 'a'],
                'targets': ['target1', 'target10'],
                'rule': 'unordered_equal+number'
            },
            {
                'draggables': ['b', 'b', 'b'],
                'targets': ['target2', 'target5', 'target8'],
                'rule': 'unordered_equal+number'
            },
            {
                'draggables': ['c', 'c', 'c'],
                'targets': ['target3', 'target6', 'target9'],
                'rule': 'unordered_equal+number'
            }
        ]
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_mixed_reuse_and_not_reuse(self):
        """Mix a reusable ('anyof') group with a single-use ('exact') group."""
        user_input = '[{"a":"target1"}, \
        {"b":"target2"},{"c":"target3"}, {"a":"target4"},\
        {"a":"target5"}]'
        correct_answer = [
            {
                'draggables': ['a', 'b'],
                'targets': ['target1', 'target2', 'target4', 'target5'],
                'rule': 'anyof'
            },
            {
                'draggables': ['c'],
                'targets': ['target3'],
                'rule': 'exact'
            }
        ]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_mixed_reuse_and_not_reuse_number(self):
        """Mix a counted reusable group ('anyof+number') with an 'exact' group."""
        user_input = '[{"a":"target1"}, \
        {"b":"target2"},{"c":"target3"}, {"a":"target4"}]'
        correct_answer = [
            {
                'draggables': ['a', 'a', 'b'],
                'targets': ['target1', 'target2', 'target4'],
                'rule': 'anyof+number'
            },
            {
                'draggables': ['c'],
                'targets': ['target3'],
                'rule': 'exact'
            }
        ]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
    def test_mixed_reuse_and_not_reuse_number_false(self):
        """Test reusable draggables with numbers, but wrong."""
        user_input = '[{"a":"target1"}, \
        {"b":"target2"},{"c":"target3"}, {"a":"target4"}, {"a":"target10"}]'
        correct_answer = [
            {
                'draggables': ['a', 'a', 'b'],
                'targets': ['target1', 'target2', 'target4', 'target10'],
                # NOTE(review): rule 'anyof_number' differs from the
                # 'anyof+number' spelling used elsewhere — confirm intended.
                'rule': 'anyof_number'
            },
            {
                'draggables': ['c'],
                'targets': ['target3'],
                'rule': 'exact'
            }
        ]
        self.assertFalse(draganddrop.grade(user_input, correct_answer))
    def test_alternative_correct_answer(self):
        """The same target name may appear in several 'exact' groups."""
        user_input = '[{"name_with_icon":"t1"},\
        {"name_with_icon":"t1"},{"name_with_icon":"t1"},{"name4":"t1"}, \
        {"name4":"t1"}]'
        correct_answer = [
            {'draggables': ['name4'], 'targets': ['t1', 't1'], 'rule': 'exact'},
            {'draggables': ['name_with_icon'], 'targets': ['t1', 't1', 't1'],
             'rule': 'exact'}
        ]
        self.assertTrue(draganddrop.grade(user_input, correct_answer))
class Test_DragAndDrop_Populate(unittest.TestCase):
    """Check that DragAndDrop.__init__ populates its internal structures."""
    def test_1(self):
        """Answers are split into parallel groups/positions lists."""
        correct_answer = {'1': [[40, 10], 29], 'name_with_icon': [20, 20]}
        user_input = '[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]'
        dnd = draganddrop.DragAndDrop(correct_answer, user_input)
        # Expected decomposition: one group per draggable, with the
        # [centre, radius] answer kept under the 'exact' key.
        correct_groups = [['1'], ['name_with_icon']]
        correct_positions = [{'exact': [[[40, 10], 29]]}, {'exact': [[20, 20]]}]
        user_groups = [['1'], ['name_with_icon']]
        user_positions = [{'user': [[10, 10]]}, {'user': [[20, 20]]}]
        self.assertEqual(correct_groups, dnd.correct_groups)
        self.assertEqual(correct_positions, dnd.correct_positions)
        self.assertEqual(user_groups, dnd.user_groups)
        self.assertEqual(user_positions, dnd.user_positions)
class Test_DraAndDrop_Compare_Positions(unittest.TestCase):
    """Unit tests for DragAndDrop.compare_positions ('exact' vs 'anyof' flags)."""
    # NOTE(review): class name misspells "DragAndDrop"; it is also referenced
    # from suite(), so renaming would need to touch both places.
    def test_1(self):
        """'anyof' ignores ordering of coordinate pairs."""
        dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
        self.assertTrue(dnd.compare_positions(correct=[[1, 1], [2, 3]],
                                              user=[[2, 3], [1, 1]],
                                              flag='anyof'))
    def test_2a(self):
        """'exact' with reordered coordinates still passes here (pairwise radius check)."""
        dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
        self.assertTrue(dnd.compare_positions(correct=[[1, 1], [2, 3]],
                                              user=[[2, 3], [1, 1]],
                                              flag='exact'))
    def test_2b(self):
        """'exact' fails when a coordinate is far outside tolerance."""
        dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
        self.assertFalse(dnd.compare_positions(correct=[[1, 1], [2, 3]],
                                               user=[[2, 13], [1, 1]],
                                               flag='exact'))
    def test_3(self):
        """'anyof' fails when the user list has an element not in correct."""
        dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
        self.assertFalse(dnd.compare_positions(correct=["a", "b"],
                                               user=["a", "b", "c"],
                                               flag='anyof'))
    def test_4(self):
        """'anyof' allows the user list to be a subset of correct."""
        dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
        self.assertTrue(dnd.compare_positions(correct=["a", "b", "c"],
                                              user=["a", "b"],
                                              flag='anyof'))
    def test_5(self):
        """'exact' requires identical ordering of names."""
        dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
        self.assertFalse(dnd.compare_positions(correct=["a", "b", "c"],
                                               user=["a", "c", "b"],
                                               flag='exact'))
    def test_6(self):
        """'anyof' accepts the same names in any order."""
        dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
        self.assertTrue(dnd.compare_positions(correct=["a", "b", "c"],
                                              user=["a", "c", "b"],
                                              flag='anyof'))
    def test_7(self):
        """'anyof' fails when the user supplies a name not in correct."""
        dnd = draganddrop.DragAndDrop({'1': 't1'}, '[{"1": "t1"}]')
        self.assertFalse(dnd.compare_positions(correct=["a", "b", "b"],
                                               user=["a", "c", "b"],
                                               flag='anyof'))
def suite():
    """Build a TestSuite covering every drag-and-drop test case class."""
    loader = unittest.TestLoader()
    case_classes = (
        Test_PositionsCompare,
        Test_DragAndDrop_Populate,
        Test_DragAndDrop_Grade,
        Test_DraAndDrop_Compare_Positions,
    )
    return unittest.TestSuite(
        loader.loadTestsFromTestCase(case) for case in case_classes
    )
if __name__ == "__main__":
    # Run the full drag-and-drop suite verbosely when executed directly.
    unittest.TextTestRunner(verbosity=2).run(suite())
| agpl-3.0 |
herow/planning_qgis | python/plugins/processing/algs/taudem/dinfdistdown.py | 7 | 5022 | # -*- coding: utf-8 -*-
"""
***************************************************************************
dinfdistdown.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputRaster
from TauDEMUtils import TauDEMUtils
class DinfDistDown(GeoAlgorithm):
    """QGIS Processing wrapper for the TauDEM "D-Infinity Distance Down" tool.

    Builds an ``mpiexec ... dinfdistdown`` command line from the algorithm's
    parameters and delegates execution to TauDEMUtils.
    """
    # Parameter / output identifiers used by the Processing framework.
    DINF_FLOW_DIR_GRID = 'DINF_FLOW_DIR_GRID'
    PIT_FILLED_GRID = 'PIT_FILLED_GRID'
    STREAM_GRID = 'STREAM_GRID'
    WEIGHT_PATH_GRID = 'WEIGHT_PATH_GRID'
    STAT_METHOD = 'STAT_METHOD'
    DIST_METHOD = 'DIST_METHOD'
    EDGE_CONTAM = 'EDGE_CONTAM'
    DIST_DOWN_GRID = 'DIST_DOWN_GRID'
    # Human-readable choices and their command-line codes for -m.
    STATISTICS = ['Minimum', 'Maximum', 'Average']
    STAT_DICT = {0: 'min', 1: 'max', 2: 'ave'}
    DISTANCE = ['Pythagoras', 'Horizontal', 'Vertical', 'Surface']
    DIST_DICT = {
        0: 'p',
        1: 'h',
        2: 'v',
        3: 's',
    }
    def getIcon(self):
        """Return the TauDEM toolbox icon."""
        return QIcon(os.path.dirname(__file__) + '/../../images/taudem.png')
    def defineCharacteristics(self):
        """Declare name, group, parameters and outputs for the Processing GUI."""
        self.name = 'D-Infinity Distance Down'
        self.cmdName = 'dinfdistdown'
        self.group = 'Specialized Grid Analysis tools'
        self.addParameter(ParameterRaster(self.DINF_FLOW_DIR_GRID,
            self.tr('D-Infinity Flow Direction Grid'), False))
        self.addParameter(ParameterRaster(self.PIT_FILLED_GRID,
            self.tr('Pit Filled Elevation Grid'), False))
        self.addParameter(ParameterRaster(self.STREAM_GRID,
            self.tr('Stream Raster Grid'), False))
        # Optional weight grid (may be left unset by the user).
        self.addParameter(ParameterRaster(self.WEIGHT_PATH_GRID,
            self.tr('Weight Path Grid'), True))
        self.addParameter(ParameterSelection(self.STAT_METHOD,
            self.tr('Statistical Method'), self.STATISTICS, 2))
        self.addParameter(ParameterSelection(self.DIST_METHOD,
            self.tr('Distance Method'), self.DISTANCE, 1))
        self.addParameter(ParameterBoolean(self.EDGE_CONTAM,
            self.tr('Check for edge contamination'), True))
        self.addOutput(OutputRaster(self.DIST_DOWN_GRID,
            self.tr('D-Infinity Drop to Stream Grid')))
    def processAlgorithm(self, progress):
        """Assemble the dinfdistdown command line and run it via mpiexec.

        Raises GeoAlgorithmExecutionException when the configured MPI
        process count is not positive.
        """
        commands = []
        commands.append(os.path.join(TauDEMUtils.mpiexecPath(), 'mpiexec'))
        processNum = ProcessingConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
        if processNum <= 0:
            raise GeoAlgorithmExecutionException(
                self.tr('Wrong number of MPI processes used. Please set '
                        'correct number before running TauDEM algorithms.'))
        commands.append('-n')
        commands.append(str(processNum))
        commands.append(os.path.join(TauDEMUtils.taudemPath(), self.cmdName))
        commands.append('-ang')
        commands.append(self.getParameterValue(self.DINF_FLOW_DIR_GRID))
        commands.append('-fel')
        commands.append(self.getParameterValue(self.PIT_FILLED_GRID))
        commands.append('-src')
        commands.append(self.getParameterValue(self.STREAM_GRID))
        wg = self.getParameterValue(self.WEIGHT_PATH_GRID)
        if wg is not None:
            # Weight grid is optional; only pass -wg when supplied.
            commands.append('-wg')
            commands.append(self.getParameterValue(self.WEIGHT_PATH_GRID))
        commands.append('-m')
        # Statistic code and distance code are passed as two consecutive
        # values of the single -m option.
        commands.append(str(self.STAT_DICT[self.getParameterValue(
            self.STAT_METHOD)]))
        commands.append(str(self.DIST_DICT[self.getParameterValue(
            self.DIST_METHOD)]))
        if not self.getParameterValue(self.EDGE_CONTAM):
            # -nc disables the edge-contamination check.
            commands.append('-nc')
        commands.append('-dd')
        commands.append(self.getOutputValue(self.DIST_DOWN_GRID))
        TauDEMUtils.executeTauDEM(commands, progress)
| gpl-2.0 |
leadbrick/authomatic | authomatic/providers/gaeopenid.py | 14 | 3245 | # -*- coding: utf-8 -*-
"""
Google App Engine OpenID Providers
----------------------------------
|openid|_ provider implementations based on the |gae_users_api|_.
.. note::
When using the :class:`GAEOpenID` provider, the :class:`.User` object
will always have only the
:attr:`.User.user_id`,
:attr:`.User.email`,
:attr:`.User.gae_user`
attributes populated with data.
Moreover the :attr:`.User.user_id` will always be empty on the
`GAE Development Server <https://developers.google.com/appengine/docs/python/tools/devserver>`_.
.. autosummary::
GAEOpenID
Yahoo
Google
"""
import logging
from google.appengine.api import users
import authomatic.core as core
from authomatic import providers
from authomatic.exceptions import FailureError
__all__ = ['GAEOpenID', 'Yahoo', 'Google']
class GAEOpenID(providers.AuthenticationProvider):
    """
    |openid|_ provider based on the |gae_users_api|_.
    Accepts additional keyword arguments inherited from :class:`.AuthenticationProvider`.
    """
    @providers.login_decorator
    def login(self):
        """Launches the OpenID authentication procedure.

        Phase 1 (identifier present in request params): redirect the user to
        the GAE login URL for :attr:`.identifier`.
        Phase 2 (after the redirect back): read the authenticated GAE user
        and populate :attr:`.user`; raise FailureError otherwise.
        """
        if self.params.get(self.identifier_param):
            #===================================================================
            # Phase 1 before redirect.
            #===================================================================
            self._log(logging.INFO, u'Starting OpenID authentication procedure.')
            url = users.create_login_url(dest_url=self.url, federated_identity=self.identifier)
            self._log(logging.INFO, u'Redirecting user to {0}.'.format(url))
            self.redirect(url)
        else:
            #===================================================================
            # Phase 2 after redirect.
            #===================================================================
            self._log(logging.INFO, u'Continuing OpenID authentication procedure after redirect.')
            user = users.get_current_user()
            if user:
                self._log(logging.INFO, u'Authentication successful.')
                self._log(logging.INFO, u'Creating user.')
                # NOTE(review): user.federated_identity() is empty on the GAE
                # development server — see module docstring.
                self.user = core.User(self,
                                      id=user.federated_identity(),
                                      email=user.email(),
                                      gae_user=user)
                #===============================================================
                # We're done
                #===============================================================
            else:
                raise FailureError('Unable to authenticate identifier "{0}"!'.format(self.identifier))
class Yahoo(GAEOpenID):
    """
    :class:`.GAEOpenID` provider with the :attr:`.identifier` set to ``"me.yahoo.com"``.
    """
    # Fixed OpenID identifier routed through the GAE users API.
    identifier = 'me.yahoo.com'
class Google(GAEOpenID):
    """
    :class:`.GAEOpenID` provider with the :attr:`.identifier` set to ``"https://www.google.com/accounts/o8/id"``.
    """
    # Fixed OpenID identifier routed through the GAE users API.
    identifier = 'https://www.google.com/accounts/o8/id'
| mit |
peterbarker/MAVProxy | MAVProxy/modules/mavproxy_adsb.py | 6 | 10426 | '''
Support for ADS-B data
Samuel Dudley
Dec 2015
'''
import time
from math import *
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.mavproxy_map import mp_slipmap
from MAVProxy.modules.lib import mp_settings
from MAVProxy.modules.lib.mp_menu import * # popup menus
from pymavlink import mavutil
class ADSBVehicle(object):
    """State holder for one ADS-B reported aircraft (a potential threat)."""
    def __init__(self, id, state):
        self.id = id
        self.state = state
        # Only the green plane icon is used for now.
        self.vehicle_colour = 'green'
        self.vehicle_type = 'plane'
        self.icon = '{0}{1}.png'.format(self.vehicle_colour, self.vehicle_type)
        self.update_time = time.time()
        self.is_evading_threat = False
        # Separations from our vehicle; unknown until a position fix arrives.
        self.v_distance = None
        self.h_distance = None
        self.distance = None
    def update(self, state):
        """Replace the cached ADS-B state and refresh the last-seen time."""
        self.state = state
        self.update_time = time.time()
class ADSBModule(mp_module.MPModule):
    """MAVProxy module that tracks ADS-B aircraft and flags nearby threats."""
    def __init__(self, mpstate):
        super(ADSBModule, self).__init__(mpstate, "adsb", "ADS-B data support")
        # Map of threat id ('ADSB-<ICAO>') -> ADSBVehicle.
        self.threat_vehicles = {}
        self.active_threat_ids = []  # holds all threat ids the vehicle is evading
        self.add_command('adsb', self.cmd_ADSB, ["adsb control",
                                                 "<status>",
                                                 "set (ADSBSETTING)"])
        self.ADSB_settings = mp_settings.MPSettings([("timeout", int, 10), # seconds
                                                     ("threat_radius", int, 200), # meters
                                                     ("show_threat_radius", bool, False),
                                                     # threat_radius_clear = threat_radius*threat_radius_clear_multiplier
                                                     ("threat_radius_clear_multiplier", int, 2),
                                                     ("show_threat_radius_clear", bool, False)])
        # 2 Hz periodic events driving detection and stale-threat cleanup.
        self.threat_detection_timer = mavutil.periodic_event(2)
        self.threat_timeout_timer = mavutil.periodic_event(2)
    def cmd_ADSB(self, args):
        '''adsb command parser'''
        usage = "usage: adsb <set>"
        if len(args) == 0:
            print(usage)
            return
        if args[0] == "status":
            print("total threat count: %u active threat count: %u" %
                  (len(self.threat_vehicles), len(self.active_threat_ids)))
            # NOTE(review): distance is None until the first
            # GLOBAL_POSITION_INT arrives; "%.2f" % None would raise — confirm.
            for id in self.threat_vehicles.keys():
                print("id: %s distance: %.2f m callsign: %s alt: %.2f" % (id,
                                                                          self.threat_vehicles[id].distance,
                                                                          self.threat_vehicles[id].state['callsign'],
                                                                          self.threat_vehicles[id].state['altitude']))
        elif args[0] == "set":
            self.ADSB_settings.command(args[1:])
        else:
            print(usage)
    def perform_threat_detection(self):
        '''determine threats'''
        # TODO: perform more advanced threat detection
        threat_radius_clear = self.ADSB_settings.threat_radius * \
            self.ADSB_settings.threat_radius_clear_multiplier
        for id in self.threat_vehicles.keys():
            if self.threat_vehicles[id].distance is not None:
                if self.threat_vehicles[id].distance <= self.ADSB_settings.threat_radius and not self.threat_vehicles[id].is_evading_threat:
                    # if the threat is in the threat radius and not currently
                    # known to the module...
                    # set flag to action threat
                    self.threat_vehicles[id].is_evading_threat = True
                if self.threat_vehicles[id].distance > threat_radius_clear and self.threat_vehicles[id].is_evading_threat:
                    # if the threat is known to the module and outside the
                    # threat clear radius...
                    # clear flag to action threat
                    self.threat_vehicles[id].is_evading_threat = False
        # Rebuild the list of ids currently flagged as active threats.
        self.active_threat_ids = [id for id in self.threat_vehicles.keys(
        ) if self.threat_vehicles[id].is_evading_threat]
    def update_threat_distances(self, latlonalt):
        '''update the distance between threats and vehicle'''
        for id in self.threat_vehicles.keys():
            # ADS-B lat/lon are degrees * 1e7; altitude is already meters here.
            threat_latlonalt = (self.threat_vehicles[id].state['lat'] * 1e-7,
                                self.threat_vehicles[id].state['lon'] * 1e-7,
                                self.threat_vehicles[id].state['altitude'])
            self.threat_vehicles[id].h_distance = self.get_h_distance(latlonalt, threat_latlonalt)
            self.threat_vehicles[id].v_distance = self.get_v_distance(latlonalt, threat_latlonalt)
            # calculate and set the total (3D) distance between threat and vehicle
            self.threat_vehicles[id].distance = sqrt(
                self.threat_vehicles[id].h_distance**2 + (self.threat_vehicles[id].v_distance)**2)
    def get_h_distance(self, latlonalt1, latlonalt2):
        '''get the horizontal (great-circle) distance in meters between threat and vehicle'''
        (lat1, lon1, alt1) = latlonalt1
        (lat2, lon2, alt2) = latlonalt2
        lat1 = radians(lat1)
        lon1 = radians(lon1)
        lat2 = radians(lat2)
        lon2 = radians(lon2)
        dLat = lat2 - lat1
        dLon = lon2 - lon1
        # math as per mavextra.distance_two(): haversine on a 6371 km sphere
        a = sin(0.5 * dLat)**2 + sin(0.5 * dLon)**2 * cos(lat1) * cos(lat2)
        c = 2.0 * atan2(sqrt(a), sqrt(1.0 - a))
        return 6371 * 1000 * c
    def get_v_distance(self, latlonalt1, latlonalt2):
        '''get the vertical distance (signed altitude difference) between threat and vehicle'''
        (lat1, lon1, alt1) = latlonalt1
        (lat2, lon2, alt2) = latlonalt2
        return alt2 - alt1
    def check_threat_timeout(self):
        '''check and handle threat time out'''
        current_time = time.time()
        for id in self.threat_vehicles.keys():
            if current_time - self.threat_vehicles[id].update_time > self.ADSB_settings.timeout:
                # if the threat has timed out...
                del self.threat_vehicles[id] # remove the threat from the dict
                if self.mpstate.map:
                    # remove the threat from the map
                    self.mpstate.map.remove_object(id)
                # we've modified the dict we're iterating over, so
                # we'll get any more timed-out threats next time we're
                # called:
                return
    def mavlink_packet(self, m):
        '''handle an incoming mavlink packet'''
        if m.get_type() == "ADSB_VEHICLE":
            id = 'ADSB-' + str(m.ICAO_address)
            if id not in self.threat_vehicles.keys(): # check to see if the vehicle is in the dict
                # if not then add it
                self.threat_vehicles[id] = ADSBVehicle(id=id, state=m.to_dict())
                if self.mpstate.map: # if the map is loaded...
                    icon = self.mpstate.map.icon(self.threat_vehicles[id].icon)
                    popup = MPMenuSubMenu('ADSB', items=[MPMenuItem(name=id, returnkey=None)])
                    # draw the vehicle on the map
                    self.mpstate.map.add_object(mp_slipmap.SlipIcon(id, (m.lat * 1e-7, m.lon * 1e-7),
                                                                    icon, layer=3, rotation=m.heading*0.01, follow=False,
                                                                    trail=mp_slipmap.SlipTrail(colour=(0, 255, 255)),
                                                                    popup_menu=popup))
            else: # the vehicle is in the dict
                # update the dict entry
                self.threat_vehicles[id].update(m.to_dict())
                if self.mpstate.map: # if the map is loaded...
                    # update the map
                    self.mpstate.map.set_position(id, (m.lat * 1e-7, m.lon * 1e-7), rotation=m.heading*0.01)
        if m.get_type() == "GLOBAL_POSITION_INT":
            if self.mpstate.map:
                # Thicker circle while any threat is being evaded.
                if len(self.active_threat_ids) > 0:
                    threat_circle_width = 2
                else:
                    threat_circle_width = 1
                # update the threat circle on the map
                threat_circle = mp_slipmap.SlipCircle("threat_circle", 3,
                                                      (m.lat * 1e-7, m.lon * 1e-7),
                                                      self.ADSB_settings.threat_radius,
                                                      (0, 255, 255), linewidth=threat_circle_width)
                threat_circle.set_hidden(
                    not self.ADSB_settings.show_threat_radius) # show the circle?
                self.mpstate.map.add_object(threat_circle)
                # update the threat clear circle on the map
                threat_radius_clear = self.ADSB_settings.threat_radius * \
                    self.ADSB_settings.threat_radius_clear_multiplier
                threat_clear_circle = mp_slipmap.SlipCircle("threat_clear_circle", 3,
                                                            (m.lat * 1e-7,
                                                             m.lon * 1e-7),
                                                            threat_radius_clear,
                                                            (0, 255, 255), linewidth=1)
                # show the circle?
                threat_clear_circle.set_hidden(not self.ADSB_settings.show_threat_radius_clear)
                self.mpstate.map.add_object(threat_clear_circle)
            # we assume this is handled much more often than ADS-B messages
            # so update the distance between vehicle and threat here
            self.update_threat_distances((m.lat * 1e-7, m.lon * 1e-7, m.alt * 1e-3))
    def idle_task(self):
        '''called on idle'''
        if self.threat_timeout_timer.trigger():
            self.check_threat_timeout()
        if self.threat_detection_timer.trigger():
            self.perform_threat_detection()
            # TODO: possibly evade detected threats with ids in
            # self.active_threat_ids
def init(mpstate):
    '''initialise module (MAVProxy module entry point)'''
    return ADSBModule(mpstate)
| gpl-3.0 |
klahnakoski/TestLog-ETL | vendor/jx_sqlite/expressions/_utils.py | 2 | 6006 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import FALSE, FalseOp, NULL, NullOp, TrueOp, extend
from jx_base.language import Language
from jx_sqlite.utils import quote_column
from jx_sqlite.sqlite import sql_call
from mo_dots import wrap, FlatList, is_data
from mo_future import decorate
from mo_json import BOOLEAN, NESTED, OBJECT, STRING, NUMBER, IS_NULL, TIME, INTERVAL
from mo_logs import Log
from mo_sql import (
SQL,
SQL_FALSE,
SQL_NULL,
SQL_TRUE,
sql_iso,
SQL_ZERO,
SQL_ONE,
SQL_PLUS,
SQL_STAR,
SQL_LT,
ConcatSQL)
# NOTE(review): placeholders to break an import cycle — presumably rebound
# elsewhere during language/module registration; confirm before relying on them.
NumberOp, OrOp, SQLScript = [None] * 3
def check(func):
    """
    TEMPORARY TYPE CHECKING TO ENSURE to_sql() IS OUTPUTTING THE CORRECT FORMAT

    Wraps a to_sql implementation and validates its return value: either an
    SQLScript, or a FlatList whose first element's ``sql`` is a Data mapping
    from type codes {"b", "n", "s", "j", "0"} to SQL fragments.
    """
    @decorate(func)
    def to_sql(self, schema, not_null=False, boolean=False, **kwargs):
        # NOTE(review): Log.error appears to raise, so each call below acts
        # as an assertion — confirm mo_logs semantics.
        if kwargs.get("many") != None:
            Log.error("not expecting many")
        try:
            output = func(self, schema, not_null, boolean)
        except Exception as e:
            Log.error("not expected", cause=e)
        if isinstance(output, SQLScript):
            # SQLScript results pass through untouched.
            return output
        if not isinstance(output, FlatList):
            Log.error("expecting FlatList")
        if not is_data(output[0].sql):
            Log.error("expecting Data")
        for k, v in output[0].sql.items():
            if k not in {"b", "n", "s", "j", "0"}:
                Log.error("expecting datatypes")
            if not isinstance(v, SQL):
                Log.error("expecting text")
        return output
    return to_sql
@extend(NullOp)
@check
def to_sql(self, schema, not_null=False, boolean=False):
    # NULL literal, typed as "0" (the IS_NULL slot).
    return wrap([{"name": ".", "sql": {"0": SQL_NULL}}])
@extend(TrueOp)
@check
def to_sql(self, schema, not_null=False, boolean=False):
    # Boolean TRUE literal under the "b" (boolean) slot.
    return wrap([{"name": ".", "sql": {"b": SQL_TRUE}}])
@extend(FalseOp)
@check
def to_sql(self, schema, not_null=False, boolean=False):
    # Boolean FALSE literal under the "b" (boolean) slot.
    return wrap([{"name": ".", "sql": {"b": SQL_FALSE}}])
def _inequality_to_sql(self, schema, not_null=False, boolean=False, many=True):
    """
    Convert an inequality operator (gt/gte/lt/lte) to a SQLScript.

    Both operands are coerced to numbers via NumberOp before comparison.
    NOTE(review): unlike _binaryop_to_sql this is not wrapped in @check and
    returns a SQLScript directly — SQLScript is a module-level forward
    reference assigned elsewhere; confirm it is populated before first use.
    """
    op, identity = _sql_operators[self.op]
    # .sql.n extracts the numeric-typed SQL fragment of each operand
    lhs = NumberOp(self.lhs).partial_eval().to_sql(schema, not_null=True)[0].sql.n
    rhs = NumberOp(self.rhs).partial_eval().to_sql(schema, not_null=True)[0].sql.n
    sql = sql_iso(lhs) + op + sql_iso(rhs)
    output = SQLScript(
        data_type=BOOLEAN,
        expr=sql,
        frum=self,
        # the comparison is missing when either operand is missing
        miss=OrOp([self.lhs.missing(), self.rhs.missing()]),
        schema=schema,
    )
    return output
@check
def _binaryop_to_sql(self, schema, not_null=False, boolean=False, many=True):
    """
    Convert a binary arithmetic/comparison operator to SQL, wrapping the
    expression in a CASE statement when either operand can be missing.
    """
    op, identity = _sql_operators[self.op]
    left = NumberOp(self.lhs).partial_eval().to_sql(schema, not_null=True)[0].sql.n
    right = NumberOp(self.rhs).partial_eval().to_sql(schema, not_null=True)[0].sql.n
    script = sql_iso(left) + op + sql_iso(right)

    if not_null:
        # caller guarantees no nulls; emit the raw expression
        return wrap([{"name": ".", "sql": {"n": script}}])

    missing = OrOp([self.lhs.missing(), self.rhs.missing()]).partial_eval()
    if missing is FALSE:
        sql = script
    else:
        # guard the expression so a missing operand yields NULL
        sql = (
            "CASE WHEN "
            + missing.to_sql(schema, boolean=True)[0].sql.b
            + " THEN NULL ELSE "
            + script
            + " END"
        )
    return wrap([{"name": ".", "sql": {"n": sql}}])
def multiop_to_sql(self, schema, not_null=False, boolean=False, many=False):
    """
    Convert a variadic operator (add/sum/mul/...) over self.terms to SQL.

    With no terms, emit the default; with a NULL default, COALESCE each
    term to the operator's identity; otherwise COALESCE the whole joined
    expression to the default.
    """
    sign, zero = _sql_operators[self.op]
    if len(self.terms) == 0:
        return SQLang[self.default].to_sql(schema)
    elif self.default is NULL:
        # NOTE(review): sql_call receives to_sql(...)'s result directly here,
        # while basic_multiop_to_sql indexes [0].sql.n first — confirm
        # sql_call accepts that wrapped form.
        return sign.join(
            sql_call("COALESCE", SQLang[t].to_sql(schema), zero)
            for t in self.terms
        )
    else:
        return sql_call(
            "COALESCE",
            sign.join(sql_iso(SQLang[t].to_sql(schema)) for t in self.terms),
            SQLang[self.default].to_sql(schema)
        )
def with_var(var, expression, eval):
    """
    :param var: NAME GIVEN TO expression
    :param expression: THE EXPRESSION TO COMPUTE FIRST
    :param eval: THE EXPRESSION TO COMPUTE SECOND, WITH var ASSIGNED
    :return: PYTHON EXPRESSION
    """
    # assemble: WITH x AS (SELECT (<expression>) AS <var>) SELECT <eval> FROM x
    fragments = (
        SQL("WITH x AS (SELECT ("),
        expression,
        SQL(") AS "),
        var,
        SQL(") SELECT "),
        eval,
        SQL(" FROM x"),
    )
    return ConcatSQL(*fragments)
def basic_multiop_to_sql(self, schema, not_null=False, boolean=False, many=False):
    """
    Convert a "basic.*" variadic operator to SQL; operands are assumed
    to be non-null numerics.
    """
    # strip the "basic." prefix to look up the underlying operator
    op, identity = _sql_operators[self.op.split("basic.")[1]]
    operands = (sql_iso(SQLang[t].to_sql(schema)[0].sql.n) for t in self.terms)
    return wrap([{"name": ".", "sql": {"n": op.join(operands)}}])
# Language registry for the SQL dialect of jx expressions
SQLang = Language("SQLang")
_sql_operators = {
    # (operator, zero-array default value) PAIR
    "add": (SQL_PLUS, SQL_ZERO),
    "sum": (SQL_PLUS, SQL_ZERO),
    "mul": (SQL_STAR, SQL_ONE),
    "sub": (SQL(" - "), None),
    "div": (SQL(" / "), None),
    # NOTE(review): SQLite has no "**" operator — confirm the target dialect
    "exp": (SQL(" ** "), None),
    "mod": (SQL(" % "), None),
    "gt": (SQL(" > "), None),
    "gte": (SQL(" >= "), None),
    "lte": (SQL(" <= "), None),
    "lt": (SQL_LT, None),
}
# Single-character SQL type codes used as keys in the per-column sql dicts
SQL_IS_NULL_TYPE = "0"
SQL_BOOLEAN_TYPE = "b"
SQL_NUMBER_TYPE = "n"
SQL_TIME_TYPE = "t"
# intervals share the numeric slot, so they round-trip back as NUMBER below
SQL_INTERVAL_TYPE = "n"
SQL_STRING_TYPE = "s"
SQL_OBJECT_TYPE = "j"
SQL_NESTED_TYPE = "a"
json_type_to_sql_type = {
    IS_NULL: SQL_IS_NULL_TYPE,
    BOOLEAN: SQL_BOOLEAN_TYPE,
    NUMBER: SQL_NUMBER_TYPE,
    TIME: SQL_TIME_TYPE,
    INTERVAL: SQL_INTERVAL_TYPE,
    STRING: SQL_STRING_TYPE,
    OBJECT: SQL_OBJECT_TYPE,
    NESTED: SQL_NESTED_TYPE,
}
# NOTE(review): SQL_NESTED_TYPE ("a") and SQL_TIME_TYPE for INTERVAL have no
# reverse mapping here — lookups of "a" will KeyError; confirm intentional.
sql_type_to_json_type = {
    None: None,
    SQL_IS_NULL_TYPE: IS_NULL,
    SQL_BOOLEAN_TYPE: BOOLEAN,
    SQL_NUMBER_TYPE: NUMBER,
    SQL_TIME_TYPE: TIME,
    SQL_STRING_TYPE: STRING,
    SQL_OBJECT_TYPE: OBJECT,
}
| mpl-2.0 |
Ziqi-Li/bknqgis | bokeh/bokeh/core/compat/bokeh_renderer.py | 6 | 21121 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import warnings
import matplotlib as mpl
import numpy as np
from six import string_types
from ...layouts import gridplot
from ...models import (ColumnDataSource, FactorRange, DataRange1d, DatetimeAxis, GlyphRenderer,
Grid, LinearAxis, Plot, CategoricalAxis, Legend, LegendItem)
from ...models.glyphs import (Asterisk, Circle, Cross, Diamond, InvertedTriangle,
Line, MultiLine, Patches, Square, Text, Triangle, X)
from ...plotting import DEFAULT_TOOLS
from ...plotting.helpers import _process_tools_arg
from ...util.dependencies import import_optional
from ..properties import value
from .mplexporter.renderers import Renderer
from .mpl_helpers import convert_color, convert_dashes, get_props_cycled, xkcd_line
pd = import_optional('pandas')
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehRenderer(Renderer):
    def __init__(self, tools, use_pd, xkcd):
        """Initial setup.

        tools: toolbar specification forwarded to Bokeh's tool parser.
        use_pd: whether pandas is used to convert period ordinals to timestamps.
        xkcd: whether to render in xkcd (hand-drawn) style.
        """
        self.fig = None          # final Bokeh figure/grid, assigned in close_figure()
        self.use_pd = use_pd
        self.tools = tools
        self.xkcd = xkcd
        self.zorder = {}         # renderer _id -> mpl zorder, used to sort renderers
        self.handles = {}        # id(mpl object) -> Bokeh renderer, used by legends
    def open_figure(self, fig, props):
        "Get the main plot properties and create the plot."
        # pixel dimensions = figure size in inches * dpi
        self.width = int(props['figwidth'] * props['dpi'])
        self.height = int(props['figheight'] * props['dpi'])
        self.plot = Plot(x_range=DataRange1d(),
                         y_range=DataRange1d(),
                         plot_width=self.width,
                         plot_height=self.height)
    def close_figure(self, fig):
        """Complete the plot: add tools, and for multi-axes figures split the
        accumulated renderers into one Plot per subplot and build a gridplot."""
        # Add tools
        tool_objs, tools_map = _process_tools_arg(self.plot, self.tools)
        self.plot.add_tools(*tool_objs)
        # Simple or Grid plot setup
        if len(fig.axes) <= 1:
            self.fig = self.plot
            self.plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
        else:
            # This loop splits the plot.renderers list at the "marker"
            # points (recorded in self._axes by close_axes) returning small
            # sublists corresponding with each subplot.
            subrends = []
            for i in range(1, len(self._axes)):
                start, end = self._axes[i-1], self._axes[i]
                subrends += [self.plot.renderers[start:end]]
            plots = []
            for i, axes in enumerate(fig.axes):
                # create a new plot for each subplot
                _plot = Plot(x_range=self.plot.x_range,
                             y_range=self.plot.y_range,
                             plot_width=self.width,
                             plot_height=self.height)
                _plot.title.text = ''
                # and add new tools
                _tool_objs, _tool_map = _process_tools_arg(_plot, DEFAULT_TOOLS)
                _plot.add_tools(*_tool_objs)
                # clean the plot ref from axis and grids
                _plot_rends = subrends[i]
                for r in _plot_rends:
                    if not isinstance(r, GlyphRenderer):
                        r.plot = None
                # add all the renderers into the new subplot
                for r in _plot_rends:
                    if isinstance(r, GlyphRenderer):
                        _plot.renderers.append(r)
                    elif isinstance(r, Grid):
                        _plot.add_layout(r)
                    else:
                        # an axis: re-attach it on the same side it occupied
                        # in the accumulating plot
                        if r in self.plot.below:
                            _plot.add_layout(r, 'below')
                        elif r in self.plot.above:
                            _plot.add_layout(r, 'above')
                        elif r in self.plot.left:
                            _plot.add_layout(r, 'left')
                        elif r in self.plot.right:
                            _plot.add_layout(r, 'right')
                _plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
                plots.append(_plot)
            # arrange the per-subplot Plots into the mpl subplot geometry
            (a, b, c) = fig.axes[0].get_geometry()
            p = np.array(plots)
            n = np.resize(p, (a, b))
            grid = gridplot(n.tolist())
            self.fig = grid
def open_axes(self, ax, props):
"Get axes data and create the axes and grids"
# Get axes, title and grid into class attributes.
self.ax = ax
self.plot.title.text = ax.get_title()
# to avoid title conversion by draw_text later
#Make sure that all information about the axes are passed to the properties
if props.get('xscale', False):
props['axes'][0]['scale'] = props['xscale']
if props.get('yscale', False):
props['axes'][1]['scale'] = props['yscale']
# Add axis
for props in props['axes']:
if props['position'] == "bottom" : location, dim, thing = "below", 0, ax.xaxis
elif props['position'] == "top" : location, dim, thing = "above", 0, ax.xaxis
else: location, dim, thing = props['position'], 1, ax.yaxis
baxis = self.make_axis(thing, location, props)
if dim==0:
gridlines = ax.get_xgridlines()
else:
gridlines = ax.get_ygridlines()
if gridlines:
self.make_grid(baxis, dim, gridlines[0])
    def close_axes(self, ax):
        "Complete the axes adding axes-dependent plot props"
        # mpl renamed get_axis_bgcolor -> get_facecolor; support both
        if hasattr(ax, 'get_facecolor'):
            background_fill_color = convert_color(ax.get_facecolor())
        else:
            background_fill_color = convert_color(ax.get_axis_bgcolor())
        self.plot.background_fill_color = background_fill_color
        if self.xkcd:
            self.plot.title.text_font = "Comic Sans MS, Textile, cursive"
            self.plot.title.text_font_style = "bold"
            self.plot.title.text_color = "black"
        # Add a "marker" Glyph to help the plot.renderers splitting in the GridPlot build
        self._axes = getattr(self, "_axes", [0])
        self._axes.append(len(self.plot.renderers))
    def open_legend(self, legend, props):
        """Build a Bokeh Legend from the mpl legend's labels/handles.

        Best-effort: if any handle was never rendered (missing from
        self.handles), the whole legend is silently dropped.
        """
        lgnd = Legend(location="top_right")
        try:
            for label, obj in zip(props['labels'], props['handles']):
                lgnd.items.append(LegendItem(label=value(label), renderers=[self.handles[id(obj)]]))
            self.plot.add_layout(lgnd)
        except KeyError:
            # deliberate: an unknown handle means we cannot map the legend
            pass
    def close_legend(self, legend):
        """No finalization needed for legends; required by the Renderer API."""
        pass
def draw_line(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Line glyph."
_x = data[:, 0]
if pd and self.use_pd:
try:
x = [pd.Period(ordinal=int(i), freq=self.ax.xaxis.freq).to_timestamp() for i in _x]
except AttributeError: # we probably can make this one more intelligent later
x = _x
else:
x = _x
y = data[:, 1]
if self.xkcd:
x, y = xkcd_line(x, y)
line = Line()
source = ColumnDataSource()
line.x = source.add(x)
line.y = source.add(y)
line.line_color = convert_color(style['color'])
line.line_width = style['linewidth']
line.line_alpha = style['alpha']
line.line_dash = [] if style['dasharray'] is "none" else [int(i) for i in style['dasharray'].split(",")] # str2list(int)
# line.line_join = line2d.get_solid_joinstyle() # not in mplexporter
# line.line_cap = cap_style_map[line2d.get_solid_capstyle()] # not in mplexporter
if self.xkcd:
line.line_width = 3
r = self.plot.add_glyph(source, line)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
    def draw_markers(self, data, coordinates, style, label, mplobj=None):
        """Given a mpl line2d instance create a Bokeh Marker glyph.

        ``data`` is an (N, 2) array of marker positions; the marker shape is
        chosen from style['marker'] via the map below.
        """
        x = data[:, 0]
        y = data[:, 1]
        # mpl marker code -> Bokeh marker glyph class
        marker_map = {
            ".": Circle,
            "o": Circle,
            "s": Square,
            "+": Cross,
            "^": Triangle,
            "v": InvertedTriangle,
            "x": X,
            "d": Diamond,
            "D": Diamond,
            "*": Asterisk,
        }
        # Not all matplotlib markers are currently handled; fall back to Circle if we encounter an
        # unhandled marker. See http://matplotlib.org/api/markers_api.html for a list of markers.
        try:
            marker = marker_map[style['marker']]()
        except KeyError:
            warnings.warn("Unable to handle marker: %s; defaulting to Circle" % style['marker'])
            marker = Circle()
        source = ColumnDataSource()
        marker.x = source.add(x)
        marker.y = source.add(y)
        marker.line_color = convert_color(style['edgecolor'])
        marker.fill_color = convert_color(style['facecolor'])
        marker.line_width = style['edgewidth']
        marker.size = style['markersize']
        marker.fill_alpha = marker.line_alpha = style['alpha']
        r = self.plot.add_glyph(source, marker)
        self.zorder[r._id] = style['zorder']
        self.handles[id(mplobj)] = r
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        """Intentionally unimplemented: single-path drawing is skipped with a
        warning; use a mpl PathCollection (draw_path_collection) instead."""
        warnings.warn("Path drawing has performance issues, please use mpl PathCollection instead")
        pass
    def draw_path_collection(self, paths, path_coordinates, path_transforms,
                             offsets, offset_coordinates, offset_order,
                             styles, mplobj=None):
        """Given a mpl PathCollection instance create a Bokeh Marker glyph.

        Path shapes are not converted; every point is rendered as a Circle at
        its offset. Per-point colors/linewidths are supported; sizes are only
        approximated (see below).
        """
        x = offsets[:, 0]
        y = offsets[:, 1]
        style = styles
        warnings.warn("Path marker shapes currently not handled, defaulting to Circle")
        marker = Circle()
        source = ColumnDataSource()
        marker.x = source.add(x)
        marker.y = source.add(y)
        # vectorized fill color when the collection has more than one
        if len(style['facecolor']) > 1:
            fill_color = []
            for color in style['facecolor']:
                # Apparently there is an issue with ColumnDataSources and rgb/a tuples, converting to hex
                fill_color.append('#%02x%02x%02x' % convert_color(tuple(map(tuple,[color]))[0]))
            marker.fill_color = source.add(fill_color)
        else:
            marker.fill_color = convert_color(tuple(map(tuple,style['facecolor']))[0])
        # vectorized edge color when the collection has more than one
        if len(style['edgecolor']) > 1:
            edge_color = []
            for color in style['edgecolor']:
                # Apparently there is an issue with ColumnDataSources, line_color, and rgb/a tuples, converting to hex
                edge_color.append('#%02x%02x%02x' % convert_color(tuple(map(tuple,[color]))[0]))
            marker.line_color = source.add(edge_color)
        else:
            marker.line_color = convert_color(tuple(map(tuple,style['edgecolor']))[0])
        # vectorized line width when the collection has more than one
        if len(style['linewidth']) > 1:
            line_width = []
            for width in style['linewidth']:
                line_width.append(width)
            marker.line_width = source.add(line_width)
        else:
            marker.line_width = style['linewidth'][0]
        if len(mplobj.get_axes().collections) > 1:
            # crude size heuristic: scale relative to the last collection
            warnings.warn("Path marker sizes support is limited and may not display as expected")
            marker.size = mplobj.get_sizes()[0]/mplobj.get_axes().collections[-1].get_sizes()[0]*20
        else:
            marker.size = 5
        marker.fill_alpha = marker.line_alpha = style['alpha']
        r = self.plot.add_glyph(source, marker)
        self.zorder[r._id] = style['zorder']
        self.handles[id(mplobj)] = r
    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        "Given a mpl text instance create a Bokeh Text glyph."
        # mpl give you the title and axes names as a text object (with specific locations)
        # inside the plot itself. That does not make sense inside Bokeh, so we
        # just skip the title and axes names from the conversion and covert any other text.
        if text_type in ['xlabel', 'ylabel', 'title']:
            return
        # only data-space text is converted; screen/axes-space text is skipped
        if coordinates != 'data':
            return
        x, y = position
        text = Text(x=x, y=y, text=[text])
        alignment_map = {"center": "middle", "top": "top", "bottom": "bottom", "baseline": "bottom"}
        # baseline not implemented in Bokeh, defaulting to bottom.
        text.text_alpha = style['alpha']
        text.text_font_size = "%dpx" % style['fontsize']
        text.text_color = convert_color(style['color'])
        text.text_align = style['halign']
        text.text_baseline = alignment_map[style['valign']]
        text.angle = style['rotation']
        ## Using get_fontname() works, but it's oftentimes not available in the browser,
        ## so it's better to just use the font family here.
        #text.text_font = mplText.get_fontname()) not in mplexporter
        #text.text_font = mplText.get_fontfamily()[0] # not in mplexporter
        #text.text_font_style = fontstyle_map[mplText.get_fontstyle()] # not in mplexporter
        ## we don't really have the full range of font weights, but at least handle bold
        #if mplText.get_weight() in ("bold", "heavy"):
            #text.text_font_style = bold
        source = ColumnDataSource()
        r = self.plot.add_glyph(source, text)
        self.zorder[r._id] = style['zorder']
        self.handles[id(mplobj)] = r
    def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
        """Image conversion is not supported; intentionally a no-op."""
        pass
def make_axis(self, ax, location, props):
"Given a mpl axes instance, returns a Bokeh LinearAxis object."
# TODO:
# * handle log scaling
# * map `labelpad` to `major_label_standoff`
# * deal with minor ticks once BokehJS supports them
# * handle custom tick locations once that is added to bokehJS
tf = props['tickformat']
tv = props['tickvalues']
if tf and any(isinstance(x, string_types) for x in tf):
laxis = CategoricalAxis(axis_label=ax.get_label_text())
assert np.min(tv) >= 0, "Assuming categorical axis have positive-integer dump tick values"
# Seaborn position its categories on dump tick values indented to zero;
# Matplotlib does from 1. We need then different offset given the assumed identation.
offset = np.min(tv) - 1
rng = FactorRange(factors=[str(x) for x in tf], offset=offset)
if location in ["above", "below"]:
self.plot.x_range = rng
else:
self.plot.y_range = rng
else:
if props['scale'] == "linear":
laxis = LinearAxis(axis_label=ax.get_label_text())
elif props['scale'] == "date":
laxis = DatetimeAxis(axis_label=ax.get_label_text())
self.plot.add_layout(laxis, location)
# First get the label properties by getting an mpl.Text object
label = ax.get_label()
self.text_props(label, laxis, prefix="axis_label_")
# Set the tick properties (for now just turn off if necessary)
# TODO: mirror tick properties
if props['nticks'] == 0:
laxis.major_tick_line_color = None
laxis.minor_tick_line_color = None
laxis.major_label_text_color = None
# To get the tick label format, we look at the first of the tick labels
# and assume the rest are formatted similarly.
ticklabels = ax.get_ticklabels()
if ticklabels:
self.text_props(ticklabels[0], laxis, prefix="major_label_")
#newaxis.bounds = axis.get_data_interval() # I think this is the right func...
if self.xkcd:
laxis.axis_line_width = 3
laxis.axis_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.axis_label_text_font_style = "bold"
laxis.axis_label_text_color = "black"
laxis.major_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.major_label_text_font_style = "bold"
laxis.major_label_text_color = "black"
return laxis
def make_grid(self, baxis, dimension, gridline):
"Given a mpl axes instance, returns a Bokeh Grid object."
lgrid = Grid(dimension=dimension,
ticker=baxis.ticker,
grid_line_color=convert_color(gridline.get_color()),
grid_line_width=gridline.get_linewidth())
self.plot.add_layout(lgrid)
def make_line_collection(self, col):
"Given a mpl collection instance create a Bokeh MultiLine glyph."
xydata = col.get_segments()
t_xydata = [np.transpose(seg) for seg in xydata]
xs = [t_xydata[x][0] for x in range(len(t_xydata))]
ys = [t_xydata[x][1] for x in range(len(t_xydata))]
if self.xkcd:
xkcd_xs = [xkcd_line(xs[i], ys[i])[0] for i in range(len(xs))]
xkcd_ys = [xkcd_line(xs[i], ys[i])[1] for i in range(len(ys))]
xs = xkcd_xs
ys = xkcd_ys
multiline = MultiLine()
source = ColumnDataSource()
multiline.xs = source.add(xs)
multiline.ys = source.add(ys)
self.multiline_props(source, multiline, col)
r = self.plot.add_glyph(source, multiline)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
def make_poly_collection(self, col):
"Given a mpl collection instance create a Bokeh Patches glyph."
xs = []
ys = []
for path in col.get_paths():
for sub_poly in path.to_polygons():
xx, yy = sub_poly.transpose()
xs.append(xx)
ys.append(yy)
patches = Patches()
source = ColumnDataSource()
patches.xs = source.add(xs)
patches.ys = source.add(ys)
self.patches_props(source, patches, col)
r = self.plot.add_glyph(source, patches)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
    def multiline_props(self, source, multiline, col):
        "Takes a mpl collection object to extract and set up some Bokeh multiline properties."
        # per-line colors/widths, cycled to the collection length
        colors = get_props_cycled(col, col.get_colors(), fx=lambda x: mpl.colors.rgb2hex(x))
        colors = [convert_color(x) for x in colors]
        widths = get_props_cycled(col, col.get_linewidth())
        multiline.line_color = source.add(colors)
        multiline.line_width = source.add(widths)
        if col.get_alpha() is not None:
            multiline.line_alpha = col.get_alpha()
        # linestyle is (offset, on_off_seq); an empty on/off sequence means solid
        offset = col.get_linestyle()[0][0]
        if not col.get_linestyle()[0][1]:
            on_off = []
        else:
            on_off = map(int,col.get_linestyle()[0][1])
        multiline.line_dash_offset = convert_dashes(offset)
        multiline.line_dash = list(convert_dashes(tuple(on_off)))
    def patches_props(self, source, patches, col):
        "Takes a mpl collection object to extract and set up some Bokeh patches properties."
        # per-patch fill/edge colors and widths, cycled to the collection length
        face_colors = get_props_cycled(col, col.get_facecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
        face_colors = [convert_color(x) for x in face_colors]
        patches.fill_color = source.add(face_colors)
        edge_colors = get_props_cycled(col, col.get_edgecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
        edge_colors = [convert_color(x) for x in edge_colors]
        patches.line_color = source.add(edge_colors)
        widths = get_props_cycled(col, col.get_linewidth())
        patches.line_width = source.add(widths)
        if col.get_alpha() is not None:
            patches.line_alpha = col.get_alpha()
            patches.fill_alpha = col.get_alpha()
        # linestyle is (offset, on_off_seq); an empty on/off sequence means solid
        offset = col.get_linestyle()[0][0]
        if not col.get_linestyle()[0][1]:
            on_off = []
        else:
            on_off = map(int,col.get_linestyle()[0][1])
        patches.line_dash_offset = convert_dashes(offset)
        patches.line_dash = list(convert_dashes(tuple(on_off)))
    def text_props(self, text, obj, prefix=""):
        """Copy font family/size/style from a mpl Text onto a Bokeh model.

        ``prefix`` selects the target property group, e.g. "axis_label_" or
        "major_label_".
        """
        fp = text.get_font_properties()
        setattr(obj, prefix+"text_font", fp.get_family()[0])
        setattr(obj, prefix+"text_font_size", "%fpt" % fp.get_size_in_points())
        setattr(obj, prefix+"text_font_style", fp.get_style())
| gpl-2.0 |
gkc1000/pyscf | examples/tddft/30-change_xc_grids.py | 2 | 1470 | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
The TDDFT calculations by default use the same XC functional, grids, _numint
schemes as the ground state DFT calculations. Different XC, grids, _numint
can be set in TDDFT.
'''
import copy
from pyscf import gto, dft, tddft
mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='6-31g*')
mf = dft.RKS(mol).run(xc='pbe0')
#
# A common change for TDDFT is to use different XC functional library. For
# example, PBE0 is not supported by the default XC library (libxc) in the TDDFT
# calculation. Changing to xcfun library for TDDFT can solve this problem
#
mf._numint.libxc = dft.xcfun
# PySCF-1.6.1 and newer supports the .TDDFT method to create a TDDFT
# object after importing tdscf module.
td = mf.TDDFT()
# 27.2114 converts excitation energies from Hartree to eV
print(td.kernel()[0] * 27.2114)
#
# Overwriting the relevant attributes of the ground state mf object,
# the TDDFT calculations can be run with different XC, grids.
#
mf.xc = 'lda,vwn'
mf.grids.set(level=2).kernel(with_non0tab=True)
td = mf.TDDFT()
print(td.kernel()[0] * 27.2114)
#
# Overwriting the ground state SCF object is unsafe. A better solution is to
# create a new fake SCF object to hold different XC, grids parameters.
#
from pyscf.dft import numint
mf = dft.RKS(mol).run(xc='pbe0')
# shallow copy so the converged density/orbitals are reused while XC and
# grids are replaced on the copy only
mf1 = copy.copy(mf)
mf1.xc = 'lda,vwn'
mf1.grids = dft.Grids(mol)
mf1.grids.level = 2
mf1._numint = numint.NumInt()
mf1._numint.libxc = dft.xcfun
td = mf1.TDDFT()
print(td.kernel()[0] * 27.2114)
| apache-2.0 |
drawks/ansible | test/units/modules/network/onyx/test_onyx_buffer_pool.py | 37 | 3265 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_buffer_pool
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxBufferPoolModule(TestOnyxModule):
    """Unit tests for the onyx_buffer_pool module.

    ``buffer_pool_configured`` toggles whether the fixture file is returned
    by the mocked _show_traffic_pool, simulating an already-configured pool.
    """
    module = onyx_buffer_pool
    buffer_pool_configured = False
    def setUp(self):
        super(TestOnyxBufferPoolModule, self).setUp()
        # mock the device query so no real connection is made
        self.mock_get_buffer_pool_config = patch.object(
            onyx_buffer_pool.OnyxBufferPoolModule, "_show_traffic_pool")
        self.get_buffer_pool_config = self.mock_get_buffer_pool_config.start()
        # mock config loading so generated commands are only collected
        self.mock_load_config = patch(
            'ansible.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()
    def tearDown(self):
        super(TestOnyxBufferPoolModule, self).tearDown()
        self.mock_get_buffer_pool_config.stop()
        self.mock_load_config.stop()
    def load_fixtures(self, commands=None, transport='cli'):
        buffer_pool_config_file = 'onyx_buffer_pool.cfg'
        self.get_buffer_pool_config.return_value = None
        if self.buffer_pool_configured is True:
            buffer_pool_data = load_fixture(buffer_pool_config_file)
            self.get_buffer_pool_config.return_value = buffer_pool_data
        self.load_config.return_value = None
    def test_buffer_pool_no_change(self):
        # existing config matches the requested args -> no commands emitted
        self.buffer_pool_configured = True
        set_module_args(dict(name="roce", pool_type="lossless",
                             memory_percent=50.0, switch_priority=3))
        self.execute_module(changed=False)
    def test_buffer_pool_with_change(self):
        # no existing config -> the full pool definition is pushed
        set_module_args(dict(name="roce", pool_type="lossless",
                             memory_percent=50.0, switch_priority=3))
        commands = ["traffic pool roce type lossless",
                    "traffic pool roce memory percent 50.0",
                    "traffic pool roce map switch-priority 3"
                    ]
        self.execute_module(changed=True, commands=commands)
    def test_memory_percent_with_change(self):
        # only the differing attribute generates a command
        self.buffer_pool_configured = True
        set_module_args(dict(name="roce", pool_type="lossless",
                             memory_percent=60.0, switch_priority=3))
        commands = ["traffic pool roce memory percent 60.0"]
        self.execute_module(changed=True, commands=commands)
    def test_switch_priority_with_change(self):
        self.buffer_pool_configured = True
        set_module_args(dict(name="roce", pool_type="lossless",
                             memory_percent=50.0, switch_priority=5))
        commands = ["traffic pool roce map switch-priority 5"]
        self.execute_module(changed=True, commands=commands)
    def test_pool_type_with_change(self):
        # pool_type defaults to lossy when omitted, changing the fixture's lossless
        self.buffer_pool_configured = True
        set_module_args(dict(name="roce", memory_percent=50.0, switch_priority=3))
        commands = ["traffic pool roce type lossy"]
        self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
lochiiconnectivity/boto | boto/s3/key.py | 4 | 76664 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import with_statement
import errno
import mimetypes
import os
import re
import rfc822
import StringIO
import base64
import binascii
import math
import urllib
import boto.utils
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
try:
from hashlib import md5
except ImportError:
from md5 import md5
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
    # Content-Type used when none is supplied by the caller
    DefaultContentType = 'application/octet-stream'
    # XML request body template for POST ?restore (Glacier restore); %s is
    # the number of days the restored copy stays available
    RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
    <RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
      <Days>%s</Days>
    </RestoreRequest>"""
    # Read/write chunk size in bytes, overridable via boto config
    BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
    # The object metadata fields a user can set, other than custom metadata
    # fields (i.e., those beginning with a provider-specific prefix like
    # x-amz-meta).
    base_user_settable_fields = set(["cache-control", "content-disposition",
                                    "content-encoding", "content-language",
                                    "content-md5", "content-type"])
    # same field names with '-' replaced by '_', for attribute-style lookup
    _underscore_base_user_settable_fields = set()
    for f in base_user_settable_fields:
      _underscore_base_user_settable_fields.add(f.replace('-', '_'))
    def __init__(self, bucket=None, name=None):
        """Create a Key, optionally bound to *bucket* under *name*."""
        self.bucket = bucket
        self.name = name
        self.metadata = {}
        self.cache_control = None
        self.content_type = self.DefaultContentType
        self.content_encoding = None
        self.content_disposition = None
        self.content_language = None
        self.filename = None
        self.etag = None
        self.is_latest = False
        self.last_modified = None
        self.owner = None
        self.storage_class = 'STANDARD'
        self.path = None
        self.resp = None      # open HTTP response while reading; see open_read
        self.mode = None      # 'r' while open for reading
        self.size = None
        self.version_id = None
        self.source_version_id = None
        self.delete_marker = False
        self.encrypted = None
        # If the object is being restored, this attribute will be set to True.
        # If the object is restored, it will be set to False. Otherwise this
        # value will be None. If the restore is completed (ongoing_restore =
        # False), the expiry_date will be populated with the expiry date of the
        # restored object.
        self.ongoing_restore = None
        self.expiry_date = None
        self.local_hashes = {}  # hash name -> raw digest bytes (e.g. 'md5')
def __repr__(self):
if self.bucket:
return '<Key: %s,%s>' % (self.bucket.name, self.name)
else:
return '<Key: None,%s>' % self.name
    def __iter__(self):
        # the Key is its own iterator; presumably next() is defined later in
        # this class (outside this view) to stream the object's bytes — verify
        return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
key = property(_get_key, _set_key);
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
md5 = property(_get_md5, _set_md5);
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_base64(self.local_hashes['md5']).rstrip('\n')
def _set_base64md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
base64md5 = property(_get_base64md5, _set_base64md5);
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = base64.encodestring(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
    def handle_version_headers(self, resp, force=False):
        """Populate version_id/source_version_id/delete_marker from *resp*.

        With force=True an already-set version_id is overwritten.
        """
        provider = self.bucket.connection.provider
        # If the Key object already has a version_id attribute value, it
        # means that it represents an explicit version and the user is
        # doing a get_contents_*(version_id=<foo>) to retrieve another
        # version of the Key. In that case, we don't really want to
        # overwrite the version_id in this Key object. Comprende?
        if self.version_id is None or force:
            self.version_id = resp.getheader(provider.version_id, None)
        self.source_version_id = resp.getheader(provider.copy_source_version_id,
                                                None)
        if resp.getheader(provider.delete_marker, 'false') == 'true':
            self.delete_marker = True
        else:
            self.delete_marker = False
def handle_restore_headers(self, response):
header = response.getheader('x-amz-restore')
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
    def handle_addl_headers(self, headers):
        """
        Used by Key subclasses to do additional, provider-specific
        processing of response headers. No-op for this base class.

        :type headers: list
        :param headers: (header name, value) tuples as returned by
            ``resp.getheaders()``.
        """
        pass
    def open_read(self, headers=None, query_args='',
                  override_num_retries=None, response_headers=None):
        """
        Open this key for reading

        :type headers: dict
        :param headers: Headers to pass in the web request

        :type query_args: string
        :param query_args: Arguments to pass in the query string
            (ie, 'torrent')

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying GET.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response. See
            http://goo.gl/EWOPb for details.
        """
        # Only issue the GET once: subsequent calls reuse the open response.
        if self.resp == None:
            self.mode = 'r'
            provider = self.bucket.connection.provider
            self.resp = self.bucket.connection.make_request(
                'GET', self.bucket.name, self.name, headers,
                query_args=query_args,
                override_num_retries=override_num_retries)
            # Accept statuses 199-299 (effectively the 2xx success range);
            # anything else becomes a storage_response_error.
            if self.resp.status < 199 or self.resp.status > 299:
                body = self.resp.read()
                raise provider.storage_response_error(self.resp.status,
                                                      self.resp.reason, body)
            # NOTE(review): this rebinds the response_headers *parameter* to
            # the actual response headers; the caller-supplied value is not
            # consulted here (it is forwarded to the service elsewhere).
            response_headers = self.resp.msg
            self.metadata = boto.utils.get_aws_metadata(response_headers,
                                                        provider)
            for name, value in response_headers.items():
                # To get correct size for Range GETs, use Content-Range
                # header if one was returned. If not, use Content-Length
                # header.
                if (name.lower() == 'content-length' and
                        'Content-Range' not in response_headers):
                    self.size = int(value)
                elif name.lower() == 'content-range':
                    # Content-Range: bytes a-b/total -> total is the size.
                    end_range = re.sub('.*/(.*)', '\\1', value)
                    self.size = int(end_range)
                elif name.lower() == 'etag':
                    self.etag = value
                elif name.lower() == 'content-type':
                    self.content_type = value
                elif name.lower() == 'content-encoding':
                    self.content_encoding = value
                elif name.lower() == 'content-language':
                    self.content_language = value
                elif name.lower() == 'last-modified':
                    self.last_modified = value
                elif name.lower() == 'cache-control':
                    self.cache_control = value
                elif name.lower() == 'content-disposition':
                    self.content_disposition = value
            self.handle_version_headers(self.resp)
            self.handle_encryption_headers(self.resp)
            self.handle_addl_headers(self.resp.getheaders())
    def open_write(self, headers=None, override_num_retries=None):
        """
        Open this key for writing.

        Not yet implemented

        :type headers: dict
        :param headers: Headers to pass in the write request

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying PUT.
        """
        # Placeholder: streaming write mode is not supported by this class;
        # uploads go through set_contents_from_* / send_file instead.
        raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
all metadata and ACL info bucket changing the storage class or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
if new_storage_class == 'STANDARD':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name))
def delete(self):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
self.metadata[name] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket != None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket != None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds
:type method: string
:param method: The method to use for retrieving the file
(default is GET)
:type headers: dict
:param headers: Any headers to pass along in the request
:type query_auth: bool
:param query_auth:
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
    def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
                            query_args=None, chunked_transfer=False, size=None,
                            hash_algs=None):
        """Core upload implementation behind send_file().

        Streams bytes from *fp* to the service via PUT (optionally with
        chunked Transfer-Encoding), computing the requested digests on the
        fly (MD5 by default) into ``self.local_hashes`` and invoking *cb*
        to report progress. ``self.size`` is updated to the byte count
        actually sent; version headers from the response are applied with
        force=True.

        :param hash_algs: optional dict mapping algorithm name to a
            hashlib-style constructor used to digest the streamed data.
        """
        provider = self.bucket.connection.provider
        try:
            spos = fp.tell()
        except IOError:
            # fp is not seekable (e.g. a pipe/stream); see sender() below
            # for how retries are aborted once data has been consumed.
            spos = None
            self.read_from_stream = False
        # If hash_algs is unset and the MD5 hasn't already been computed,
        # default to an MD5 hash_alg to hash the data on-the-fly.
        if hash_algs is None and not self.md5:
            hash_algs = {'md5': md5}
        # One streaming digester per requested algorithm.
        digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
        # sender() is handed to make_request() and performs the actual body
        # write over the raw HTTP connection.
        def sender(http_conn, method, path, data, headers):
            # This function is called repeatedly for temporary retries
            # so we must be sure the file pointer is pointing at the
            # start of the data.
            if spos is not None and spos != fp.tell():
                fp.seek(spos)
            elif spos is None and self.read_from_stream:
                # if seek is not supported, and we've read from this
                # stream already, then we need to abort retries to
                # avoid setting bad data.
                raise provider.storage_data_error(
                    'Cannot retry failed request. fp does not support seeking.')
            http_conn.putrequest(method, path)
            for key in headers:
                http_conn.putheader(key, headers[key])
            http_conn.endheaders()
            save_debug = self.bucket.connection.debug
            self.bucket.connection.debug = 0
            # If the debuglevel < 4 we don't want to show connection
            # payload, so turn off HTTP connection-level debug output (to
            # be restored below).
            # Use the getattr approach to allow this to work in AppEngine.
            if getattr(http_conn, 'debuglevel', 0) < 4:
                http_conn.set_debuglevel(0)
            data_len = 0
            if cb:
                if size:
                    cb_size = size
                elif self.size:
                    cb_size = self.size
                else:
                    cb_size = 0
                if chunked_transfer and cb_size == 0:
                    # For chunked Transfer, we call the cb for every 1MB
                    # of data transferred, except when we know size.
                    # NOTE(review): relies on Python 2 integer division;
                    # on Python 3 this expression would yield a float.
                    cb_count = (1024 * 1024) / self.BufferSize
                elif num_cb > 1:
                    cb_count = int(
                        math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
                elif num_cb < 0:
                    cb_count = -1
                else:
                    cb_count = 0
                i = 0
                cb(data_len, cb_size)
            # Read at most BufferSize bytes at a time, bounded by the
            # remaining byte budget when a size limit was given.
            bytes_togo = size
            if bytes_togo and bytes_togo < self.BufferSize:
                chunk = fp.read(bytes_togo)
            else:
                chunk = fp.read(self.BufferSize)
            if spos is None:
                # read at least something from a non-seekable fp.
                self.read_from_stream = True
            while chunk:
                chunk_len = len(chunk)
                data_len += chunk_len
                if chunked_transfer:
                    http_conn.send('%x;\r\n' % chunk_len)
                    http_conn.send(chunk)
                    http_conn.send('\r\n')
                else:
                    http_conn.send(chunk)
                for alg in digesters:
                    digesters[alg].update(chunk)
                if bytes_togo:
                    bytes_togo -= chunk_len
                    if bytes_togo <= 0:
                        break
                if cb:
                    i += 1
                    if i == cb_count or cb_count == -1:
                        cb(data_len, cb_size)
                        i = 0
                if bytes_togo and bytes_togo < self.BufferSize:
                    chunk = fp.read(bytes_togo)
                else:
                    chunk = fp.read(self.BufferSize)
            self.size = data_len
            for alg in digesters:
                self.local_hashes[alg] = digesters[alg].digest()
            if chunked_transfer:
                # Zero-length chunk terminates a chunked transfer.
                http_conn.send('0\r\n')
                # http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
                http_conn.send('\r\n')
            if cb and (cb_count <= 1 or i > 0) and data_len > 0:
                cb(data_len, cb_size)
            http_conn.set_debuglevel(save_debug)
            self.bucket.connection.debug = save_debug
            response = http_conn.getresponse()
            body = response.read()
            if not self.should_retry(response, chunked_transfer):
                raise provider.storage_response_error(
                    response.status, response.reason, body)
            return response
        if not headers:
            headers = {}
        else:
            headers = headers.copy()
        # Overwrite user-supplied user-agent.
        for header in find_matching_headers('User-Agent', headers):
            del headers[header]
        headers['User-Agent'] = UserAgent
        if self.storage_class != 'STANDARD':
            headers[provider.storage_class_header] = self.storage_class
        if find_matching_headers('Content-Encoding', headers):
            self.content_encoding = merge_headers_by_name(
                'Content-Encoding', headers)
        if find_matching_headers('Content-Language', headers):
            self.content_language = merge_headers_by_name(
                'Content-Language', headers)
        content_type_headers = find_matching_headers('Content-Type', headers)
        if content_type_headers:
            # Some use cases need to suppress sending of the Content-Type
            # header and depend on the receiving server to set the content
            # type. This can be achieved by setting headers['Content-Type']
            # to None when calling this method.
            if (len(content_type_headers) == 1 and
                    headers[content_type_headers[0]] is None):
                # Delete null Content-Type value to skip sending that header.
                del headers[content_type_headers[0]]
            else:
                self.content_type = merge_headers_by_name(
                    'Content-Type', headers)
        elif self.path:
            # No explicit Content-Type: guess one from the file extension.
            self.content_type = mimetypes.guess_type(self.path)[0]
            if self.content_type == None:
                self.content_type = self.DefaultContentType
            headers['Content-Type'] = self.content_type
        else:
            headers['Content-Type'] = self.content_type
        if self.base64md5:
            headers['Content-MD5'] = self.base64md5
        if chunked_transfer:
            headers['Transfer-Encoding'] = 'chunked'
            #if not self.base64md5:
            #    headers['Trailer'] = "Content-MD5"
        else:
            headers['Content-Length'] = str(self.size)
        headers['Expect'] = '100-Continue'
        headers = boto.utils.merge_meta(headers, self.metadata, provider)
        resp = self.bucket.connection.make_request(
            'PUT',
            self.bucket.name,
            self.name,
            headers,
            sender=sender,
            query_args=query_args
        )
        self.handle_version_headers(resp, force=True)
        self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
if self.etag != '"%s"' % self.md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5')
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
in place into different parts. Less bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket != None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
    def set_contents_from_file(self, fp, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               reduced_redundancy=False, query_args=None,
                               encrypt_key=False, size=None, rewind=False):
        """
        Store an object in S3 using the name of the Key object as the
        key in S3 and the contents of the file pointed to by 'fp' as the
        contents. The data is read from 'fp' from its current position until
        'size' bytes have been read or EOF.

        :type fp: file
        :param fp: the file whose contents to upload

        :type headers: dict
        :param headers: Additional HTTP headers that will be sent with
            the PUT request.

        :type replace: bool
        :param replace: If this parameter is False, the method will
            first check to see if an object exists in the bucket with
            the same key. If it does, it won't overwrite it. The
            default value is True which will overwrite the object.

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted to S3 and
            the second representing the size of the to be transmitted
            object.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.

        :type md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element. This is the same format returned by the
            compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason
            prior to upload, it's silly to have to do it twice so this
            param, if present, will be used as the MD5 values of the
            file. Otherwise, the checksum will be computed.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3, provides lower
            redundancy at lower storage cost.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type size: int
        :param size: (optional) The Maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the file
            up into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Less bytes may be available.

        :type rewind: bool
        :param rewind: (optional) If True, the file pointer (fp) will
            be rewound to the start before any bytes are read from
            it. The default behaviour is False which reads from the
            current position of the file pointer (fp).

        :rtype: int
        :return: The number of bytes written to the key.
        """
        provider = self.bucket.connection.provider
        # NOTE(review): a caller-supplied headers dict is mutated in place
        # by the assignments below.
        headers = headers or {}
        if policy:
            headers[provider.acl_header] = policy
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        if rewind:
            # caller requests reading from beginning of fp.
            fp.seek(0, os.SEEK_SET)
        else:
            # The following seek/tell/seek logic is intended
            # to detect applications using the older interface to
            # set_contents_from_file(), which automatically rewound the
            # file each time the Key was reused. This changed with commit
            # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
            # split into multiple parts and uploaded in parallel, and at
            # the time of that commit this check was added because otherwise
            # older programs would get a success status and upload an empty
            # object. Unfortuantely, it's very inefficient for fp's implemented
            # by KeyFile (used, for example, by gsutil when copying between
            # providers). So, we skip the check for the KeyFile case.
            # TODO: At some point consider removing this seek/tell/seek
            # logic, after enough time has passed that it's unlikely any
            # programs remain that assume the older auto-rewind interface.
            if not isinstance(fp, KeyFile):
                spos = fp.tell()
                fp.seek(0, os.SEEK_END)
                if fp.tell() == spos:
                    fp.seek(0, os.SEEK_SET)
                    if fp.tell() != spos:
                        # Raise an exception as this is likely a programming
                        # error whereby there is data before the fp but nothing
                        # after it.
                        fp.seek(spos)
                        raise AttributeError('fp is at EOF. Use rewind option '
                                             'or seek() to data start.')
                # seek back to the correct position.
                fp.seek(spos)
        if reduced_redundancy:
            self.storage_class = 'REDUCED_REDUNDANCY'
            if provider.storage_class_header:
                headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced reduncancy?
                # What if different providers provide different classes?
        if hasattr(fp, 'name'):
            self.path = fp.name
        # (Kept as '!= None' to avoid any behavior drift; PEP 8 would
        # prefer 'is not None'.)
        if self.bucket != None:
            if not md5 and provider.supports_chunked_transfer():
                # defer md5 calculation to on the fly and
                # we don't know anything about size yet.
                chunked_transfer = True
                self.size = None
            else:
                chunked_transfer = False
                if isinstance(fp, KeyFile):
                    # Avoid EOF seek for KeyFile case as it's very inefficient.
                    key = fp.getkey()
                    size = key.size - fp.tell()
                    self.size = size
                    # At present both GCS and S3 use MD5 for the etag for
                    # non-multipart-uploaded objects. If the etag is 32 hex
                    # chars use it as an MD5, to avoid having to read the file
                    # twice while transferring.
                    if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
                        etag = key.etag.strip('"')
                        md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
                if not md5:
                    # compute_md5() and also set self.size to actual
                    # size of the bytes read computing the md5.
                    md5 = self.compute_md5(fp, size)
                    # adjust size if required
                    size = self.size
                elif size:
                    self.size = size
                else:
                    # If md5 is provided, still need to size so
                    # calculate based on bytes to end of content
                    spos = fp.tell()
                    fp.seek(0, os.SEEK_END)
                    self.size = fp.tell() - spos
                    fp.seek(spos)
                    size = self.size
            # md5 is the (hexdigest, base64) tuple regardless of which path
            # above produced it.
            self.md5 = md5[0]
            self.base64md5 = md5[1]
            if self.name == None:
                self.name = self.md5
            if not replace:
                if self.bucket.lookup(self.name):
                    return
            self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
                           query_args=query_args,
                           chunked_transfer=chunked_transfer, size=size)
        # return number of bytes written.
        return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               reduced_redundancy=False,
                               encrypt_key=False):
        """
        Store an object in S3 using the name of the Key object as the
        key in S3 and the contents of the file named by 'filename'.
        See set_contents_from_file method for details about the
        parameters.
        :type filename: string
        :param filename: The name of the file that you want to put onto S3
        :type headers: dict
        :param headers: Additional headers to pass along with the
            request to AWS.
        :type replace: bool
        :param replace: If True, replaces the contents of the file
            if it already exists.
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted to S3 and
            the second representing the size of the to be transmitted
            object.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.
        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.
        :type md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element.  This is the same format returned by the
            compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason
            prior to upload, it's silly to have to do it twice so this
            param, if present, will be used as the MD5 values of the
            file.  Otherwise, the checksum will be computed.
        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3, provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object
            will be encrypted on the server-side by S3 and will be
            stored in an encrypted form while at rest in S3.
        :rtype: int
        :return: The number of bytes written to the key.
        """
        # Open in binary mode so bytes are streamed to S3 unmodified;
        # the upload itself is delegated to set_contents_from_file.
        with open(filename, 'rb') as fp:
            return self.set_contents_from_file(fp, headers, replace, cb,
                                               num_cb, policy, md5,
                                               reduced_redundancy,
                                               encrypt_key=encrypt_key)
def set_contents_from_string(self, s, headers=None, replace=True,
                             cb=None, num_cb=10, policy=None, md5=None,
                             reduced_redundancy=False,
                             encrypt_key=False):
        """
        Store the string ``s`` in S3 under this Key's name.

        Unicode input is encoded as UTF-8 first; the data is then wrapped
        in an in-memory file object and handed to set_contents_from_file.
        See that method for full details about the parameters.
        :type s: string
        :param s: The data to upload.
        :type headers: dict
        :param headers: Additional headers to pass along with the
            request to AWS.
        :type replace: bool
        :param replace: If True, replaces the contents of the key if
            it already exists.
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload.  The callback should accept two
            integer parameters: the number of bytes transmitted so far
            and the total size of the object.
        :type num_cb: int
        :param num_cb: (optional) With ``cb``, the maximum number of
            times the callback will be invoked during the transfer.
        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.
        :param md5: Optional (hexdigest, base64digest) tuple as returned
            by compute_md5; supplied to avoid recomputing the checksum.
        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, store the new Key with the
            REDUCED_REDUNDANCY storage class (lower cost, lower
            redundancy).
        :type encrypt_key: bool
        :param encrypt_key: If True, the object is encrypted
            server-side by S3 while at rest.
        """
        if isinstance(s, unicode):
            s = s.encode("utf-8")
        buf = StringIO.StringIO(s)
        bytes_written = self.set_contents_from_file(
            buf, headers, replace, cb, num_cb,
            policy, md5, reduced_redundancy,
            encrypt_key=encrypt_key)
        buf.close()
        return bytes_written
def get_file(self, fp, headers=None, cb=None, num_cb=10,
             torrent=False, version_id=None, override_num_retries=None,
             response_headers=None):
        """
        Retrieves a file from an S3 Key
        :type fp: file
        :param fp: File pointer to put the data into
        :type headers: dict
        :param headers: Headers to send when retrieving the file
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted to S3 and
            the second representing the size of the to be transmitted
            object.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.
        :type torrent: bool
        :param torrent: Flag for whether to get a torrent for the file
        :type version_id: str
        :param version_id: (optional) Specific object version to fetch;
            falls back to this Key's own version_id when omitted.
        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying GET.
        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.
        """
        # Thin public wrapper: all work happens in _get_file_internal,
        # with no extra query args and default (md5) hashing behavior.
        self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
                                torrent=torrent, version_id=version_id,
                                override_num_retries=override_num_retries,
                                response_headers=response_headers,
                                hash_algs=None,
                                query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
                       torrent=False, version_id=None, override_num_retries=None,
                       response_headers=None, hash_algs=None, query_args=None):
        """
        Shared implementation behind get_file()/get_torrent_file().

        Streams this key's bytes into ``fp``, optionally reporting
        progress through ``cb`` and accumulating digests (by default
        md5, unless downloading a torrent) into ``self.local_hashes``.
        """
        if headers is None:
            headers = {}
        # Temporarily silence per-request debug output at level 1 so the
        # body bytes are not dumped; restored at the end of the method.
        save_debug = self.bucket.connection.debug
        if self.bucket.connection.debug == 1:
            self.bucket.connection.debug = 0
        query_args = query_args or []
        if torrent:
            query_args.append('torrent')
        # Torrent downloads are metadata, not object data, so no digest
        # is computed for them by default.
        if hash_algs is None and not torrent:
            hash_algs = {'md5': md5}
        digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
        # If a version_id is passed in, use that.  If not, check to see
        # if the Key object has an explicit version_id and, if so, use that.
        # Otherwise, don't pass a version_id query param.
        if version_id is None:
            version_id = self.version_id
        if version_id:
            query_args.append('versionId=%s' % version_id)
        if response_headers:
            for key in response_headers:
                query_args.append('%s=%s' % (
                    key, urllib.quote(response_headers[key])))
        query_args = '&'.join(query_args)
        self.open('r', headers, query_args=query_args,
                  override_num_retries=override_num_retries)
        data_len = 0
        if cb:
            if self.size is None:
                cb_size = 0
            else:
                cb_size = self.size
            if self.size is None and num_cb != -1:
                # If size is not available due to chunked transfer for example,
                # we'll call the cb for every 1MB of data transferred.
                cb_count = (1024 * 1024) / self.BufferSize
            elif num_cb > 1:
                cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            i = 0
            # Initial callback with zero bytes transferred.
            cb(data_len, cb_size)
        try:
            # Iterating the key yields successive data chunks.
            for bytes in self:
                fp.write(bytes)
                data_len += len(bytes)
                for alg in digesters:
                    digesters[alg].update(bytes)
                if cb:
                    if cb_size > 0 and data_len >= cb_size:
                        break
                    i += 1
                    if i == cb_count or cb_count == -1:
                        cb(data_len, cb_size)
                        i = 0
        except IOError, e:
            if e.errno == errno.ENOSPC:
                raise StorageDataError('Out of space for destination file '
                                       '%s' % fp.name)
            raise
        # Final callback so the consumer sees the completed byte count.
        if cb and (cb_count <= 1 or i > 0) and data_len > 0:
            cb(data_len, cb_size)
        for alg in digesters:
            self.local_hashes[alg] = digesters[alg].digest()
        # For chunked transfers the size is only known after the fact.
        if self.size is None and not torrent and "Range" not in headers:
            self.size = data_len
        self.close()
        self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
        """
        Write this key's torrent file into ``fp``.

        Thin wrapper around :meth:`get_file` with ``torrent=True``; see
        that method for the full parameter semantics.
        :type fp: file
        :param fp: The file pointer of where to put the torrent
        :type headers: dict
        :param headers: Headers to be passed
        :type cb: function
        :param cb: progress callback invoked with (bytes transferred,
            total size) during the transfer.
        :type num_cb: int
        :param num_cb: (optional) With ``cb``, the maximum number of
            times the callback will be invoked.
        """
        return self.get_file(fp, headers=headers, cb=cb, num_cb=num_cb,
                             torrent=True)
def get_contents_to_file(self, fp, headers=None,
                         cb=None, num_cb=10,
                         torrent=False,
                         version_id=None,
                         res_download_handler=None,
                         response_headers=None):
        """
        Retrieve an object from S3 using the name of the Key object as the
        key in S3.  Write the contents of the object to the file pointed
        to by 'fp'.
        :type fp: File-like object
        :param fp: File-like object to write the object's bytes into.
        :type headers: dict
        :param headers: additional HTTP headers that will be sent with
            the GET request.
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the download.  The callback should accept two
            integer parameters, the first representing the number of
            bytes transferred and the second representing the total size
            of the object.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.
        :type torrent: bool
        :param torrent: If True, returns the contents of a torrent
            file as a string.
        :type res_download_handler: ResumableDownloadHandler
        :param res_download_handler: If provided, this handler will
            perform the download.
        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.
        """
        # Silent no-op when the key is not attached to a bucket, which
        # preserves the method's historical behavior.
        if self.bucket is not None:
            if res_download_handler:
                # NOTE(review): response_headers is not forwarded to the
                # resumable handler -- confirm the handler's get_file
                # signature before adding it.
                res_download_handler.get_file(self, fp, headers, cb, num_cb,
                                              torrent=torrent,
                                              version_id=version_id)
            else:
                self.get_file(fp, headers, cb, num_cb, torrent=torrent,
                              version_id=version_id,
                              response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
                             cb=None, num_cb=10,
                             torrent=False,
                             version_id=None,
                             res_download_handler=None,
                             response_headers=None):
        """
        Retrieve an object from S3 using the name of the Key object as the
        key in S3.  Store contents of the object to a file named by 'filename'.
        See get_contents_to_file method for details about the
        parameters.
        :type filename: string
        :param filename: The filename of where to put the file contents
        :type headers: dict
        :param headers: Any additional headers to send in the request
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the download.  The callback should accept two
            integer parameters: bytes transferred and total object size.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.
        :type torrent: bool
        :param torrent: If True, returns the contents of a torrent file
            as a string.
        :type res_download_handler: ResumableDownloadHandler
        :param res_download_handler: If provided, this handler will
            perform the download.
        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.
        """
        fp = open(filename, 'wb')
        try:
            self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
                                      version_id=version_id,
                                      res_download_handler=res_download_handler,
                                      response_headers=response_headers)
        except Exception:
            # Close before removing the partial file: some platforms
            # (notably Windows) refuse to delete a file that is open.
            fp.close()
            os.remove(filename)
            raise
        finally:
            # close() on an already-closed file object is a no-op.
            fp.close()
        # if last_modified date was sent from s3, try to set file's timestamp
        if self.last_modified is not None:
            try:
                modified_tuple = rfc822.parsedate_tz(self.last_modified)
                modified_stamp = int(rfc822.mktime_tz(modified_tuple))
                os.utime(fp.name, (modified_stamp, modified_stamp))
            except Exception:
                # Timestamp propagation is best-effort only.
                pass
def get_contents_as_string(self, headers=None,
                           cb=None, num_cb=10,
                           torrent=False,
                           version_id=None,
                           response_headers=None):
        """
        Retrieve an object from S3 using the name of the Key object as the
        key in S3 and return its contents as a string.

        The bytes are collected into an in-memory buffer via
        get_contents_to_file; see that method for parameter details.
        :type headers: dict
        :param headers: Any additional headers to send in the request
        :type cb: function
        :param cb: progress callback invoked with (bytes transferred,
            total object size) during the transfer.
        :type num_cb: int
        :param num_cb: (optional) With ``cb``, the maximum number of
            times the callback will be invoked during the transfer.
        :type torrent: bool
        :param torrent: If True, returns the contents of a torrent file
            as a string.
        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.
        :rtype: string
        :returns: The contents of the file as a string
        """
        buf = StringIO.StringIO()
        self.get_contents_to_file(buf, headers, cb, num_cb, torrent=torrent,
                                  version_id=version_id,
                                  response_headers=response_headers)
        return buf.getvalue()
def add_email_grant(self, permission, email_address, headers=None):
        """
        Convenience method that provides a quick way to add an email grant
        to a key. This method retrieves the current ACL, creates a new
        grant based on the parameters passed in, adds that grant to the ACL
        and then PUT's the new ACL back to S3.
        :type permission: string
        :param permission: The permission being granted. Should be one of:
            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
        :type email_address: string
        :param email_address: The email address associated with the AWS
            account your are granting the permission to.
        :type headers: dict
        :param headers: Optional headers to pass along with the requests.
        """
        # Read-modify-write cycle: fetch ACL, append grant, PUT it back.
        # Not atomic; concurrent ACL changes between the GET and the PUT
        # would be overwritten.
        policy = self.get_acl(headers=headers)
        policy.acl.add_email_grant(permission, email_address)
        self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
                   display_name=None):
        """
        Convenience method that provides a quick way to add a canonical
        user grant to a key.  This method retrieves the current ACL,
        creates a new grant based on the parameters passed in, adds that
        grant to the ACL and then PUT's the new ACL back to S3.
        :type permission: string
        :param permission: The permission being granted. Should be one of:
            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
        :type user_id: string
        :param user_id: The canonical user id associated with the AWS
            account your are granting the permission to.
        :type headers: dict
        :param headers: Optional headers to pass along with the requests.
        :type display_name: string
        :param display_name: An option string containing the user's
            Display Name.  Only required on Walrus.
        """
        # Read-modify-write cycle, same non-atomicity caveat as
        # add_email_grant.
        policy = self.get_acl(headers=headers)
        policy.acl.add_user_grant(permission, user_id,
                                  display_name=display_name)
        self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
if type(metadata) == set:
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
        """
        Extracts metadata from existing URI into a dict, so we can
        overwrite/delete from it to form the new set of metadata to apply to a
        key.
        :return: dict mapping lower-cased HTTP header names to values,
            combining the standard user-settable fields with the custom
            (provider-prefixed) metadata stored on this key.
        """
        metadata = {}
        for underscore_name in self._underscore_base_user_settable_fields:
            if hasattr(self, underscore_name):
                value = getattr(self, underscore_name)
                if value:
                    # Generate HTTP field name corresponding to "_" named field.
                    field_name = underscore_name.replace('_', '-')
                    metadata[field_name.lower()] = value
        # self.metadata contains custom metadata, which are all user-settable.
        prefix = self.provider.metadata_prefix
        for underscore_name in self.metadata:
            field_name = underscore_name.replace('_', '-')
            # Custom metadata keys carry the provider prefix (e.g.
            # "x-amz-meta-") so they round-trip through HTTP headers.
            metadata['%s%s' % (prefix, field_name.lower())] = (
                self.metadata[underscore_name])
        return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
                        headers=None):
        """
        Replace this key's stored metadata by copying the key onto itself.

        ``metadata_plus`` entries are added/overwritten and
        ``metadata_minus`` names are removed from the metadata currently
        stored with the object; header-name matching is case-insensitive.
        """
        additions = self._normalize_metadata(metadata_plus)
        removals = self._normalize_metadata(metadata_minus)
        merged = self._get_remote_metadata()
        merged.update(additions)
        for header_name in removals:
            if header_name in merged:
                del merged[header_name]
        src_bucket = self.bucket
        # Boto prepends the meta prefix when adding headers, so strip any
        # existing prefix from the names before handing them to copy_key().
        stripped = {}
        for header_name in merged:
            if (header_name.startswith('x-goog-meta-') or
                    header_name.startswith('x-amz-meta-')):
                bare_name = (header_name.replace('x-goog-meta-', '')
                             .replace('x-amz-meta-', ''))
            else:
                bare_name = header_name
            stripped[bare_name] = merged[header_name]
        src_bucket.copy_key(self.name, self.bucket.name, self.name,
                            metadata=stripped, preserve_acl=preserve_acl,
                            headers=headers)
def restore(self, days, headers=None):
        """Restore an object from an archive.
        :type days: int
        :param days: The lifetime of the restored object (must
            be at least 1 day).  If the object is already restored
            then this parameter can be used to readjust the lifetime
            of the restored object.  In this case, the days
            param is with respect to the initial time of the request.
            If the object has not been restored, this param is with
            respect to the completion time of the request.
        :raises: the provider's storage_response_error when the service
            answers with any status other than 200 or 202.
        """
        response = self.bucket.connection.make_request(
            'POST', self.bucket.name, self.name,
            data=self.RestoreBody % days,
            headers=headers, query_args='restore')
        # 200 and 202 are treated as success; anything else is surfaced
        # as a provider storage error with the response body attached.
        if response.status not in (200, 202):
            provider = self.bucket.connection.provider
            raise provider.storage_response_error(response.status,
                                                  response.reason,
                                                  response.read())
| mit |
andris210296/andris-projeto | backend/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys


# Native string types of the running interpreter:
# (str, unicode) on Python 2, (bytes, str) on Python 3.
if sys.version_info < (3, 0):
    base_str = (str, unicode)
else:
    base_str = (bytes, str)


def wrap_ord(a):
    """Return ``ord(a)`` for Python 2 string input; pass other values through."""
    needs_conversion = sys.version_info < (3, 0) and isinstance(a, base_str)
    return ord(a) if needs_conversion else a
| mit |
broganross/kivy_tests | pong/paint.py | 1 | 1113 |
from random import random
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.graphics import Color
from kivy.graphics import Ellipse
from kivy.graphics import Line
class PaintWidget(Widget):
    """Canvas widget: each touch paints a dot and drags a trailing line."""

    def on_touch_down(self, touch):
        # Pick a random per-stroke color (HSV keeps it saturated).
        color = (random(), random(), random())
        with self.canvas:
            Color(*color, mode="hsv")
            d = 30.0
            Ellipse(pos=(touch.x - d/2, touch.y - d/2), size=(d,d))
            touch.ud["line"] = Line(points=(touch.x, touch.y), width=d)

    def on_touch_move(self, touch):
        # A touch that began outside this widget (e.g. on the clear
        # button) never got a "line" entry in touch.ud; guard so such
        # moves don't raise KeyError.
        line = touch.ud.get("line")
        if line is not None:
            line.points += [touch.x, touch.y]
class PaintApp(App):
    # Minimal demo app: a paint canvas plus a button that clears it.
    def build(self):
        """Build and return the root widget tree."""
        parent = Widget()
        painter = PaintWidget()
        clearbtn = Button(text="clear")
        parent.add_widget(painter)
        parent.add_widget(clearbtn)
        def clear_canvas(obj):
            # Wipe every instruction drawn on the painter's canvas.
            painter.canvas.clear()
        clearbtn.bind(on_release=clear_canvas)
        return parent
if __name__ == "__main__":
PaintApp().run() | mit |
lamdnhan/osf.io | website/project/__init__.py | 4 | 3835 | # -*- coding: utf-8 -*-
import uuid
from .model import Node, PrivateLink
from framework.forms.utils import sanitize
from framework.mongo.utils import from_mongo
from modularodm import Q
from website.exceptions import NodeStateError
def show_diff(seqm):
    """Render the difference between two compared strings as HTML.

    ``seqm`` is a ``difflib.SequenceMatcher`` instance whose ``a`` and
    ``b`` members are strings; insertions and deletions are wrapped in
    colored ``<span>`` tags.
    """
    INSERT_OPEN = '<span style="background:#4AA02C; font-size:1.5em; ">'
    DELETE_OPEN = '<span style="background:#D16587; font-size:1.5em;">'
    CLOSE = '</span>'
    pieces = []
    for opcode, a0, a1, b0, b1 in seqm.get_opcodes():
        old_text = sanitize(seqm.a[a0:a1])
        new_text = sanitize(seqm.b[b0:b1])
        if opcode == 'equal':
            pieces.append(old_text)
        elif opcode == 'insert':
            pieces.append(INSERT_OPEN + new_text + CLOSE)
        elif opcode == 'delete':
            pieces.append(DELETE_OPEN + old_text + CLOSE)
        elif opcode == 'replace':
            pieces.append(DELETE_OPEN + old_text + CLOSE +
                          INSERT_OPEN + new_text + CLOSE)
        else:
            raise RuntimeError("unexpected opcode")
    return ''.join(pieces)
# TODO: This should be a class method of Node
def new_node(category, title, user, description=None, project=None):
    """Create a new project or component.
    :param str category: Node category
    :param str title: Node title
    :param User user: User object
    :param str description: Node description
    :param Node project: Optional parent object
    :return Node: Created node
    """
    # Normalize the category and sanitize user-supplied text before
    # persisting.
    category = category.strip().lower()
    title = sanitize(title.strip())
    if description:
        description = sanitize(description.strip())
    node = Node(
        title=title,
        category=category,
        creator=user,
        description=description,
        project=project,
    )
    node.save()
    return node
def new_dashboard(user):
    """Create a new dashboard project.
    :param User user: User object
    :return Node: Created node
    :raises NodeStateError: If the user already has a dashboard
    """
    # A user may own at most one dashboard; check before creating.
    existing_dashboards = user.node__contributed.find(
        Q('category', 'eq', 'project') &
        Q('is_dashboard', 'eq', True)
    )
    if existing_dashboards.count() > 0:
        raise NodeStateError("Users may only have one dashboard")
    node = Node(
        title='Dashboard',
        creator=user,
        category='project',
        is_dashboard=True,
        is_folder=True
    )
    node.save()
    return node
def new_folder(title, user):
    """Create a new folder project.
    :param str title: Folder title (sanitized before saving)
    :param User user: User object
    :return Node: Created node
    """
    folder = Node(
        title=sanitize(title.strip()),
        creator=user,
        category='project',
        is_folder=True,
    )
    folder.save()
    return folder
def new_private_link(name, user, nodes, anonymous):
    """Create a new private link.
    :param str name: private link name
    :param User user: User object
    :param list Node nodes: a list of node objects
    :param bool anonymous: make link anonymous or not
    :return PrivateLink: Created private link
    """
    # uuid4().hex is the dashless hex form of a random UUID.
    link_name = sanitize(name.strip()) if name else "Shared project link"
    private_link = PrivateLink(
        key=uuid.uuid4().hex,
        name=link_name,
        creator=user,
        nodes=nodes,
        anonymous=anonymous,
    )
    private_link.save()
    return private_link
# Ordered sequence of (old, new) substitutions applied to template file
# names.  A tuple rather than a set: set iteration order is not
# guaranteed, so replacements would run in a nondeterministic order.
template_name_replacements = (
    ('.txt', ''),
    ('_', ' '),
)


def clean_template_name(template_name):
    """Return a human-readable display name for a template file.

    Decodes the mongo-safe field name, then strips the '.txt' suffix and
    replaces underscores with spaces.
    """
    template_name = from_mongo(template_name)
    for replacement in template_name_replacements:
        template_name = template_name.replace(*replacement)
    return template_name
| apache-2.0 |
joshblum/django-with-audit | tests/regressiontests/utils/checksums.py | 246 | 1098 | import unittest
from django.utils import checksums
class TestUtilsChecksums(unittest.TestCase):

    def check_output(self, function, value, output=None):
        """
        Check that function(value) equals output.  If output is None,
        check that function(value) equals value.
        """
        expected = value if output is None else output
        self.assertEqual(function(value), expected)

    def test_luhn(self):
        is_valid = checksums.luhn
        valid_numbers = (
            4111111111111111, '4111111111111111', 4222222222222,
            378734493671000, 5424000000000015, 5555555555554444,
            1008, '0000001008', '000000001008', 4012888888881881,
            1234567890123456789012345678909,
        )
        invalid_numbers = (
            4111111111211111, 42222222222224, 100, '100', '0000100',
            'abc', None, object(),
        )
        for number in valid_numbers:
            self.check_output(is_valid, number, True)
        for number in invalid_numbers:
            self.check_output(is_valid, number, False)
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/scipy/sparse/linalg/isolve/lgmres.py | 2 | 10464 | # Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six.moves import xrange
from scipy.linalg import get_blas_funcs
from .utils import make_system
__all__ = ['lgmres']
def norm2(q):
    """Euclidean norm of ``q`` via the BLAS ``nrm2`` routine matched to
    the array's dtype."""
    arr = np.asarray(q)
    nrm2 = get_blas_funcs('nrm2', dtype=arr.dtype)
    return nrm2(arr)
def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
           inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True):
    """
    Solve a matrix equation using the LGMRES algorithm.

    The LGMRES algorithm [BJM]_ [BPh]_ is designed to avoid some problems
    in the convergence in restarted GMRES, and often converges in fewer
    iterations.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0  : {array, matrix}
        Starting guess for the solution.
    tol : float
        Tolerance to achieve. The algorithm terminates when either the relative
        or the absolute residual is below `tol`.
    maxiter : int
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, dense matrix, LinearOperator}
        Preconditioner for A.  The preconditioner should approximate the
        inverse of A.  Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    inner_m : int, optional
        Number of inner GMRES iterations per each outer iteration.
    outer_k : int, optional
        Number of vectors to carry between inner GMRES iterations.
        According to [BJM]_, good values are in the range of 1...3.
        However, note that if you want to use the additional vectors to
        accelerate solving multiple similar problems, larger values may
        be beneficial.
    outer_v : list of tuples, optional
        List containing tuples ``(v, Av)`` of vectors and corresponding
        matrix-vector products, used to augment the Krylov subspace, and
        carried between inner GMRES iterations. The element ``Av`` can
        be `None` if the matrix-vector product should be re-evaluated.
        This parameter is modified in-place by `lgmres`, and can be used
        to pass "guess" vectors in and out of the algorithm when solving
        similar problems.
    store_outer_Av : bool, optional
        Whether LGMRES should store also A*v in addition to vectors `v`
        in the `outer_v` list. Default is True.

    Returns
    -------
    x : array or matrix
        The converged solution.
    info : int
        Provides convergence information:

            - 0  : successful exit
            - >0 : convergence to tolerance not achieved, number of iterations
            - <0 : illegal input or breakdown

    Notes
    -----
    The LGMRES algorithm [BJM]_ [BPh]_ is designed to avoid the
    slowing of convergence in restarted GMRES, due to alternating
    residual vectors. Typically, it often outperforms GMRES(m) of
    comparable memory requirements by some measure, or at least is not
    much worse.

    Another advantage in this algorithm is that you can supply it with
    'guess' vectors in the `outer_v` argument that augment the Krylov
    subspace. If the solution lies close to the span of these vectors,
    the algorithm converges faster. This can be useful if several very
    similar matrices need to be inverted one after another, such as in
    Newton-Krylov iteration where the Jacobian matrix often changes
    little in the nonlinear steps.

    References
    ----------
    .. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel,
             SIAM J. Matrix Anal. Appl. 26, 962 (2005).
    .. [BPh] A.H. Baker, PhD thesis, University of Colorado (2003).
             http://amath.colorado.edu/activities/thesis/allisonb/Thesis.ps
    """
    # Import from the public scipy.linalg namespace; scipy.linalg.basic is
    # a private implementation module and not a stable import location.
    from scipy.linalg import lstsq
    A,M,x,b,postprocess = make_system(A,M,x0,b)

    if not np.isfinite(b).all():
        raise ValueError("RHS must contain only finite numbers")

    matvec = A.matvec
    psolve = M.matvec

    if outer_v is None:
        outer_v = []

    axpy, dot, scal = None, None, None

    b_norm = norm2(b)
    if b_norm == 0:
        b_norm = 1

    for k_outer in xrange(maxiter):
        r_outer = matvec(x) - b

        # -- callback
        if callback is not None:
            callback(x)

        # -- determine input type routines
        if axpy is None:
            if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
                x = x.astype(r_outer.dtype)
            axpy, dot, scal = get_blas_funcs(['axpy', 'dot', 'scal'],
                                             (x, r_outer))

        # -- check stopping condition
        r_norm = norm2(r_outer)
        if r_norm < tol * b_norm or r_norm < tol:
            break

        # -- inner LGMRES iteration
        vs0 = -psolve(r_outer)
        inner_res_0 = norm2(vs0)

        if inner_res_0 == 0:
            rnorm = norm2(r_outer)
            raise RuntimeError("Preconditioner returned a zero vector; "
                               "|v| ~ %.1g, |M v| = 0" % rnorm)

        vs0 = scal(1.0/inner_res_0, vs0)
        hs = []
        vs = [vs0]
        ws = []
        y = None

        for j in xrange(1, 1 + inner_m + len(outer_v)):
            # -- Arnoldi process:
            #
            #    Build an orthonormal basis V and matrices W and H such that
            #        A W = V H
            #    Columns of W, V, and H are stored in `ws`, `vs` and `hs`.
            #
            #    The first column of V is always the residual vector, `vs0`;
            #    V has *one more column* than the other of the three matrices.
            #
            #    The other columns in V are built by feeding in, one
            #    by one, some vectors `z` and orthonormalizing them
            #    against the basis so far. The trick here is to
            #    feed in first some augmentation vectors, before
            #    starting to construct the Krylov basis on `v0`.
            #
            #    It was shown in [BJM]_ that a good choice (the LGMRES choice)
            #    for these augmentation vectors are the `dx` vectors obtained
            #    from a couple of the previous restart cycles.
            #
            #    Note especially that while `vs0` is always the first
            #    column in V, there is no reason why it should also be
            #    the first column in W. (In fact, below `vs0` comes in
            #    W only after the augmentation vectors.)
            #
            #    The rest of the algorithm then goes as in GMRES, one
            #    solves a minimization problem in the smaller subspace
            #    spanned by W (range) and V (image).
            #
            #    XXX: Below, I'm lazy and use `lstsq` to solve the
            #    small least squares problem. Performance-wise, this
            #    is in practice acceptable, but it could be nice to do
            #    it on the fly with Givens etc.
            #

            #     ++ evaluate
            v_new = None
            if j < len(outer_v) + 1:
                z, v_new = outer_v[j-1]
            elif j == len(outer_v) + 1:
                z = vs0
            else:
                z = vs[-1]

            if v_new is None:
                v_new = psolve(matvec(z))
            else:
                # Note: v_new is modified in-place below. Must make a
                # copy to ensure that the outer_v vectors are not
                # clobbered.
                v_new = v_new.copy()

            #     ++ orthogonalize
            hcur = []
            for v in vs:
                alpha = dot(v, v_new)
                hcur.append(alpha)
                v_new = axpy(v, v_new, v.shape[0], -alpha)  # v_new -= alpha*v
            hcur.append(norm2(v_new))

            if hcur[-1] == 0:
                # Exact solution found; bail out.
                # Zero basis vector (v_new) in the least-squares problem
                # does no harm, so we can just use the same code as usually;
                # it will give zero (inner) residual as a result.
                bailout = True
            else:
                bailout = False
                v_new = scal(1.0/hcur[-1], v_new)

            vs.append(v_new)
            hs.append(hcur)
            ws.append(z)

            # XXX: Ugly: should implement the GMRES iteration properly,
            #      with Givens rotations and not using lstsq. Instead, we
            #      spare some work by solving the LSQ problem only every 5
            #      iterations.
            if not bailout and j % 5 != 1 and j < inner_m + len(outer_v) - 1:
                continue

            # -- GMRES optimization problem
            hess = np.zeros((j+1, j), x.dtype)
            e1 = np.zeros((j+1,), x.dtype)
            e1[0] = inner_res_0
            for q in xrange(j):
                hess[:(q+2),q] = hs[q]

            y, resids, rank, s = lstsq(hess, e1)
            inner_res = norm2(np.dot(hess, y) - e1)

            # -- check for termination
            if inner_res < tol * inner_res_0:
                break

        # -- GMRES terminated: eval solution
        dx = ws[0]*y[0]
        for w, yc in zip(ws[1:], y[1:]):
            dx = axpy(w, dx, dx.shape[0], yc)  # dx += w*yc

        # -- Store LGMRES augmentation vectors
        nx = norm2(dx)
        # Guard against a zero-length step: dividing by nx == 0 would
        # inject NaN vectors into outer_v and poison later iterations.
        if nx > 0:
            if store_outer_Av:
                q = np.dot(hess, y)
                ax = vs[0]*q[0]
                for v, qc in zip(vs[1:], q[1:]):
                    ax = axpy(v, ax, ax.shape[0], qc)
                outer_v.append((dx/nx, ax/nx))
            else:
                outer_v.append((dx/nx, None))

        # -- Retain only a finite number of augmentation vectors
        while len(outer_v) > outer_k:
            del outer_v[0]

        # -- Apply step
        x += dx
    else:
        # didn't converge ...
        return postprocess(x), maxiter

    return postprocess(x), 0
| gpl-3.0 |
translate/pootle | pootle/apps/pootle_misc/forms.py | 5 | 3996 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django import forms
from django.core.validators import EMPTY_VALUES
from django.forms.models import ModelChoiceIterator
from pootle.i18n.gettext import ugettext_lazy as _
class GroupedModelChoiceIterator(ModelChoiceIterator):
    """Choice iterator that yields ``(title, choices)`` groups.

    Groups with a title come out as ``(title, [choice, ...])`` tuples
    suitable for ``<optgroup>`` rendering; groups whose title is ``None``
    are yielded flat, one choice at a time.
    """

    def __init__(self, field):
        self.field = field
        self.choice_groups = field.choice_groups

    def __iter__(self):
        # Lead with the empty choice when the field defines one.
        empty_label = self.field.empty_label
        if empty_label is not None:
            yield (u'', empty_label)
        for group_title, group_qs in self.choice_groups:
            if group_title is None:
                # Untitled group: emit its choices at the top level.
                for obj in group_qs:
                    yield self.choice(obj)
            else:
                yield (group_title, [self.choice(obj) for obj in group_qs])
class GroupedModelChoiceField(forms.ModelChoiceField):
    """`ModelChoiceField` variant whose choices are rendered in groups.

    :param choice_groups: list of ``(title, queryset)`` tuples, one per
        choice group; a ``None`` title produces ungrouped choices.
    """

    def __init__(self, choice_groups, *args, **kwargs):
        self.choice_groups = choice_groups
        super(GroupedModelChoiceField, self).__init__(*args, **kwargs)

    def _get_choices(self):
        # Choices assigned explicitly (via the property setter) take
        # precedence over the grouped iterator.
        try:
            return self._choices
        except AttributeError:
            return GroupedModelChoiceIterator(self)
    choices = property(_get_choices, forms.ModelChoiceField._set_choices)
class LiberalModelChoiceField(forms.ModelChoiceField):
    """ModelChoiceField that accepts values outside the configured queryset.

    Essentially a hack for admin pages: it lets us exclude currently-used
    choices from dropdowns while still letting those values validate on
    submission.
    """

    def clean(self, value):
        if value in EMPTY_VALUES:
            return None
        model = self.queryset.model
        lookup_field = self.to_field_name or 'pk'
        try:
            return model.objects.get(**{lookup_field: value})
        except model.DoesNotExist:
            raise forms.ValidationError(self.error_messages['invalid_choice'])
def make_search_form(*args, **kwargs):
    """Factory that instantiates one of the search forms below."""
    request = kwargs.pop('request', None)
    cookie = request.COOKIES.get('pootle-search') if request is not None else None
    if cookie:
        import json
        import urllib
        try:
            sparams = json.loads(urllib.unquote(cookie))
        except ValueError:
            # Malformed cookie: build the form without initial parameters.
            sparams = None
        if isinstance(sparams, dict) and 'sfields' in sparams:
            kwargs['initial'] = sparams
    return SearchForm(*args, **kwargs)
class SearchForm(forms.Form):
    """Normal search form for translation projects."""
    # Free-text query; submitted by pressing Enter (no submit button).
    search = forms.CharField(
        widget=forms.TextInput(attrs={
            'autocomplete': 'off',
            'size': '15',
            'placeholder': _('Search'),
            'title': _("Search (Ctrl+Shift+S)<br/>Type and press Enter to "
                       "search"),
        }),
    )
    # Optional matching modifiers (phrase match / case sensitivity).
    soptions = forms.MultipleChoiceField(
        required=False,
        widget=forms.CheckboxSelectMultiple,
        choices=(
            ('exact', _('Phrase match')),
            ('case', _('Case-sensitive match'))))
    # Unit fields to search in; source and target text by default.
    sfields = forms.MultipleChoiceField(
        required=False,
        widget=forms.CheckboxSelectMultiple,
        choices=(
            ('source', _('Source Text')),
            ('target', _('Target Text')),
            ('notes', _('Comments')),
            ('locations', _('Locations'))
        ),
        initial=['source', 'target'],
    )
| gpl-3.0 |
gumstix/linux | tools/perf/scripts/python/futex-contention.py | 1997 | 1508 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Per-thread state for FUTEX_WAIT calls currently in flight.
thread_thislock = {}   # tid -> futex address the thread is blocked on
thread_blocktime = {}  # tid -> timestamp (ns) when the thread started waiting
lock_waits = {}  # long-lived stats on (tid,lock) blockage elapsed time
# The original initialized process_names twice; the redundant first
# assignment has been dropped.
process_names = {}  # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
			      nr, uaddr, op, val, utime, uaddr2, val3):
	"""Record the moment a thread starts blocking on a futex."""
	# Only FUTEX_WAIT entries matter; originators of WAKE events are ignored.
	if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
		return
	stamp = nsecs(s, ns)
	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	thread_blocktime[tid] = stamp
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
			     nr, ret):
	"""Accumulate elapsed block time when a futex wait returns."""
	# `in` instead of the deprecated dict.has_key().
	if tid in thread_blocktime:
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	# Perf calls this once at script start; python 2 print statement
	# (perf's python scripting engine here is python 2).
	print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
xebialabs-community/xlr-xldeploy-plugin | src/main/resources/xlr_xldeploy/SetCITags.py | 1 | 1446 | #
# Copyright 2019 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import json
from xlr_xldeploy.XLDeployClientUtil import XLDeployClientUtil
# `xldeployServer`, `username`, `password`, `xldCI` and `xldNewTags` are
# injected into this task's namespace by XL Release before the script runs.
xld_client = XLDeployClientUtil.create_xldeploy_client(xldeployServer, username, password)
# Presumably verifies the CI exists before we touch it — confirm whether
# check_ci_exist raises or merely returns a flag.
response = xld_client.check_ci_exist(xldCI, True)
# NOTE(review): my_ci is fetched but never used afterwards — looks like a
# sanity check only; confirm before removing.
my_ci = json.loads( xld_client.get_ci(xldCI, 'json') )
print xldNewTags
# Replace the CI's "tags" property wholesale with the new tag list.
xld_client.update_ci_property(xldCI, "tags", json.dumps(xldNewTags))
# Expose the applied tags as a task output variable.
xldTags = xldNewTags
| mit |
kapy2010/treeherder | tests/seta/test_update_job_priority.py | 4 | 4220 | import datetime
import pytest
from mock import patch
from treeherder.seta.models import JobPriority
from treeherder.seta.runnable_jobs import RunnableJobsClient
from treeherder.seta.update_job_priority import (_initialize_values,
_sanitize_data,
_unique_key,
_update_table,
query_sanitized_data)
def test_unique_key():
    """_unique_key must collapse a runnable-job dict to its identity tuple."""
    new_job = {
        'build_system_type': 'buildbot',
        'platform': 'windows8-64',
        'platform_option': 'opt',
        'testtype': 'web-platform-tests-1'
    }
    # The original used `assert x, (tuple)` — the tuple was the assert
    # *message*, so the comparison never happened. Compare explicitly.
    assert _unique_key(new_job) == ('web-platform-tests-1', 'opt', 'windows8-64')
def test_sanitize_data(runnable_jobs_data):
    """Sanitizing leaves two jobs visible to each build system."""
    assert len(runnable_jobs_data['results']) == runnable_jobs_data['meta']['count']
    data = _sanitize_data(runnable_jobs_data)
    # '*' means the job exists under both build systems.
    bb_jobs = sum(1 for d in data if d['build_system_type'] in ('buildbot', '*'))
    tc_jobs = sum(1 for d in data if d['build_system_type'] in ('taskcluster', '*'))
    assert bb_jobs == 2
    assert tc_jobs == 2
@patch.object(RunnableJobsClient, 'query_runnable_jobs')
def test_query_sanitized_data(query_runnable_jobs, runnable_jobs_data, sanitized_data):
    # query_sanitized_data should equal _sanitize_data applied to whatever
    # the (mocked) runnable-jobs API returns.
    query_runnable_jobs.return_value = runnable_jobs_data
    data = query_sanitized_data()
    assert data == sanitized_data
@pytest.mark.django_db()
def test_initialize_values_no_data():
    # Empty table: no stored priorities, default priority of 5 and no
    # expiration date.
    results = _initialize_values()
    assert results == ({}, 5, None)
@pytest.mark.django_db()
@patch.object(JobPriority.objects, 'all')
@patch('treeherder.seta.update_job_priority._two_weeks_from_now')
def test_initialize_values(two_weeks, jp_all,
                           job_priority_list, jp_index_fixture):
    # Freeze the expiration helper and the JobPriority queryset so the
    # expected tuple is fully deterministic.
    fourteen_days = datetime.datetime.now() + datetime.timedelta(days=14)
    two_weeks.return_value = fourteen_days
    jp_all.return_value = job_priority_list
    assert _initialize_values() == (jp_index_fixture, 1, fourteen_days)
@patch('treeherder.seta.update_job_priority._two_weeks_from_now')
@patch('treeherder.seta.update_job_priority._initialize_values')
def test_update_table_no_new_jobs(initial_values, two_weeks,
                                  job_priority_list, jp_index_fixture, sanitized_data):
    '''
    We test that once a table has information about job priorities future calls with the same data will not change the table
    '''
    # By doing this we won't need DB access
    initial_values.return_value = jp_index_fixture, 1, two_weeks
    # (0, 0, 0): no rows added/failed/updated — presumably; confirm the
    # tuple's field order against _update_table's implementation.
    assert _update_table(sanitized_data) == (0, 0, 0)
@patch.object(JobPriority, 'save')
@patch('treeherder.seta.update_job_priority._initialize_values')
def test_update_table_empty_table(initial_values, jp_save,
                                  sanitized_data):
    '''
    We test that starting from an empty table
    '''
    # This set of values is when we're bootstrapping the service (aka empty table)
    initial_values.return_value = {}, 5, None
    jp_save.return_value = None  # Since we don't want to write to the DB
    # All three sanitized jobs should be inserted as new rows.
    assert _update_table(sanitized_data) == (3, 0, 0)
@pytest.mark.django_db()
def test_update_table_job_from_other_buildsysten(all_job_priorities_stored):
    # NOTE(review): "buildsysten" is a typo for "buildsystem"; renaming the
    # function would change the collected test id, so it is left untouched.
    # We already have a TaskCluster job like this in the DB
    # The DB entry should be changed to '*'
    data = {
        'build_system_type': 'buildbot',
        'platform': 'linux64',
        'platform_option': 'opt',
        'testtype': 'reftest-e10s-2'
    }
    # Before calling update_table the priority is only for TaskCluster
    assert len(JobPriority.objects.filter(
        buildsystem='taskcluster',
        buildtype=data['platform_option'],
        platform=data['platform'],
        testtype=data['testtype'],
    )) == 1
    # We are checking that only 1 job was updated
    ret_val = _update_table([data])
    assert ret_val == (0, 0, 1)
    # The row is now shared between both build systems.
    assert len(JobPriority.objects.filter(
        buildsystem='*',
        buildtype=data['platform_option'],
        platform=data['platform'],
        testtype=data['testtype'],
    )) == 1
| mpl-2.0 |
jeking3/thrift | lib/py/src/transport/THttpClient.py | 10 | 7114 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from io import BytesIO
import os
import ssl
import sys
import warnings
import base64
from six.moves import urllib
from six.moves import http_client
from .TTransport import TTransportBase
import six
class THttpClient(TTransportBase):
    """Http implementation of TTransport base."""

    def __init__(self, uri_or_host, port=None, path=None, cafile=None, cert_file=None, key_file=None, ssl_context=None):
        """THttpClient supports two different types of construction:

        THttpClient(host, port, path) - deprecated
        THttpClient(uri, [port=<n>, path=<s>, cafile=<filename>, cert_file=<filename>, key_file=<filename>, ssl_context=<context>])

        Only the second supports https. To properly authenticate against the server,
        provide the client's identity by specifying cert_file and key_file. To properly
        authenticate the server, specify either cafile or ssl_context with a CA defined.
        NOTE: if both cafile and ssl_context are defined, ssl_context will override cafile.
        """
        if port is not None:
            warnings.warn(
                "Please use the THttpClient('http{s}://host:port/path') constructor",
                DeprecationWarning,
                stacklevel=2)
            self.host = uri_or_host
            self.port = port
            assert path
            self.path = path
            self.scheme = 'http'
        else:
            parsed = urllib.parse.urlparse(uri_or_host)
            self.scheme = parsed.scheme
            assert self.scheme in ('http', 'https')
            if self.scheme == 'http':
                self.port = parsed.port or http_client.HTTP_PORT
            elif self.scheme == 'https':
                self.port = parsed.port or http_client.HTTPS_PORT
                self.certfile = cert_file
                self.keyfile = key_file
                # cafile builds a default context unless an explicit
                # ssl_context was supplied (which always wins).
                self.context = ssl.create_default_context(cafile=cafile) if (cafile and not ssl_context) else ssl_context
            self.host = parsed.hostname
            self.path = parsed.path
            if parsed.query:
                self.path += '?%s' % parsed.query
        try:
            proxy = urllib.request.getproxies()[self.scheme]
        except KeyError:
            proxy = None
        else:
            if urllib.request.proxy_bypass(self.host):
                proxy = None
        if proxy:
            # Route through the proxy: remember the real endpoint and talk
            # to the proxy host/port instead.
            parsed = urllib.parse.urlparse(proxy)
            self.realhost = self.host
            self.realport = self.port
            self.host = parsed.hostname
            self.port = parsed.port
            self.proxy_auth = self.basic_proxy_auth_header(parsed)
        else:
            self.realhost = self.realport = self.proxy_auth = None
        self.__wbuf = BytesIO()
        self.__http = None
        self.__http_response = None
        self.__timeout = None
        self.__custom_headers = None

    @staticmethod
    def basic_proxy_auth_header(proxy):
        """Return a Proxy-Authorization header value, or None if the proxy
        URL carries no credentials."""
        if proxy is None or not proxy.username:
            return None
        ap = "%s:%s" % (urllib.parse.unquote(proxy.username),
                        urllib.parse.unquote(proxy.password))
        # b64encode operates on bytes: encode first, then decode the result
        # so the header value is a text string on both Python 2 and 3.
        # (The original passed a str and concatenated the bytes result,
        # which raises TypeError on Python 3.)
        cr = base64.b64encode(ap.encode('utf-8')).strip()
        return "Basic " + cr.decode('ascii')

    def using_proxy(self):
        return self.realhost is not None

    def open(self):
        if self.scheme == 'http':
            self.__http = http_client.HTTPConnection(self.host, self.port,
                                                     timeout=self.__timeout)
        elif self.scheme == 'https':
            self.__http = http_client.HTTPSConnection(self.host, self.port,
                                                      key_file=self.keyfile,
                                                      cert_file=self.certfile,
                                                      timeout=self.__timeout,
                                                      context=self.context)
            if self.using_proxy():
                # HTTPS goes through the proxy via a CONNECT tunnel.
                self.__http.set_tunnel(self.realhost, self.realport,
                                       {"Proxy-Authorization": self.proxy_auth})

    def close(self):
        self.__http.close()
        self.__http = None
        self.__http_response = None

    def isOpen(self):
        return self.__http is not None

    def setTimeout(self, ms):
        if ms is None:
            self.__timeout = None
        else:
            self.__timeout = ms / 1000.0

    def setCustomHeaders(self, headers):
        self.__custom_headers = headers

    def read(self, sz):
        return self.__http_response.read(sz)

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        # Each flush is one complete POST request/response cycle, so a
        # fresh connection is opened every time.
        if self.isOpen():
            self.close()
        self.open()

        # Pull data out of buffer
        data = self.__wbuf.getvalue()
        self.__wbuf = BytesIO()

        # HTTP request
        if self.using_proxy() and self.scheme == "http":
            # need full URL of real host for HTTP proxy here (HTTPS uses CONNECT tunnel)
            self.__http.putrequest('POST', "http://%s:%s%s" %
                                   (self.realhost, self.realport, self.path))
        else:
            self.__http.putrequest('POST', self.path)

        # Write headers
        self.__http.putheader('Content-Type', 'application/x-thrift')
        self.__http.putheader('Content-Length', str(len(data)))
        if self.using_proxy() and self.scheme == "http" and self.proxy_auth is not None:
            self.__http.putheader("Proxy-Authorization", self.proxy_auth)

        if not self.__custom_headers or 'User-Agent' not in self.__custom_headers:
            user_agent = 'Python/THttpClient'
            script = os.path.basename(sys.argv[0])
            if script:
                user_agent = '%s (%s)' % (user_agent, urllib.parse.quote(script))
            self.__http.putheader('User-Agent', user_agent)

        if self.__custom_headers:
            for key, val in six.iteritems(self.__custom_headers):
                self.__http.putheader(key, val)

        self.__http.endheaders()

        # Write payload
        self.__http.send(data)

        # Get reply to flush the request
        self.__http_response = self.__http.getresponse()
        self.code = self.__http_response.status
        self.message = self.__http_response.reason
        self.headers = self.__http_response.msg
| apache-2.0 |
ehirt/odoo | addons/l10n_ro/__openerp__.py | 186 | 2241 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Fekete Mihai <feketemihai@gmail.com>, Tatár Attila <atta@nvm.ro>
# Copyright (C) 2011-2014 TOTAL PC SYSTEMS (http://www.erpsystems.ro).
# Copyright (C) 2014 Fekete Mihai
# Copyright (C) 2014 Tatár Attila
# Based on precedent versions developed by Fil System, Fekete Mihai
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Romania - Accounting",
"version" : "1.0",
"author" : "ERPsystems Solutions",
"website": "http://www.erpsystems.ro",
"category" : "Localization/Account Charts",
"depends" : ['account','account_chart','base_vat'],
"description": """
This is the module to manage the Accounting Chart, VAT structure, Fiscal Position and Tax Mapping.
It also adds the Registration Number for Romania in OpenERP.
================================================================================================================
Romanian accounting chart and localization.
""",
"demo" : [],
"data" : ['partner_view.xml',
'account_chart.xml',
'account_tax_code_template.xml',
'account_chart_template.xml',
'account_tax_template.xml',
'fiscal_position_template.xml',
'l10n_chart_ro_wizard.xml',
'res.country.state.csv',
'res.bank.csv',
],
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eadgarchen/tensorflow | tensorflow/python/training/slot_creator_test.py | 45 | 5256 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for slot_creator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import slot_creator
class SlotCreatorTest(test.TestCase):
  """Tests for slot_creator: slot variables inherit name scope, shape and
  (optionally overridden) dtype from their primary variable/tensor."""

  def testCreateSlotFromVariable(self):
    with self.test_session():
      v = variables.Variable([1.0, 2.5], name="var")
      slot = slot_creator.create_slot(v, v.initialized_value(), name="slot")
      variables.global_variables_initializer().run()
      # Slot lives under the primary's name scope and copies its value.
      self.assertEqual("var/slot", slot.op.name)
      self.assertEqual([2], slot.get_shape().as_list())
      self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
      self.assertAllEqual([1.0, 2.5], slot.eval())

  def testCreateSlotFromTensor(self):
    with self.test_session():
      v = constant_op.constant([1.0, 2.5], name="const")
      slot = slot_creator.create_slot(v, v * 2, name="slot")
      variables.global_variables_initializer().run()
      self.assertEqual("const/slot", slot.op.name)
      self.assertEqual([2], slot.get_shape().as_list())
      self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
      self.assertAllEqual([2.0, 5.0], slot.eval())

  def testCreateZerosSlotFromVariable(self):
    with self.test_session():
      v = variables.Variable([1.0, 2.5], name="var")
      with ops.control_dependencies(None):
        # dtype override: slot is float64 even though the primary is float32.
        slot = slot_creator.create_zeros_slot(
            v, name="slot", dtype=dtypes.float64)
      variables.global_variables_initializer().run()
      self.assertEqual("var/slot", slot.op.name)
      self.assertEqual([2], slot.get_shape().as_list())
      self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
      self.assertAllEqual([0.0, 0.0], slot.eval())

  def testCreateZerosSlotFromDynamicShapedVariable(self):
    with self.test_session():
      # Shape is only known at run time, so the slot must be created with
      # a dynamic shape as well.
      dyn_shape = constant_op.constant([2], dtype=dtypes.int32)
      dyn_shape = array_ops.placeholder_with_default(dyn_shape,
                                                     shape=[None])
      v = variable_scope.get_variable(
          "var",
          initializer=random_ops.random_uniform(dyn_shape,
                                                dtype=dtypes.float64),
          validate_shape=False)
      with ops.control_dependencies(None):
        slot = slot_creator.create_zeros_slot(
            v, name="slot", dtype=dtypes.float64)
      variables.global_variables_initializer().run()
      self.assertEqual("var/slot", slot.op.name)
      self.assertEqual([2], array_ops.shape(slot).eval())
      self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
      self.assertAllEqual([0.0, 0.0], slot.eval())

  def testCreateZerosSlotFromTensor(self):
    with self.test_session():
      v = constant_op.constant([1.0, 2.5], name="const")
      with ops.control_dependencies(None):
        slot = slot_creator.create_zeros_slot(v, name="slot")
      variables.global_variables_initializer().run()
      self.assertEqual("const/slot", slot.op.name)
      self.assertEqual([2], slot.get_shape().as_list())
      self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
      self.assertAllEqual([0.0, 0.0], slot.eval())

  def testCreateZerosSlotFromDynamicShapedTensor(self):
    with self.test_session():
      v = random_ops.random_uniform([2], dtype=dtypes.float64)
      v = array_ops.placeholder_with_default(v, shape=[None], name="const")
      with ops.control_dependencies(None):
        slot = slot_creator.create_zeros_slot(
            v, name="slot", dtype=dtypes.float64)
      variables.global_variables_initializer().run()
      self.assertEqual("const/slot", slot.op.name)
      self.assertEqual([2], array_ops.shape(slot).eval())
      self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
      self.assertAllEqual([0.0, 0.0], slot.eval())

  def testCreateSlotFromVariableRespectsScope(self):
    # See discussion on #2740.
    with self.test_session():
      with variable_scope.variable_scope("scope"):
        v = variables.Variable([1.0, 2.5], name="var")
        slot = slot_creator.create_slot(v, v.initialized_value(), name="slot")
        self.assertEqual("scope/scope/var/slot", slot.op.name)
# Allow running this test file directly via `python slot_creator_test.py`.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/GitHub/OAuth/FinalizeOAuth.py | 4 | 5674 | # -*- coding: utf-8 -*-
###############################################################################
#
# FinalizeOAuth
# Completes the OAuth process by retrieving a GitHub access token for a user, after they have visited the authorization URL returned by the InitializeOAuth Choreo and clicked "allow."
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FinalizeOAuth(Choreography):
    # Thin wrapper binding this Choreo's Temboo path to the generic
    # Choreography machinery; the three factory methods supply the
    # matching input/result/execution classes defined below.

    def __init__(self, temboo_session):
        """
        Create a new instance of the FinalizeOAuth Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(FinalizeOAuth, self).__init__(temboo_session, '/Library/GitHub/OAuth/FinalizeOAuth')

    def new_input_set(self):
        return FinalizeOAuthInputSet()

    def _make_result_set(self, result, path):
        return FinalizeOAuthResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return FinalizeOAuthChoreographyExecution(session, exec_id, path)
class FinalizeOAuthInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the FinalizeOAuth
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # AccountName/AppKeyName/AppKeyValue are deprecated no-ops kept only
    # for backward compatibility with older generated clients.
    def set_AccountName(self, value):
        """
        Set the value of the AccountName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
        """
        super(FinalizeOAuthInputSet, self)._set_input('AccountName', value)

    def set_AppKeyName(self, value):
        """
        Set the value of the AppKeyName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
        """
        super(FinalizeOAuthInputSet, self)._set_input('AppKeyName', value)

    def set_AppKeyValue(self, value):
        """
        Set the value of the AppKeyValue input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
        """
        super(FinalizeOAuthInputSet, self)._set_input('AppKeyValue', value)

    def set_CallbackID(self, value):
        """
        Set the value of the CallbackID input for this Choreo. ((required, string) The callback token returned by the InitializeOAuth Choreo. Used to retrieve the authorization code after the user authorizes.)
        """
        super(FinalizeOAuthInputSet, self)._set_input('CallbackID', value)

    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((required, string) The Client ID provided by GitHub after registering your application.)
        """
        super(FinalizeOAuthInputSet, self)._set_input('ClientID', value)

    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((required, string) The Client Secret provided by GitHub after registering your application.)
        """
        super(FinalizeOAuthInputSet, self)._set_input('ClientSecret', value)

    def set_SuppressErrors(self, value):
        """
        Set the value of the SuppressErrors input for this Choreo. ((optional, boolean) When set to true, errors received during the OAuth redirect process will be suppressed and returned in the ErrorMessage output.)
        """
        super(FinalizeOAuthInputSet, self)._set_input('SuppressErrors', value)

    def set_Timeout(self, value):
        """
        Set the value of the Timeout input for this Choreo. ((optional, integer) The amount of time (in seconds) to poll your Temboo callback URL to see if your app's user has allowed or denied the request for access. Defaults to 20. Max is 60.)
        """
        super(FinalizeOAuthInputSet, self)._set_input('Timeout', value)
class FinalizeOAuthResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the FinalizeOAuth Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # Parse a JSON string result; `str` shadows the builtin but is kept
        # for compatibility with the generated-code calling convention.
        return json.loads(str)

    def get_AccessToken(self):
        """
        Retrieve the value for the "AccessToken" output from this Choreo execution. ((string) The access token for the user that has granted access to your application.)
        """
        return self._output.get('AccessToken', None)

    def get_ErrorMessage(self):
        """
        Retrieve the value for the "ErrorMessage" output from this Choreo execution. ((string) Contains an error message if an error occurs during the OAuth redirect process and if SuppressErrors is set to true.)
        """
        return self._output.get('ErrorMessage', None)
class FinalizeOAuthChoreographyExecution(ChoreographyExecution):
    # Execution handle for this Choreo; only customizes the result-set type.
    def _make_result_set(self, response, path):
        return FinalizeOAuthResultSet(response, path)
| apache-2.0 |
dmoon4117/mutagen | tests/test_oggspeex.py | 4 | 2140 | import os
import shutil
from mutagen._compat import cBytesIO
from mutagen.ogg import OggPage
from mutagen.oggspeex import OggSpeex, OggSpeexInfo, delete
from tests import TestCase
from tests.test_ogg import TOggFileTypeMixin
from tempfile import mkstemp
class TOggSpeex(TestCase, TOggFileTypeMixin):
    """Tests for the OggSpeex file type, layered on the shared Ogg mixin."""
    Kind = OggSpeex

    def setUp(self):
        # Work on a throwaway copy of the fixture so tests can mutate it.
        original = os.path.join("tests", "data", "empty.spx")
        fd, self.filename = mkstemp(suffix='.ogg')
        os.close(fd)
        shutil.copy(original, self.filename)
        self.audio = self.Kind(self.filename)

    def tearDown(self):
        os.unlink(self.filename)

    def test_module_delete(self):
        # Module-level delete() strips tags but leaves a readable file.
        delete(self.filename)
        self.scan_file()
        self.failIf(OggSpeex(self.filename).tags)

    def test_channels(self):
        self.failUnlessEqual(2, self.audio.info.channels)

    def test_sample_rate(self):
        self.failUnlessEqual(44100, self.audio.info.sample_rate)

    def test_bitrate(self):
        self.failUnlessEqual(0, self.audio.info.bitrate)

    def test_invalid_not_first(self):
        # A stream whose first page is not flagged "first" must be rejected.
        page = OggPage(open(self.filename, "rb"))
        page.first = False
        self.failUnlessRaises(IOError, OggSpeexInfo, cBytesIO(page.write()))

    def test_vendor(self):
        self.failUnless(
            self.audio.tags.vendor.startswith("Encoded with Speex 1.1.12"))
        self.failUnlessRaises(KeyError, self.audio.tags.__getitem__, "vendor")

    def test_not_my_ogg(self):
        # Other Ogg codecs must not be mistaken for Speex.
        fn = os.path.join('tests', 'data', 'empty.oggflac')
        self.failUnlessRaises(IOError, type(self.audio), fn)
        self.failUnlessRaises(IOError, self.audio.save, fn)
        self.failUnlessRaises(IOError, self.audio.delete, fn)

    def test_multiplexed_in_headers(self):
        # Tags must round-trip even when other streams are multiplexed in.
        shutil.copy(
            os.path.join("tests", "data", "multiplexed.spx"), self.filename)
        audio = self.Kind(self.filename)
        audio.tags["foo"] = ["bar"]
        audio.save()
        audio = self.Kind(self.filename)
        self.failUnlessEqual(audio.tags["foo"], ["bar"])

    def test_mime(self):
        self.failUnless("audio/x-speex" in self.audio.mime)
| gpl-2.0 |
dardevelin/rhythmbox-shuffle | plugins/rb/URLCache.py | 4 | 8135 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2009 Jonathan Matthew
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import os.path
import time
import errno
import rb
from gi.repository import RB
SECS_PER_DAY = 86400  # seconds per day; the cache age limits below are given in days
class URLCache(object):
    """File-backed cache of downloaded URL data, with per-cache refresh,
    discard, and lifetime limits (all expressed in days, -1 = unlimited).

    Fixed to use Python-2.6+/3-compatible syntax: print() calls,
    ``except ... as``, and the 0o octal literal (``0700`` is a SyntaxError
    on Python 3).
    """
    def __init__(self, name, path, refresh=-1, discard=-1, lifetime=-1):
        """
        Creates a new cache.  'name' is a symbolic name for the cache.
        'path' is either an absolute path to the cache directory, or a
        path relative to the user cache directory.

        'refresh' is the length of time for which cache entries are always
        considered valid.  'lifetime' is the maximum time an entry can live
        in the cache.  'discard' is the length of time for which a cache entry
        can go unused before being discarded.  These are all specified in days,
        with -1 meaning unlimited.
        """
        self.name = name
        if path.startswith("/"):
            self.path = path
        else:
            # Relative paths live under the per-user cache directory.
            self.path = os.path.join(RB.user_cache_dir(), path)
        self.refresh = refresh
        self.discard = discard
        self.lifetime = lifetime

    def clean(self):
        """
        This sweeps all entries stored in the cache, removing entries that
        are past the cache lifetime limit, or have not been used for longer
        than the cache discard time.  This should be called on plugin activation,
        and perhaps periodically (infrequently) after that.
        """
        now = time.time()
        if not os.path.exists(self.path):
            print("cache directory %s does not exist" % self.path)
            return
        print("cleaning cache directory %s" % self.path)
        for f in os.listdir(self.path):
            try:
                path = os.path.join(self.path, f)
                stat = os.stat(path)
                if self.lifetime != -1:
                    if stat.st_ctime + (self.lifetime * SECS_PER_DAY) < now:
                        print("removing stale cache file %s:%s: age %s (past lifetime limit)" % (self.name, f, int(now - stat.st_ctime)))
                        os.unlink(path)
                        continue
                if self.discard != -1:
                    # hmm, noatime mounts will break this, probably
                    if stat.st_atime + (self.discard * SECS_PER_DAY) < now:
                        print("removing stale cache file %s:%s: age %s (past discard limit)" % (self.name, f, int(now - stat.st_atime)))
                        os.unlink(path)
                        continue
            except Exception as e:
                # Best-effort sweep: a single bad entry must not abort the scan.
                print("error while checking cache entry %s:%s: %s" % (self.name, f, str(e)))
        print("finished cleaning cache directory %s" % self.path)

    def cachefile(self, key):
        """
        Constructs the full path of the file used to store a given cache key.
        """
        # '/' would create subdirectories; flatten keys into single filenames.
        fkey = key.replace('/', '_')
        return os.path.join(self.path, fkey)

    def check(self, key, can_refresh=True):
        """
        Checks for a fresh cache entry with a given key.

        If can_refresh is True, only cache entries that are within the
        refresh time will be considered.
        If can_refresh is False, cache entries that are older than the
        refresh time, but not past the lifetime limit or discard period,
        will also be considered.
        The intent is to allow older cache entries to be used if a network
        connection is not available or if the origin site is down.

        If successful, this returns the name of the file storing the cached data.
        Otherwise, it returns None.
        """
        now = time.time()
        try:
            path = self.cachefile(key)
            stat = os.stat(path)
            # check freshness
            stale = False
            if can_refresh and self.refresh != -1:
                if stat.st_ctime + (self.refresh * SECS_PER_DAY) < now:
                    stale = True
            if self.lifetime != -1:
                if stat.st_ctime + (self.lifetime * SECS_PER_DAY) < now:
                    stale = True
            if stale:
                print("removing stale cache entry %s:%s" % (self.name, key))
                os.unlink(path)
                return None
            return path
        except Exception as e:
            # A missing entry (ENOENT) is the normal cache-miss path;
            # anything else is worth reporting.
            if not hasattr(e, 'errno') or (e.errno != errno.ENOENT):
                print("error checking cache for %s:%s: %s" % (self.name, key, e))
            return None

    def store(self, key, data):
        """
        Stores an entry in the cache.
        """
        try:
            # construct cache filename
            if not os.path.exists(self.path):
                os.makedirs(self.path, mode=0o700)
            path = self.cachefile(key)
            # consider using gio set contents async?
            f = open(path, 'w')
            f.write(data)
            f.close()
            print("stored cache data %s:%s" % (self.name, key))
        except Exception as e:
            print("exception storing cache data %s:%s: %s" % (self.name, key, e))

    def __fetch_cb(self, data, url, key, callback, args):
        # Loader callback: 'data' is the freshly downloaded payload, or None
        # when the download failed (in which case fall back to any stale
        # cache entry that is still within the lifetime/discard limits).
        if data is None:
            cachefile = self.check(key, False)
            if cachefile is not None:
                f = open(cachefile)
                data = f.read()
                f.close()
                if callback(data, *args) is False:
                    print("cache entry %s:%s invalidated by callback" % (self.name, key))
                    os.unlink(cachefile)
            else:
                callback(None, *args)
        else:
            if callback(data, *args) is False:
                print("cache entry %s:%s invalidated by callback" % (self.name, key))
            else:
                self.store(key, data)

    def fetch(self, key, url, callback, *args):
        """
        Retrieve the specified URL, satisfying the request from the cache
        if possible, and refreshing the cache if necessary.

        The callback function may return False to indicate that the data
        passed to it is invalid.  Generally this should only happen if the
        data cannot be parsed and it is likely that a later attempt to fetch
        from the origin site will result in valid data.
        """
        # check if we've got a fresh entry in the cache
        print("fetching cache entry %s:%s [%s]" % (self.name, key, url))
        cachefile = self.check(key, True)
        if cachefile is not None:
            # could use a loader here, maybe
            f = open(cachefile)
            data = f.read()
            f.close()
            if callback(data, *args) is not False:
                return
            print("cache entry %s:%s invalidated by callback" % (self.name, key))
            os.unlink(cachefile)
        ld = rb.Loader()
        ld.get_url(url, self.__fetch_cb, url, key, callback, args)
# vim: set ts=4 sw=4 expandtab :
| gpl-2.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/scipy/linalg/tests/test_decomp_polar.py | 126 | 2797 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose, assert_equal,
run_module_suite)
from scipy.linalg import polar, eigh
# Small fixed matrices reused by the precomputed cases below.
diag2 = np.array([[2, 0], [0, 3]])
a13 = np.array([[1, 2, 2]])
# Each entry is (a, side, expected unitary factor u, expected p factor).
precomputed_cases = [
    [[[0]], 'right', [[1]], [[0]]],
    [[[0]], 'left', [[1]], [[0]]],
    [[[9]], 'right', [[1]], [[9]]],
    [[[9]], 'left', [[1]], [[9]]],
    [diag2, 'right', np.eye(2), diag2],
    [diag2, 'left', np.eye(2), diag2],
    [a13, 'right', a13/norm(a13[0]), a13.T.dot(a13)/norm(a13[0])],
]
# Matrices of assorted shapes (tall, wide, square) and dtypes (real and
# complex) whose decompositions are verified numerically by verify_polar.
verify_cases = [
    [[1, 2], [3, 4]],
    [[1, 2, 3]],
    [[1], [2], [3]],
    [[1, 2, 3], [3, 4, 0]],
    [[1, 2], [3, 4], [5, 5]],
    [[1, 2], [3, 4+5j]],
    [[1, 2, 3j]],
    [[1], [2], [3j]],
    [[1, 2, 3+2j], [3, 4-1j, -4j]],
    [[1, 2], [3-2j, 4+0.5j], [5, 5]],
    [[10000, 10, 1], [-1, 2, 3j], [0, 1, 2]],
]
def check_precomputed_polar(a, side, expected_u, expected_p):
    """Check polar(a, side) against a known, precomputed factorization."""
    computed_u, computed_p = polar(a, side=side)
    for computed, expected in ((computed_u, expected_u),
                               (computed_p, expected_p)):
        assert_allclose(computed, expected, atol=1e-15)
def verify_polar(a):
    """Compute both polar decompositions of *a* and verify every expected
    mathematical property of the resulting factors."""
    product_atol = np.sqrt(np.finfo(float).eps)
    arr = np.asarray(a)
    m, n = arr.shape

    def check_factors(u, p):
        # The unitary factor has orthonormal columns (tall/square case)
        # or orthonormal rows (wide case).
        if m >= n:
            assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
        else:
            assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
        # p must be Hermitian positive semidefinite.
        assert_allclose(p.conj().T, p)
        evals = eigh(p, eigvals_only=True)
        nonzero_evals = evals[abs(evals) > 1e-14]
        assert_((nonzero_evals >= 0).all())

    # Right decomposition: a == u @ p, with p of shape (n, n).
    u, p = polar(a, side='right')
    assert_equal(u.shape, (m, n))
    assert_equal(p.shape, (n, n))
    assert_allclose(u.dot(p), a, atol=product_atol)
    check_factors(u, p)

    # Left decomposition: a == p @ u, with p of shape (m, m).
    u, p = polar(a, side='left')
    assert_equal(u.shape, (m, n))
    assert_equal(p.shape, (m, m))
    assert_allclose(p.dot(u), a, atol=product_atol)
    check_factors(u, p)
def test_precomputed_cases():
    # Run each precomputed case directly.  The original yielded
    # (check, *args) tuples, a nose-only idiom that pytest no longer
    # collects (yield tests were removed in pytest 4), silently skipping
    # every case.  A plain loop runs under both runners.
    for a, side, expected_u, expected_p in precomputed_cases:
        check_precomputed_polar(a, side, expected_u, expected_p)
def test_verify_cases():
    # Direct loop instead of nose-style yield tests, which pytest no
    # longer collects (silently skipping every case).
    for a in verify_cases:
        verify_polar(a)
# Running this file directly uses numpy's (nose-based) module test runner.
if __name__ == "__main__":
    run_module_suite()
| apache-2.0 |
sgzsh269/django | tests/gis_tests/utils.py | 124 | 1965 | import unittest
from functools import wraps
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS, connection
def skipUnlessGISLookup(*gis_lookups):
    """
    Skip a test unless a database supports all of gis_lookups.
    """
    def decorator(test_func):
        @wraps(test_func)
        def skip_wrapper(*args, **kwargs):
            # The support check runs at call time, against whatever
            # connection is active when the test executes.
            unsupported = [lookup for lookup in gis_lookups
                           if lookup not in connection.ops.gis_operators]
            if unsupported:
                raise unittest.SkipTest(
                    "Database doesn't support all the lookups: %s" % ", ".join(gis_lookups)
                )
            return test_func(*args, **kwargs)
        return skip_wrapper
    return decorator
def no_backend(test_func, backend):
    "Use this decorator to disable test on specified backend."
    # The engine name is the last dotted component of the ENGINE setting,
    # e.g. 'django.db.backends.oracle' -> 'oracle'.
    engine = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
    if engine != backend:
        return test_func
    @unittest.skip("This test is skipped on '%s' backend" % backend)
    def inner():
        pass
    return inner
# Decorators to disable entire test functions for specific
# spatial backends.
def no_oracle(func):
    return no_backend(func, 'oracle')
# Shortcut booleans to omit only portions of tests.
# _default_db is the last dotted component of the default ENGINE setting,
# e.g. 'django.contrib.gis.db.backends.postgis' -> 'postgis'.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
oracle = _default_db == 'oracle'
postgis = _default_db == 'postgis'
mysql = _default_db == 'mysql'
spatialite = _default_db == 'spatialite'
# MySQL spatial indices can't handle NULL geometries.
gisfield_may_be_null = not mysql
# Pick the backend-specific SpatialRefSys model; None when the active
# backend has no spatial reference system table.
if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']:
    from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys as SpatialRefSys
elif postgis:
    from django.contrib.gis.db.backends.postgis.models import PostGISSpatialRefSys as SpatialRefSys
elif spatialite:
    from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys as SpatialRefSys
else:
    SpatialRefSys = None
| bsd-3-clause |
CG-F16-4-Rutgers/steersuite-rutgers | steerstats/tools/deap/base.py | 10 | 11067 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""The :mod:`~deap.base` module provides basic structures to build
evolutionary algorithms. It contains the :class:`~deap.base.Toolbox`, useful
to store evolutionary operators, and a virtual :class:`~deap.base.Fitness`
class used as base class, for the fitness member of any individual. """
import sys
from collections import Sequence
from copy import deepcopy
from functools import partial
from operator import mul, truediv
class Toolbox(object):
    """Container of evolutionary operators.

    A fresh toolbox provides two defaults: ``clone`` (bound to
    :func:`copy.deepcopy`) and ``map`` (bound to the built-in
    :func:`map`).  Additional operators are attached with
    :meth:`register`, detached with :meth:`unregister`, and wrapped
    with :meth:`decorate`.
    """

    def __init__(self):
        self.register("clone", deepcopy)
        self.register("map", map)

    def register(self, alias, function, *args, **kargs):
        """Attach *function* to the toolbox as attribute *alias*,
        pre-binding any positional and keyword arguments supplied here;
        they may still be overridden at call time.  Re-registering an
        existing alias silently replaces it.

        The stored callable is given ``__name__`` set to *alias* and
        ``__doc__`` copied from the original function.
        """
        bound = partial(function, *args, **kargs)
        bound.__name__ = alias
        bound.__doc__ = function.__doc__
        # Some callables (and classes, whose mapping proxy must not be
        # copied) provide no usable instance dictionary; only copy one
        # when it is actually present on a non-class callable.
        if hasattr(function, "__dict__") and not isinstance(function, type):
            bound.__dict__.update(function.__dict__.copy())
        setattr(self, alias, bound)

    def unregister(self, alias):
        """Remove the operator registered under *alias* from the toolbox."""
        delattr(self, alias)

    def decorate(self, alias, *decorators):
        """Re-register *alias* with *decorators* applied, in order, to the
        underlying function; previously bound arguments are preserved.

        .. note::
           Decorating through the toolbox makes the operator unpicklable,
           which matters in distributed settings such as multiprocessing;
           decorate with ``@`` before registration to stay picklable.
        """
        bound = getattr(self, alias)
        function = bound.func
        for decorator in decorators:
            function = decorator(function)
        self.register(alias, function, *bound.args, **bound.keywords)
class Fitness(object):
    """The fitness is a measure of quality of a solution. If *values* are
    provided as a tuple, the fitness is initalized using those values,
    otherwise it is empty (or invalid).
    :param values: The initial values of the fitness as a tuple, optional.
    Fitnesses may be compared using the ``>``, ``<``, ``>=``, ``<=``, ``==``,
    ``!=``. The comparison of those operators is made lexicographically.
    Maximization and minimization are taken care off by a multiplication
    between the :attr:`weights` and the fitness :attr:`values`. The comparison
    can be made between fitnesses of different size, if the fitnesses are
    equal until the extra elements, the longer fitness will be superior to the
    shorter.
    Different types of fitnesses are created in the :ref:`creating-types`
    tutorial.
    .. note::
        When comparing fitness values that are **minimized**, ``a > b`` will
        return :data:`True` if *a* is **smaller** than *b*.
    """
    weights = None
    """The weights are used in the fitness comparison. They are shared among
    all fitnesses of the same type. When subclassing :class:`Fitness`, the
    weights must be defined as a tuple where each element is associated to an
    objective. A negative weight element corresponds to the minimization of
    the associated objective and positive weight to the maximization.
    .. note::
        If weights is not defined during subclassing, the following error will
        occur at instantiation of a subclass fitness object:
        ``TypeError: Can't instantiate abstract <class Fitness[...]> with
        abstract attribute weights.``
    """
    wvalues = ()
    """Contains the weighted values of the fitness, the multiplication with the
    weights is made when the values are set via the property :attr:`values`.
    Multiplication is made on setting of the values for efficiency.
    Generally it is unnecessary to manipulate wvalues as it is an internal
    attribute of the fitness used in the comparison operators.
    """
    def __init__(self, values=()):
        # Subclasses must define a sequence of weights; refusing to
        # instantiate otherwise emulates an abstract attribute.
        if self.weights is None:
            raise TypeError("Can't instantiate abstract %r with abstract "
                            "attribute weights." % (self.__class__))
        if not isinstance(self.weights, Sequence):
            raise TypeError("Attribute weights of %r must be a sequence."
                            % self.__class__)
        if len(values) > 0:
            self.values = values
    # Accessor triplet backing the 'values' property declared below.
    def getValues(self):
        # Divide the weights back out to recover the unweighted values.
        return tuple(map(truediv, self.wvalues, self.weights))
    def setValues(self, values):
        try:
            self.wvalues = tuple(map(mul, values, self.weights))
        except TypeError:
            _, _, traceback = sys.exc_info()
            # NOTE: Python-2-only three-argument raise, used to re-raise
            # with the traceback of the failed multiplication above.
            raise TypeError, ("Both weights and assigned values must be a "
                              "sequence of numbers when assigning to values of "
                              "%r. Currently assigning value(s) %r of %r to a fitness with "
                              "weights %s."
                              % (self.__class__, values, type(values), self.weights)), traceback
    def delValues(self):
        # Emptying wvalues marks the fitness as invalid (see 'valid').
        self.wvalues = ()
    values = property(getValues, setValues, delValues,
                      ("Fitness values. Use directly ``individual.fitness.values = values`` "
                       "in order to set the fitness and ``del individual.fitness.values`` "
                       "in order to clear (invalidate) the fitness. The (unweighted) fitness "
                       "can be directly accessed via ``individual.fitness.values``."))
    def dominates(self, other, obj=slice(None)):
        """Return true if each objective of *self* is not strictly worse than
        the corresponding objective of *other* and at least one objective is
        strictly better.
        :param obj: Slice indicating on which objectives the domination is
                    tested. The default value is `slice(None)`, representing
                    every objectives.
        """
        not_equal = False
        # Pareto dominance on the weighted values: any strictly worse
        # objective fails immediately; at least one strictly better
        # objective is required for domination.
        for self_wvalue, other_wvalue in zip(self.wvalues[obj], other.wvalues[obj]):
            if self_wvalue > other_wvalue:
                not_equal = True
            elif self_wvalue < other_wvalue:
                return False
        return not_equal
    @property
    def valid(self):
        """Assess if a fitness is valid or not."""
        return len(self.wvalues) != 0
    def __hash__(self):
        return hash(self.wvalues)
    # All rich comparisons operate on the weighted values, so "greater"
    # always means "better" regardless of each weight's sign.
    def __gt__(self, other):
        return not self.__le__(other)
    def __ge__(self, other):
        return not self.__lt__(other)
    def __le__(self, other):
        return self.wvalues <= other.wvalues
    def __lt__(self, other):
        return self.wvalues < other.wvalues
    def __eq__(self, other):
        return self.wvalues == other.wvalues
    def __ne__(self, other):
        return not self.__eq__(other)
    def __deepcopy__(self, memo):
        """Replace the basic deepcopy function with a faster one.
        It assumes that the elements in the :attr:`values` tuple are
        immutable and the fitness does not contain any other object
        than :attr:`values` and :attr:`weights`.
        """
        copy_ = self.__class__()
        copy_.wvalues = self.wvalues
        return copy_
    def __str__(self):
        """Return the values of the Fitness object."""
        return str(self.values if self.valid else tuple())
    def __repr__(self):
        """Return the Python code to build a copy of the object."""
        return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
                              self.values if self.valid else tuple())
| gpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pywin32-219/Pythonwin/pywin/dialogs/login.py | 34 | 4245 | '''login -- PythonWin user ID and password dialog box
(Adapted from the version originally distributed with Mark Hammond's
PythonWin - this now replaces it!)
login.GetLogin() displays a modal "OK/Cancel" dialog box with input
fields for a user ID and password. The password field input is masked
with *'s. GetLogin takes two optional parameters, a window title, and a
default user ID. If these parameters are omitted, the title defaults to
"Login", and the user ID is left blank. GetLogin returns a (userid, password)
tuple. GetLogin can be called from scripts running on the console - i.e. you
don't need to write a full-blown GUI app to use it.
login.GetPassword() is similar, except there is no username field.
Example:
import pywin.dialogs.login
title = "FTP Login"
def_user = "fred"
userid, password = pywin.dialogs.login.GetLogin(title, def_user)
Jim Eggleston, 28 August 1996
Merged with dlgpass and moved to pywin.dialogs by Mark Hammond Jan 1998.
'''
import win32ui
import win32api
import win32con
from pywin.mfc import dialog
def MakeLoginDlgTemplate(title):
    """Return a PyDLGTEMPLATE list describing the login dialog: a user ID
    field, a masked password field, and OK/Cancel buttons."""
    frame_style = (win32con.DS_MODALFRAME | win32con.WS_POPUP
                   | win32con.WS_VISIBLE | win32con.WS_CAPTION
                   | win32con.WS_SYSMENU | win32con.DS_SETFONT)
    child = win32con.WS_CHILD | win32con.WS_VISIBLE
    edit = child | win32con.WS_TABSTOP | win32con.WS_BORDER
    button = child | win32con.WS_TABSTOP
    # Window frame and title.
    dlg = [[title, (0, 0, 184, 40), frame_style, None, (8, "MS Sans Serif")]]
    # User ID label and input box.
    dlg.append([130, "User ID:", -1, (7, 9, 69, 9), child | win32con.SS_LEFT])
    dlg.append(['EDIT', None, win32ui.IDC_EDIT1, (50, 7, 60, 12), edit])
    # Password label and masked input box.
    dlg.append([130, "Password:", -1, (7, 22, 69, 9), child | win32con.SS_LEFT])
    dlg.append(['EDIT', None, win32ui.IDC_EDIT2, (50, 20, 60, 12),
                edit | win32con.ES_PASSWORD])
    # OK (default) and Cancel buttons.
    dlg.append([128, "OK", win32con.IDOK, (124, 5, 50, 14),
                button | win32con.BS_DEFPUSHBUTTON])
    dlg.append([128, "Cancel", win32con.IDCANCEL, (124, 20, 50, 14),
                win32con.BS_PUSHBUTTON | button])
    return dlg
def MakePasswordDlgTemplate(title):
    """Return a PyDLGTEMPLATE list describing the password-only dialog:
    a masked password field plus OK/Cancel buttons."""
    frame_style = (win32con.DS_MODALFRAME | win32con.WS_POPUP
                   | win32con.WS_VISIBLE | win32con.WS_CAPTION
                   | win32con.WS_SYSMENU | win32con.DS_SETFONT)
    child = win32con.WS_CHILD | win32con.WS_VISIBLE
    button = child | win32con.WS_TABSTOP | win32con.BS_PUSHBUTTON
    # Window frame and title.
    dlg = [[title, (0, 0, 177, 45), frame_style, None, (8, "MS Sans Serif")]]
    # Password label and masked input box.
    dlg.append([130, "Password:", -1, (7, 7, 69, 9), child | win32con.SS_LEFT])
    dlg.append(['EDIT', None, win32ui.IDC_EDIT1, (50, 7, 60, 12),
                child | win32con.WS_TABSTOP | win32con.WS_BORDER
                | win32con.ES_PASSWORD])
    # OK (default) and Cancel buttons.
    dlg.append([128, "OK", win32con.IDOK, (124, 5, 50, 14),
                button | win32con.BS_DEFPUSHBUTTON])
    dlg.append([128, "Cancel", win32con.IDCANCEL, (124, 22, 50, 14), button])
    return dlg
class LoginDlg(dialog.Dialog):
    """Modal MFC dialog with a user ID field and a masked password field."""
    # NOTE(review): 'Cancel' appears unused in this module — possibly kept
    # for older external callers; confirm before removing.
    Cancel = 0
    def __init__(self, title):
        dialog.Dialog.__init__(self, MakeLoginDlgTemplate(title) )
        # DDX maps the edit controls to dictionary-style keys on the dialog.
        self.AddDDX(win32ui.IDC_EDIT1,'userid')
        self.AddDDX(win32ui.IDC_EDIT2,'password')
def GetLogin(title='Login', userid='', password=''):
    """Show the login dialog and return a (userid, password) tuple,
    or (None, None) if the user cancelled."""
    dlg = LoginDlg(title)
    dlg['userid'] = userid
    dlg['password'] = password
    if dlg.DoModal() == win32con.IDOK:
        return (dlg['userid'], dlg['password'])
    return (None, None)
class PasswordDlg(dialog.Dialog):
    """Modal MFC dialog with a single masked password field."""
    def __init__(self, title):
        dialog.Dialog.__init__(self, MakePasswordDlgTemplate(title) )
        # DDX maps the edit control to a dictionary-style key on the dialog.
        self.AddDDX(win32ui.IDC_EDIT1,'password')
def GetPassword(title='Password', password=''):
    """Show the password dialog and return the entered password,
    or None if the user cancelled."""
    dlg = PasswordDlg(title)
    dlg['password'] = password
    result = dlg.DoModal()
    if result != win32con.IDOK:
        return None
    return dlg['password']
# Interactive demo: login [title [default_userid]]
if __name__ == "__main__":
    import sys
    title = 'Login'
    def_user = ''
    if len(sys.argv) > 1:
        title = sys.argv[1]
    if len(sys.argv) > 2:
        # Bug fix: this previously assigned to 'def_userid', a dead
        # variable, so the command-line default user ID was ignored.
        def_user = sys.argv[2]
    userid, password = GetLogin(title, def_user)
    if userid is None and password is None:
        print("User pressed Cancel")
    else:
        print("User ID:  %s" % userid)
        print("Password: %s" % password)
        newpassword = GetPassword("Reenter just for fun", password)
        if newpassword is None:
            print("User cancelled")
        else:
            what = ""
            if newpassword != password:
                what = "not "
            print("The passwords did %smatch" % (what))
| mit |
AudioGod/Gods_kernel_YU | scripts/gcc-wrapper.py | 580 | 3524 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale.  TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
# Warnings emitted at exactly these "file:line" locations are tolerated;
# any other warning aborts the build.
allowed_warnings = set([
    "return_address.c:63",
    "kprobes.c:1493",
    "rcutree.c:1614",
    "af_unix.c:893",
    "nl80211.c:58",
    "jhash.h:137",
    "cmpxchg.h:162",
    "ping.c:87",
 ])
# Capture the name of the object file, so we can remove it if a
# forbidden warning is seen.
ofile = None
# group(2) of a match is the "file.ext:line" portion checked against
# the whitelist above.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc.  The messages we care about have a
    filename, and a warning.  Exits the process (and deletes any partial
    object file) on a non-whitelisted warning.

    Fixed to use the Python-2/3-compatible print() form instead of the
    Python-2-only print statement.
    """
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print("error, forbidden warning: %s" % m.group(2))
        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        sys.exit(1)
def run_gcc():
    """Run the real compiler with our arguments, scanning its stderr for
    forbidden warnings, and return its exit status (or an errno if the
    compiler could not be launched).

    Fixed to use Python-2/3-compatible print()/`except ... as` syntax and
    drops the unused 'compiler' local.
    """
    args = sys.argv[1:]
    # Look for -o so interpret_warning() can delete the partial object.
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass
    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        for line in proc.stderr:
            # Echo the compiler's diagnostics verbatim, then inspect them.
            sys.stdout.write(line)
            interpret_warning(line)
        result = proc.wait()
    except OSError as e:
        result = e.errno
        if result == errno.ENOENT:
            print(args[0] + ': ' + e.strerror)
            print('Is your PATH set correctly?')
        else:
            print(' '.join(args) + ' ' + str(e))
    return result
# Invoke the wrapped compiler and propagate its exit status.
if __name__ == '__main__':
    status = run_gcc()
    sys.exit(status)
| gpl-2.0 |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_ref_by_trainer_id_op.py | 4 | 1177 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestRefByTrainerIdOp(OpTest):
    """Op test for ref_by_trainer_id: from a list of parameter blocks X,
    the op must return the block selected by the TrainerId input."""

    def setUp(self):
        self.op_type = "ref_by_trainer_id"
        blocks = []
        for idx in range(10):
            blocks.append(("x%d" % idx,
                           np.random.random((10, 10)).astype("float32")))
        self.inputs = {
            'X': blocks,
            'TrainerId': np.array([8]).astype("int64")
        }
        # Expected output: the tensor of the block indexed by TrainerId.
        self.outputs = {'Out': blocks[8][1]}

    def test_check_output(self):
        self.check_output()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
gnina/scripts | pymol_arrows.py | 1 | 5070 | #!/usr/bin/env python3
import sys
import os
import argparse
def write_pymol_arrows(base, structs, scale, color, radius, hradius, hlength, threshold):
    """Write <base>_arrows.pymol containing one cgo_arrow command per atom
    gradient (skipping arrows whose scaled length is below *threshold*),
    followed by a command grouping all arrow objects."""
    script_path = base + '_arrows.pymol'
    commands = []
    arrow_objs = set()
    thresh_sq = threshold**2
    scale_sq = scale**2
    multi_state = len(structs) > 1
    for state_idx, struct in enumerate(structs):
        for atom_idx, atom in enumerate(struct):
            arrow_obj = base + '_arrow_' + str(atom_idx)
            arrow_objs.add(arrow_obj)
            elem, xi, yi, zi, dx, dy, dz = atom
            # Arrow tip: start point displaced by the scaled gradient.
            parts = ['cgo_arrow [{}, {}, {}], [{}, {}, {}]'.format(
                xi, yi, zi, xi + scale*dx, yi + scale*dy, zi + scale*dz)]
            if multi_state:
                parts.append('state={}'.format(state_idx + 1))
            if radius:
                parts.append('radius={}'.format(radius))
            if hradius > 0:
                parts.append('hradius={}'.format(hradius))
            if hlength > 0:
                parts.append('hlength={}'.format(hlength))
            if color:
                parts.append('color={}'.format(color))
            parts.append('name={}'.format(arrow_obj))
            # Compare squared lengths to avoid a sqrt per atom.
            if (dx**2 + dy**2 + dz**2)*scale_sq > thresh_sq:
                commands.append(', '.join(parts))
    arrow_group = base + '_arrows'
    commands.append('group {}, {}'.format(arrow_group, ' '.join(arrow_objs)))
    with open(script_path, 'w') as f:
        f.write('\n'.join(commands))
def xyz_line_to_atom(xyz_line):
    """Parse one xyz body line into (elem, x, y, z, dx, dy, dz): element
    symbol, position, and gradient components.  Extra fields are ignored."""
    fields = xyz_line.split()
    elem = fields[0]
    numbers = [float(fields[k]) for k in range(1, 7)]
    return (elem, numbers[0], numbers[1], numbers[2],
            numbers[3], numbers[4], numbers[5])
def atom_to_pdb_line(atom, idx, dosum):
    """Format *atom* as a PDB ATOM record whose b-factor column holds the
    gradient magnitude (or the component sum when *dosum* is true).

    Raises TypeError for a serial number outside 0..99999 and IndexError
    for an element symbol that is not 1 or 2 characters long.
    """
    if not isinstance(idx, int) or idx < 0 or idx > 99999:
        raise TypeError('idx must be an integer from 0 to 99999 ({})'.format(idx))
    elem, x, y, z, dx, dy, dz = atom
    if len(elem) not in {1, 2}:
        raise IndexError('atom elem must be a string of length 1 or 2 ({})'.format(elem))
    if dosum:
        d = dx+dy+dz
    else:
        d = (dx**2 + dy**2 + dz**2)**0.5
    # Bug fix: the b-factor field used '{:6f}', which prints full float
    # precision (e.g. '1.732051') and breaks the fixed-width column
    # layout; like the occupancy field, it must be '{:6.2f}'.
    return '{:6}{:5} {:4}{:1}{:3} {:1}{:4}{:1} {:8.3f}{:8.3f}{:8.3f}{:6.2f}{:6.2f} {:2}{:2}' \
        .format('ATOM', idx, '', '', '', '', '', '', x, y, z, 1.0, d, elem.rjust(2), '')
def read_xyz_file(xyz_file, header_len=2):
    """Read a (possibly multi-model) .xyz file and return a list of
    structs, each struct being a list of parsed atom tuples."""
    with open(xyz_file, 'r') as f:
        lines = f.readlines()
    structs = []
    struct_start = 0
    for i, line in enumerate(lines):
        try:
            # Position of this line relative to the current struct's start.
            rel = i - struct_start
            if rel == 0 or rel >= header_len + n_atoms:
                # First line of a new struct: the atom count.
                struct_start = i
                structs.append([])
                n_atoms = int(lines[i])
            elif rel < header_len:
                # Comment/title line within the header: skip.
                continue
            else:
                structs[-1].append(xyz_line_to_atom(lines[i]))
        except:
            # Report the offending location before re-raising.
            print('{}:{} {}'.format(xyz_file, i, repr(line)), file=sys.stderr)
            raise
    return structs
def write_pdb_file(pdb_file, atoms, dosum):
    """Write *atoms* as PDB ATOM records to *pdb_file*, or print them to
    stdout when *pdb_file* is falsy."""
    records = [atom_to_pdb_line(atom, serial, dosum)
               for serial, atom in enumerate(atoms)]
    text = '\n'.join(records)
    if pdb_file:
        with open(pdb_file, 'w') as f:
            f.write(text)
    else:
        print(text)
def parse_args():
    """Define and parse this script's command-line interface."""
    ap = argparse.ArgumentParser(description='Output a pymol script that creates \
        arrows from an .xyz file containing atom coordinates and gradient components, \
        can also create a .pdb file where the b-factor is the gradient magnitude')
    ap.add_argument('xyz_file')
    # (flags, options) pairs, kept in the original declaration order so
    # the generated --help output is unchanged.
    arg_specs = [
        (('-s', '--scale'),
         dict(type=float, default=1.0, help='Arrow length scaling factor')),
        (('-c', '--color'),
         dict(type=str, default='',
              help='Arrow color or pair of colors, e.g. "white black"')),
        (('-r', '--radius'),
         dict(type=float, default=0.2, help='Radius of arrow body')),
        (('-hr', '--hradius'),
         dict(type=float, default=-1, help='Radius of arrow head')),
        (('-hl', '--hlength'),
         dict(type=float, default=-1, help='Length of arrow head')),
        (('-p', '--pdb_file'),
         dict(action='store_true', default=False,
              help='Output a .pdb file where the b-factor is gradient magnitude')),
        (('--sum',),
         dict(action='store_true', default=False,
              help='Sum gradient components instead of taking magnitude')),
        (('-t', '--threshold'),
         dict(type=float, default=0,
              help="Gradient threshold for drawing arrows (using scale factor)")),
    ]
    for flags, options in arg_specs:
        ap.add_argument(*flags, **options)
    return ap.parse_args()
if __name__ == '__main__':
    args = parse_args()
    structs = read_xyz_file(args.xyz_file)
    base_name = args.xyz_file.replace('.xyz', '')
    write_pymol_arrows(base_name, structs, args.scale, args.color, args.radius,
                       args.hradius, args.hlength, args.threshold)
    if args.pdb_file:
        pdb_file = base_name + '.pdb'
        # Bug fix: 'atoms' was undefined here, so '-p' always raised
        # NameError.  Flatten all structs so every state's atoms are
        # written to the single output .pdb file.
        atoms = [atom for struct in structs for atom in struct]
        write_pdb_file(pdb_file, atoms, args.sum)
| bsd-3-clause |
DDelon/youtube-dl | youtube_dl/extractor/aljazeera.py | 20 | 1321 | from __future__ import unicode_literals
from .common import InfoExtractor
class AlJazeeraIE(InfoExtractor):
    """Extractor for programme episode pages on aljazeera.com; the actual
    video is hosted on Brightcove, so extraction delegates to the legacy
    Brightcove extractor."""
    # The named 'id' group captures the trailing page slug.
    _VALID_URL = r'http://www\.aljazeera\.com/programmes/.*?/(?P<id>[^/]+)\.html'
    _TEST = {
        'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html',
        'info_dict': {
            'id': '3792260579001',
            'ext': 'mp4',
            'title': 'The Slum - Episode 1: Deliverance',
            'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
            'uploader': 'Al Jazeera English',
        },
        'add_ie': ['BrightcoveLegacy'],
        'skip': 'Not accessible from Travis CI server',
    }
    def _real_extract(self, url):
        """Pull the Brightcove video id out of the page's
        RenderPagesVideo(...) call and hand off via a brightcove: URL."""
        program_name = self._match_id(url)
        webpage = self._download_webpage(url, program_name)
        brightcove_id = self._search_regex(
            r'RenderPagesVideo\(\'(.+?)\'', webpage, 'brightcove id')
        return {
            '_type': 'url',
            'url': (
                'brightcove:'
                'playerKey=AQ~~%2CAAAAmtVJIFk~%2CTVGOQ5ZTwJbeMWnq5d_H4MOM57xfzApc'
                '&%40videoPlayer={0}'.format(brightcove_id)
            ),
            'ie_key': 'BrightcoveLegacy',
        }
| unlicense |
abcht/pattern | pattern/server/cherrypy/cherrypy/_cpmodpy.py | 41 | 10979 | """Native adapter for serving CherryPy via mod_python
Basic usage:
##########################################
# Application in a module called myapp.py
##########################################
import cherrypy
class Root:
@cherrypy.expose
def index(self):
return 'Hi there, Ho there, Hey there'
# We will use this method from the mod_python configuration
# as the entry point to our application
def setup_server():
cherrypy.tree.mount(Root())
cherrypy.config.update({'environment': 'production',
'log.screen': False,
'show_tracebacks': False})
##########################################
# mod_python settings for apache2
# This should reside in your httpd.conf
# or a file that will be loaded at
# apache startup
##########################################
# Start
DocumentRoot "/"
Listen 8080
LoadModule python_module /usr/lib/apache2/modules/mod_python.so
<Location "/">
PythonPath "sys.path+['/path/to/my/application']"
SetHandler python-program
PythonHandler cherrypy._cpmodpy::handler
PythonOption cherrypy.setup myapp::setup_server
PythonDebug On
</Location>
# End
The actual path to your mod_python.so is dependent on your
environment. In this case we suppose a global mod_python
installation on a Linux distribution such as Ubuntu.
We do set the PythonPath configuration setting so that
your application can be found by the user running
the apache2 instance. Of course, if your application
resides in the global site-packages this won't be needed.
Then restart apache2 and access http://127.0.0.1:8080
"""
import logging
import sys
import cherrypy
from cherrypy._cpcompat import BytesIO, copyitems, ntob
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
# ------------------------------ Request-handling
def setup(req):
    """One-time CherryPy initialisation for life inside Apache/mod_python.

    Runs any callables named by a "PythonOption cherrypy.setup" directive,
    reconfigures CherryPy (no console logging, no own HTTP server, no
    signal/autoreload handling), routes engine log messages to Apache's
    error log, starts the engine, and registers a cleanup hook to stop it.
    """
    from mod_python import apache

    # Run any setup functions defined by a "PythonOption cherrypy.setup" directive.
    options = req.get_options()
    if 'cherrypy.setup' in options:
        for function in options['cherrypy.setup'].split():
            # Each entry is either "module" or "module::function".
            atoms = function.split('::', 1)
            if len(atoms) == 1:
                mod = __import__(atoms[0], globals(), locals())
            else:
                modname, fname = atoms
                mod = __import__(modname, globals(), locals(), [fname])
                func = getattr(mod, fname)
                func()

    # Apache owns the transport, so ignore the Range header (mod_python
    # applies it itself) and keep CherryPy off the console.
    cherrypy.config.update({'log.screen': False,
                            "tools.ignore_headers.on": True,
                            "tools.ignore_headers.headers": ['Range'],
                            })

    # Detach everything CherryPy would normally own as a standalone server:
    # signal handlers, autoreload, and its built-in HTTP server.
    engine = cherrypy.engine
    if hasattr(engine, "signal_handler"):
        engine.signal_handler.unsubscribe()
    if hasattr(engine, "console_control_handler"):
        engine.console_control_handler.unsubscribe()
    engine.autoreload.unsubscribe()
    cherrypy.server.unsubscribe()

    def _log(msg, level):
        # Map stdlib logging levels onto the closest Apache log level.
        newlevel = apache.APLOG_ERR
        if logging.DEBUG >= level:
            newlevel = apache.APLOG_DEBUG
        elif logging.INFO >= level:
            newlevel = apache.APLOG_INFO
        elif logging.WARNING >= level:
            newlevel = apache.APLOG_WARNING
        # On Windows, req.server is required or the msg will vanish. See
        # http://www.modpython.org/pipermail/mod_python/2003-October/014291.html.
        # Also, "When server is not specified...LogLevel does not apply..."
        apache.log_error(msg, newlevel, req.server)
    engine.subscribe('log', _log)

    engine.start()

    def cherrypy_cleanup(data):
        # Stop the CherryPy engine when the Apache (sub)process shuts down.
        engine.exit()
    try:
        # apache.register_cleanup wasn't available until 3.1.4.
        apache.register_cleanup(cherrypy_cleanup)
    except AttributeError:
        req.server.register_cleanup(req, cherrypy_cleanup)
class _ReadOnlyRequest:
expose = ('read', 'readline', 'readlines')
def __init__(self, req):
for method in self.expose:
self.__dict__[method] = getattr(req, method)
# When True, an InternalRedirect may revisit a URL it has already served;
# when False (default), handler() raises RuntimeError on such loops.
recursive = False

# Module-level guard so setup() runs only once per Apache (sub)interpreter.
_isSetUp = False
def handler(req):
    """mod_python entry point: serve *req* through the mounted CherryPy tree.

    Performs one-time setup, translates the mod_python request into a
    CherryPy request, follows InternalRedirects, and copies the CherryPy
    response back onto *req*. Always returns apache.OK; unexpected errors
    are logged and reported as a bare CherryPy error response.
    """
    from mod_python import apache
    try:
        global _isSetUp
        if not _isSetUp:
            setup(req)
            _isSetUp = True

        # Obtain a Request object from CherryPy
        local = req.connection.local_addr
        local = httputil.Host(local[0], local[1], req.connection.local_host or "")
        remote = req.connection.remote_addr
        remote = httputil.Host(remote[0], remote[1], req.connection.remote_host or "")

        scheme = req.parsed_uri[0] or 'http'
        req.get_basic_auth_pw()

        try:
            # apache.mpm_query only became available in mod_python 3.1
            q = apache.mpm_query
            threaded = q(apache.AP_MPMQ_IS_THREADED)
            forked = q(apache.AP_MPMQ_IS_FORKED)
        except AttributeError:
            bad_value = ("You must provide a PythonOption '%s', "
                         "either 'on' or 'off', when running a version "
                         "of mod_python < 3.1")

            # Bug fix: `options` was referenced below without ever being
            # bound in this function, raising NameError instead of the
            # intended ValueError on mod_python < 3.1.
            options = req.get_options()

            threaded = options.get('multithread', '').lower()
            if threaded == 'on':
                threaded = True
            elif threaded == 'off':
                threaded = False
            else:
                raise ValueError(bad_value % "multithread")

            forked = options.get('multiprocess', '').lower()
            if forked == 'on':
                forked = True
            elif forked == 'off':
                forked = False
            else:
                raise ValueError(bad_value % "multiprocess")

        sn = cherrypy.tree.script_name(req.uri or "/")
        if sn is None:
            send_response(req, '404 Not Found', [], '')
        else:
            app = cherrypy.tree.apps[sn]
            method = req.method
            path = req.uri
            qs = req.args or ""
            reqproto = req.protocol
            headers = copyitems(req.headers_in)
            rfile = _ReadOnlyRequest(req)
            prev = None

            try:
                redirections = []
                while True:
                    request, response = app.get_serving(local, remote, scheme,
                                                        "HTTP/1.1")
                    request.login = req.user
                    request.multithread = bool(threaded)
                    request.multiprocess = bool(forked)
                    request.app = app
                    request.prev = prev

                    # Run the CherryPy Request object and obtain the response
                    try:
                        request.run(method, path, qs, reqproto, headers, rfile)
                        break
                    except cherrypy.InternalRedirect:
                        ir = sys.exc_info()[1]
                        app.release_serving()
                        prev = request

                        if not recursive:
                            # Guard against redirect loops unless the module
                            # was explicitly configured to allow them.
                            if ir.path in redirections:
                                raise RuntimeError("InternalRedirector visited the "
                                                   "same URL twice: %r" % ir.path)
                            else:
                                # Add the *previous* path_info + qs to redirections.
                                if qs:
                                    qs = "?" + qs
                                redirections.append(sn + path + qs)

                        # Munge environment and try again.
                        method = "GET"
                        path = ir.path
                        qs = ir.query_string
                        rfile = BytesIO()

                send_response(req, response.output_status, response.header_list,
                              response.body, response.stream)
            finally:
                app.release_serving()
    except:
        # Deliberate catch-all boundary: log the traceback and return a
        # minimal error response rather than letting Apache see the error.
        tb = format_exc()
        cherrypy.log(tb, 'MOD_PYTHON', severity=logging.ERROR)
        s, h, b = bare_error()
        send_response(req, s, h, b)
    return apache.OK
def send_response(req, status, headers, body, stream=False):
    """Copy a CherryPy response (status, headers, body) onto a mod_python request.

    *status* is a string like "200 OK"; *headers* is a list of (name, value)
    pairs; *body* is a string or an iterable of chunks. When *stream* is
    true, status and headers are flushed before the body is written.
    """
    # Status line "200 OK" -> integer code.
    req.status = int(status[:3])

    # Content-Type is a dedicated attribute on mod_python requests;
    # every other header goes through headers_out.
    req.content_type = "text/plain"
    for name, value in headers:
        if name.lower() == 'content-type':
            req.content_type = value
        else:
            req.headers_out.add(name, value)

    if stream:
        # Flush now so the status and headers are sent immediately.
        req.flush()

    # Single string body vs. an iterable of segments.
    if isinstance(body, basestring):
        req.write(body)
    else:
        for chunk in body:
            req.write(chunk)
# --------------- Startup tools for CherryPy + mod_python --------------- #
import os
import re
# popen(fullcmd) runs a shell command and returns a file-like handle on its
# combined stdout/stderr. Prefer subprocess; fall back to os.popen4 on
# ancient Pythons that predate the subprocess module.
try:
    import subprocess

    def popen(fullcmd):
        # stderr is merged into stdout to mirror os.popen4's behavior.
        p = subprocess.Popen(fullcmd, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             close_fds=True)
        return p.stdout
except ImportError:

    def popen(fullcmd):
        # os.popen4 returns (child stdin, combined stdout+stderr).
        pipein, pipeout = os.popen4(fullcmd)
        return pipeout
def read_process(cmd, args=""):
    """Run ``cmd args`` in a shell and return its combined output.

    Raises IOError if the first output line indicates the command was
    not found on the system path.
    """
    pipeout = popen("%s %s" % (cmd, args))
    try:
        first = pipeout.readline()
        # Shells report missing binaries in the first line of output.
        missing = re.search(ntob("(not recognized|No such file|not found)"),
                            first, re.IGNORECASE)
        if missing:
            raise IOError('%s must be on your system path.' % cmd)
        output = first + pipeout.read()
    finally:
        pipeout.close()
    return output
class ModPythonServer(object):
    """Write an Apache config for mod_python + CherryPy and start/stop Apache.

    This class only controls the Apache process; request handling is done
    by the module-level handler() named in the generated config file.
    """

    template = """
# Apache2 server configuration file for running CherryPy with mod_python.

DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so

<Location %(loc)s>
    SetHandler python-program
    PythonHandler %(handler)s
    PythonDebug On
%(opts)s
</Location>
"""

    def __init__(self, loc="/", port=80, opts=None, apache_path="apache",
                 handler="cherrypy._cpmodpy::handler"):
        self.loc = loc
        self.port = port
        # Bug fix: the None default used to crash start() when it iterated
        # self.opts; normalize to an (empty) sequence of (key, value) pairs.
        self.opts = opts or []
        self.apache_path = apache_path
        self.handler = handler

    def start(self):
        """Render the config next to this module and launch Apache.

        Returns whatever the apache binary printed on startup; raises
        IOError (via read_process) if the binary is not on the path.
        """
        opts = "".join(["    PythonOption %s %s\n" % (k, v)
                        for k, v in self.opts])
        conf_data = self.template % {"port": self.port,
                                     "loc": self.loc,
                                     "opts": opts,
                                     "handler": self.handler,
                                     }

        mpconf = os.path.join(os.path.dirname(__file__), "cpmodpy.conf")
        # NOTE(review): opened in binary mode but written a text string,
        # matching the original Python 2 behavior.
        f = open(mpconf, 'wb')
        try:
            f.write(conf_data)
        finally:
            f.close()

        response = read_process(self.apache_path, "-k start -f %s" % mpconf)
        self.ready = True
        return response

    def stop(self):
        # Bug fix: this previously hard-coded "apache", ignoring the
        # apache_path this server was configured with.
        os.popen("%s -k stop" % self.apache_path)
        self.ready = False
| bsd-3-clause |
fitoria/askbot-devel | askbot/migrations/0001_initial.py | 15 | 122672 | # encoding: utf-8
import os
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from askbot.migrations_api import safe_add_column
app_dir_name = os.path.basename(os.path.dirname(os.path.dirname(__file__)))
class Migration(SchemaMigration):
def forwards(self, orm):
#1) patch the existing auth_user table
safe_add_column('auth_user', 'website', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True, null=True), keep_default = False)
safe_add_column('auth_user', 'about', self.gf('django.db.models.fields.TextField')(blank=True, null=True), keep_default = False)
safe_add_column('auth_user', 'hide_ignored_questions', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default = False)
safe_add_column('auth_user', 'gold', self.gf('django.db.models.fields.SmallIntegerField')(default=0), keep_default = False)
safe_add_column('auth_user', 'email_isvalid', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default = False)
safe_add_column('auth_user', 'real_name', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True, null=True), keep_default = False)
safe_add_column('auth_user', 'location', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True, null=True), keep_default = False)
safe_add_column('auth_user', 'email_key', self.gf('django.db.models.fields.CharField')(max_length=32, null=True), keep_default = False)
safe_add_column('auth_user', 'date_of_birth', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default = False)
safe_add_column('auth_user', 'reputation', self.gf('django.db.models.fields.PositiveIntegerField')(default=1), keep_default = False)
safe_add_column('auth_user', 'gravatar', self.gf('django.db.models.fields.CharField')(max_length=32, null=True), keep_default = False)
safe_add_column('auth_user', 'bronze', self.gf('django.db.models.fields.SmallIntegerField')(default=0), keep_default = False)
safe_add_column('auth_user', 'tag_filter_setting', self.gf('django.db.models.fields.CharField')(default='ignored', max_length=16), keep_default = False)
safe_add_column('auth_user', 'last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now), keep_default = False)
safe_add_column('auth_user', 'silver', self.gf('django.db.models.fields.SmallIntegerField')(default=0), keep_default = False),
safe_add_column('auth_user', 'questions_per_page', self.gf('django.db.models.fields.SmallIntegerField')(default=10), keep_default = False),
safe_add_column('auth_user', 'response_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding model 'Vote'
if app_dir_name == 'forum':
db.create_table(u'vote', (
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('voted_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='votes', to=orm['auth.User'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('vote', self.gf('django.db.models.fields.SmallIntegerField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('forum', ['Vote'])
# Adding unique constraint on 'Vote', fields ['content_type', 'object_id', 'user']
db.create_unique(u'vote', ['content_type_id', 'object_id', 'user_id'])
# Adding model 'FlaggedItem'
db.create_table(u'flagged_item', (
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('flagged_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='flaggeditems', to=orm['auth.User'])),
))
db.send_create_signal('forum', ['FlaggedItem'])
# Adding unique constraint on 'FlaggedItem', fields ['content_type', 'object_id', 'user']
db.create_unique(u'flagged_item', ['content_type_id', 'object_id', 'user_id'])
# Adding model 'Comment'
db.create_table(u'comment', (
('comment', self.gf('django.db.models.fields.CharField')(max_length=300)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['auth.User'])),
))
db.send_create_signal('forum', ['Comment'])
# Adding model 'Tag'
db.create_table(u'tag', (
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_tags', to=orm['auth.User'])),
('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='deleted_tags', null=True, to=orm['auth.User'])),
('used_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('forum', ['Tag'])
# Adding model 'MarkedTag'
db.create_table('forum_markedtag', (
('reason', self.gf('django.db.models.fields.CharField')(max_length=16)),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='user_selections', to=orm['forum.Tag'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tag_selections', to=orm['auth.User'])),
))
db.send_create_signal('forum', ['MarkedTag'])
# Adding model 'Question'
db.create_table(u'question', (
('wiki', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('vote_up_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('answer_accepted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('offensive_flag_count', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('closed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('last_activity_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='last_active_in_questions', to=orm['auth.User'])),
('view_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('locked_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('score', self.gf('django.db.models.fields.IntegerField')(default=0)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='questions', to=orm['auth.User'])),
('comment_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('html', self.gf('django.db.models.fields.TextField')()),
('vote_down_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('closed', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('last_edited_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='last_edited_questions', null=True, to=orm['auth.User'])),
('favourite_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('summary', self.gf('django.db.models.fields.CharField')(max_length=180)),
('answer_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('last_activity_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('closed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='closed_questions', null=True, to=orm['auth.User'])),
('close_reason', self.gf('django.db.models.fields.SmallIntegerField')(null=True, blank=True)),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('tagnames', self.gf('django.db.models.fields.CharField')(max_length=125)),
('locked_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='locked_questions', null=True, to=orm['auth.User'])),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='deleted_questions', null=True, to=orm['auth.User'])),
('wikified_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
('last_edited_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('forum', ['Question'])
# Adding M2M table for field followed_by on 'Question'
db.create_table(u'question_followed_by', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('question', models.ForeignKey(orm['forum.question'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique(u'question_followed_by', ['question_id', 'user_id'])
# Adding M2M table for field tags on 'Question'
db.create_table(u'question_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('question', models.ForeignKey(orm['forum.question'], null=False)),
('tag', models.ForeignKey(orm['forum.tag'], null=False))
))
db.create_unique(u'question_tags', ['question_id', 'tag_id'])
# Adding model 'QuestionView'
db.create_table('forum_questionview', (
('when', self.gf('django.db.models.fields.DateTimeField')()),
('who', self.gf('django.db.models.fields.related.ForeignKey')(related_name='question_views', to=orm['auth.User'])),
('question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='viewed', to=orm['forum.Question'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('forum', ['QuestionView'])
# Adding model 'FavoriteQuestion'
db.create_table(u'favorite_question', (
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.Question'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='user_favorite_questions', to=orm['auth.User'])),
))
db.send_create_signal('forum', ['FavoriteQuestion'])
# Adding model 'QuestionRevision'
db.create_table(u'question_revision', (
('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='questionrevisions', to=orm['auth.User'])),
('tagnames', self.gf('django.db.models.fields.CharField')(max_length=125)),
('text', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revisions', to=orm['forum.Question'])),
('revised_at', self.gf('django.db.models.fields.DateTimeField')()),
('summary', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('forum', ['QuestionRevision'])
# Adding model 'AnonymousQuestion'
db.create_table('forum_anonymousquestion', (
('wiki', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('ip_addr', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('tagnames', self.gf('django.db.models.fields.CharField')(max_length=125)),
('text', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('summary', self.gf('django.db.models.fields.CharField')(max_length=180)),
('session_key', self.gf('django.db.models.fields.CharField')(max_length=40)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('forum', ['AnonymousQuestion'])
# Adding model 'Answer'
db.create_table(u'answer', (
('wiki', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('vote_up_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('offensive_flag_count', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('locked_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('score', self.gf('django.db.models.fields.IntegerField')(default=0)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='answers', to=orm['auth.User'])),
('question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='answers', to=orm['forum.Question'])),
('comment_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('html', self.gf('django.db.models.fields.TextField')()),
('vote_down_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('last_edited_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='last_edited_answers', null=True, to=orm['auth.User'])),
('accepted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('accepted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('locked_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='locked_answers', null=True, to=orm['auth.User'])),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='deleted_answers', null=True, to=orm['auth.User'])),
('wikified_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('last_edited_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('forum', ['Answer'])
# Adding model 'AnswerRevision'
db.create_table(u'answer_revision', (
('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='answerrevisions', to=orm['auth.User'])),
('text', self.gf('django.db.models.fields.TextField')()),
('revised_at', self.gf('django.db.models.fields.DateTimeField')()),
('summary', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('answer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revisions', to=orm['forum.Answer'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('forum', ['AnswerRevision'])
# Adding model 'AnonymousAnswer'
db.create_table('forum_anonymousanswer', (
('wiki', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('ip_addr', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('text', self.gf('django.db.models.fields.TextField')()),
('question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='anonymous_answers', to=orm['forum.Question'])),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('summary', self.gf('django.db.models.fields.CharField')(max_length=180)),
('session_key', self.gf('django.db.models.fields.CharField')(max_length=40)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('forum', ['AnonymousAnswer'])
# Adding model 'Activity'
db.create_table(u'activity', (
('is_auditted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('active_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('activity_type', self.gf('django.db.models.fields.SmallIntegerField')()),
))
db.send_create_signal('forum', ['Activity'])
# Adding model 'EmailFeedSetting'
db.create_table('forum_emailfeedsetting', (
('reported_at', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('added_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('subscriber', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('feed_type', self.gf('django.db.models.fields.CharField')(max_length=16)),
('frequency', self.gf('django.db.models.fields.CharField')(default='n', max_length=8)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('forum', ['EmailFeedSetting'])
# Adding model 'ValidationHash'
db.create_table('forum_validationhash', (
('hash_code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('seed', self.gf('django.db.models.fields.CharField')(max_length=12)),
('expiration', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2010, 4, 25, 13, 14, 41, 581000))),
('type', self.gf('django.db.models.fields.CharField')(max_length=12)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('forum', ['ValidationHash'])
# Adding unique constraint on 'ValidationHash', fields ['user', 'type']
db.create_unique('forum_validationhash', ['user_id', 'type'])
# Adding model 'AuthKeyUserAssociation'
db.create_table('forum_authkeyuserassociation', (
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='auth_keys', to=orm['auth.User'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('provider', self.gf('django.db.models.fields.CharField')(max_length=64)),
))
db.send_create_signal('forum', ['AuthKeyUserAssociation'])
# Adding model 'Badge'
db.create_table(u'badge', (
('multiple', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=300)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('awarded_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('type', self.gf('django.db.models.fields.SmallIntegerField')()),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('forum', ['Badge'])
# Adding unique constraint on 'Badge', fields ['name', 'type']
db.create_unique(u'badge', ['name', 'type'])
# Adding model 'Award'
db.create_table(u'award', (
('awarded_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('notified', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='award_user', to=orm['auth.User'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('badge', self.gf('django.db.models.fields.related.ForeignKey')(related_name='award_badge', to=orm['forum.Badge'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('forum', ['Award'])
# Adding model 'Repute'
db.create_table(u'repute', (
('positive', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.Question'])),
('negative', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('reputation_type', self.gf('django.db.models.fields.SmallIntegerField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('reputed_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reputation', self.gf('django.db.models.fields.IntegerField')(default=1)),
))
db.send_create_signal('forum', ['Repute'])
# Adding model 'Book'
db.create_table(u'book', (
('publication', self.gf('django.db.models.fields.CharField')(max_length=255)),
('short_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('author', self.gf('django.db.models.fields.CharField')(max_length=255)),
('cover_img', self.gf('django.db.models.fields.CharField')(max_length=255)),
('price', self.gf('django.db.models.fields.DecimalField')(max_digits=6, decimal_places=2)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('added_at', self.gf('django.db.models.fields.DateTimeField')()),
('pages', self.gf('django.db.models.fields.SmallIntegerField')()),
('tagnames', self.gf('django.db.models.fields.CharField')(max_length=125)),
('published_at', self.gf('django.db.models.fields.DateTimeField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('last_edited_at', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('forum', ['Book'])
# Adding M2M table for field questions on 'Book'
db.create_table('book_question', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('book', models.ForeignKey(orm['forum.book'], null=False)),
('question', models.ForeignKey(orm['forum.question'], null=False))
))
db.create_unique('book_question', ['book_id', 'question_id'])
# Adding model 'BookAuthorInfo'
db.create_table(u'book_author_info', (
('added_at', self.gf('django.db.models.fields.DateTimeField')()),
('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.Book'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('blog_url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('last_edited_at', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('forum', ['BookAuthorInfo'])
# Adding model 'BookAuthorRss'
db.create_table(u'book_author_rss', (
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('added_at', self.gf('django.db.models.fields.DateTimeField')()),
('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forum.Book'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('rss_created_at', self.gf('django.db.models.fields.DateTimeField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('forum', ['BookAuthorRss'])
else:
db.create_table(u'vote', (
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('voted_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='votes', to=orm['auth.User'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('vote', self.gf('django.db.models.fields.SmallIntegerField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('askbot', ['Vote'])
# Adding unique constraint on 'Vote', fields ['content_type', 'object_id', 'user']
db.create_unique(u'vote', ['content_type_id', 'object_id', 'user_id'])
# Adding model 'FlaggedItem'
db.create_table(u'flagged_item', (
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('flagged_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='flaggeditems', to=orm['auth.User'])),
))
db.send_create_signal('askbot', ['FlaggedItem'])
# Adding unique constraint on 'FlaggedItem', fields ['content_type', 'object_id', 'user']
db.create_unique(u'flagged_item', ['content_type_id', 'object_id', 'user_id'])
# Adding model 'Comment'
db.create_table(u'comment', (
('comment', self.gf('django.db.models.fields.CharField')(max_length=300)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['auth.User'])),
))
db.send_create_signal('askbot', ['Comment'])
# Adding model 'Tag'
db.create_table(u'tag', (
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_tags', to=orm['auth.User'])),
('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='deleted_tags', null=True, to=orm['auth.User'])),
('used_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('askbot', ['Tag'])
# Adding model 'MarkedTag'
db.create_table('askbot_markedtag', (
('reason', self.gf('django.db.models.fields.CharField')(max_length=16)),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='user_selections', to=orm['askbot.Tag'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='tag_selections', to=orm['auth.User'])),
))
db.send_create_signal('askbot', ['MarkedTag'])
# Adding model 'Question'
db.create_table(u'question', (
('wiki', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('vote_up_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('answer_accepted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('offensive_flag_count', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('closed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('last_activity_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='last_active_in_questions', to=orm['auth.User'])),
('view_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('locked_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('score', self.gf('django.db.models.fields.IntegerField')(default=0)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='questions', to=orm['auth.User'])),
('comment_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('html', self.gf('django.db.models.fields.TextField')()),
('vote_down_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('closed', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('last_edited_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='last_edited_questions', null=True, to=orm['auth.User'])),
('favourite_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('summary', self.gf('django.db.models.fields.CharField')(max_length=180)),
('answer_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('last_activity_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('closed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='closed_questions', null=True, to=orm['auth.User'])),
('close_reason', self.gf('django.db.models.fields.SmallIntegerField')(null=True, blank=True)),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('tagnames', self.gf('django.db.models.fields.CharField')(max_length=125)),
('locked_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='locked_questions', null=True, to=orm['auth.User'])),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='deleted_questions', null=True, to=orm['auth.User'])),
('wikified_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
('last_edited_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('askbot', ['Question'])
# Adding M2M table for field followed_by on 'Question'
db.create_table(u'question_followed_by', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('question', models.ForeignKey(orm['askbot.question'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique(u'question_followed_by', ['question_id', 'user_id'])
# Adding M2M table for field tags on 'Question'
db.create_table(u'question_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('question', models.ForeignKey(orm['askbot.question'], null=False)),
('tag', models.ForeignKey(orm['askbot.tag'], null=False))
))
db.create_unique(u'question_tags', ['question_id', 'tag_id'])
# Adding model 'QuestionView'
db.create_table('askbot_questionview', (
('when', self.gf('django.db.models.fields.DateTimeField')()),
('who', self.gf('django.db.models.fields.related.ForeignKey')(related_name='question_views', to=orm['auth.User'])),
('question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='viewed', to=orm['askbot.Question'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('askbot', ['QuestionView'])
# Adding model 'FavoriteQuestion'
db.create_table(u'favorite_question', (
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Question'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='user_favorite_questions', to=orm['auth.User'])),
))
db.send_create_signal('askbot', ['FavoriteQuestion'])
# Adding model 'QuestionRevision'
db.create_table(u'question_revision', (
('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='questionrevisions', to=orm['auth.User'])),
('tagnames', self.gf('django.db.models.fields.CharField')(max_length=125)),
('text', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revisions', to=orm['askbot.Question'])),
('revised_at', self.gf('django.db.models.fields.DateTimeField')()),
('summary', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('askbot', ['QuestionRevision'])
# Adding model 'AnonymousQuestion'
db.create_table('askbot_anonymousquestion', (
('wiki', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('ip_addr', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('tagnames', self.gf('django.db.models.fields.CharField')(max_length=125)),
('text', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.CharField')(max_length=300)),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('summary', self.gf('django.db.models.fields.CharField')(max_length=180)),
('session_key', self.gf('django.db.models.fields.CharField')(max_length=40)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('askbot', ['AnonymousQuestion'])
# Adding model 'Answer'
db.create_table(u'answer', (
('wiki', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('vote_up_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('offensive_flag_count', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('deleted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('locked_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('score', self.gf('django.db.models.fields.IntegerField')(default=0)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='answers', to=orm['auth.User'])),
('question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='answers', to=orm['askbot.Question'])),
('comment_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('html', self.gf('django.db.models.fields.TextField')()),
('vote_down_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('last_edited_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='last_edited_answers', null=True, to=orm['auth.User'])),
('accepted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('accepted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('locked_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='locked_answers', null=True, to=orm['auth.User'])),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='deleted_answers', null=True, to=orm['auth.User'])),
('wikified_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('last_edited_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('askbot', ['Answer'])
# Adding model 'AnswerRevision'
db.create_table(u'answer_revision', (
('author', self.gf('django.db.models.fields.related.ForeignKey')(related_name='answerrevisions', to=orm['auth.User'])),
('text', self.gf('django.db.models.fields.TextField')()),
('revised_at', self.gf('django.db.models.fields.DateTimeField')()),
('summary', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('answer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revisions', to=orm['askbot.Answer'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('askbot', ['AnswerRevision'])
# Adding model 'AnonymousAnswer'
db.create_table('askbot_anonymousanswer', (
('wiki', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('ip_addr', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('text', self.gf('django.db.models.fields.TextField')()),
('question', self.gf('django.db.models.fields.related.ForeignKey')(related_name='anonymous_answers', to=orm['askbot.Question'])),
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('summary', self.gf('django.db.models.fields.CharField')(max_length=180)),
('session_key', self.gf('django.db.models.fields.CharField')(max_length=40)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('askbot', ['AnonymousAnswer'])
# Adding model 'Activity'
db.create_table(u'activity', (
('is_auditted', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('active_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('activity_type', self.gf('django.db.models.fields.SmallIntegerField')()),
))
db.send_create_signal('askbot', ['Activity'])
# Adding model 'EmailFeedSetting'
db.create_table('askbot_emailfeedsetting', (
('reported_at', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('added_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('subscriber', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('feed_type', self.gf('django.db.models.fields.CharField')(max_length=16)),
('frequency', self.gf('django.db.models.fields.CharField')(default='n', max_length=8)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('askbot', ['EmailFeedSetting'])
# Adding model 'ValidationHash'
db.create_table('askbot_validationhash', (
('hash_code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('seed', self.gf('django.db.models.fields.CharField')(max_length=12)),
('expiration', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2010, 4, 25, 13, 14, 41, 581000))),
('type', self.gf('django.db.models.fields.CharField')(max_length=12)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('askbot', ['ValidationHash'])
# Adding unique constraint on 'ValidationHash', fields ['user', 'type']
db.create_unique('askbot_validationhash', ['user_id', 'type'])
# Adding model 'AuthKeyUserAssociation'
db.create_table('askbot_authkeyuserassociation', (
('added_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='auth_keys', to=orm['auth.User'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('provider', self.gf('django.db.models.fields.CharField')(max_length=64)),
))
db.send_create_signal('askbot', ['AuthKeyUserAssociation'])
# Adding model 'Badge'
db.create_table(u'badge', (
('multiple', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=300)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('awarded_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('type', self.gf('django.db.models.fields.SmallIntegerField')()),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('askbot', ['Badge'])
# Adding unique constraint on 'Badge', fields ['name', 'type']
db.create_unique(u'badge', ['name', 'type'])
# Adding model 'Award'
db.create_table(u'award', (
('awarded_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('notified', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='award_user', to=orm['auth.User'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('badge', self.gf('django.db.models.fields.related.ForeignKey')(related_name='award_badge', to=orm['askbot.Badge'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('askbot', ['Award'])
# Adding model 'Repute'
db.create_table(u'repute', (
('positive', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Question'])),
('negative', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('reputation_type', self.gf('django.db.models.fields.SmallIntegerField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('reputed_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reputation', self.gf('django.db.models.fields.IntegerField')(default=1)),
))
db.send_create_signal('askbot', ['Repute'])
# Adding model 'Book'
db.create_table(u'book', (
('publication', self.gf('django.db.models.fields.CharField')(max_length=255)),
('short_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('author', self.gf('django.db.models.fields.CharField')(max_length=255)),
('cover_img', self.gf('django.db.models.fields.CharField')(max_length=255)),
('price', self.gf('django.db.models.fields.DecimalField')(max_digits=6, decimal_places=2)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('added_at', self.gf('django.db.models.fields.DateTimeField')()),
('pages', self.gf('django.db.models.fields.SmallIntegerField')()),
('tagnames', self.gf('django.db.models.fields.CharField')(max_length=125)),
('published_at', self.gf('django.db.models.fields.DateTimeField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('last_edited_at', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('askbot', ['Book'])
# Adding M2M table for field questions on 'Book'
db.create_table('book_question', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('book', models.ForeignKey(orm['askbot.book'], null=False)),
('question', models.ForeignKey(orm['askbot.question'], null=False))
))
db.create_unique('book_question', ['book_id', 'question_id'])
# Adding model 'BookAuthorInfo'
db.create_table(u'book_author_info', (
('added_at', self.gf('django.db.models.fields.DateTimeField')()),
('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Book'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('blog_url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('last_edited_at', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('askbot', ['BookAuthorInfo'])
# Adding model 'BookAuthorRss'
db.create_table(u'book_author_rss', (
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('added_at', self.gf('django.db.models.fields.DateTimeField')()),
('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Book'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('rss_created_at', self.gf('django.db.models.fields.DateTimeField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('askbot', ['BookAuthorRss'])
def backwards(self, orm):
    """Revert the schema changes applied by forwards().

    Drops the profile columns added to ``auth_user`` and then deletes
    every table created by the forward migration.  The tables created
    under an app-specific name differ only by prefix depending on
    whether this migration originally ran as the legacy ``forum`` app
    or as ``askbot``; everything else is identical between the two.
    """
    # Remove the user-profile columns that forwards() bolted onto auth_user.
    for column in (
        'website',
        'about',
        'hide_ignored_questions',
        'gold',
        'email_isvalid',
        'real_name',
        'location',
        'email_key',
        'date_of_birth',
        'reputation',
        'gravatar',
        'bronze',
        'tag_filter_setting',
        'last_seen',
        'silver',
        'questions_per_page',
        'response_count',
    ):
        db.delete_column('auth_user', column)
    # Tables whose names carry the app label use this prefix; all other
    # table names are shared between the two install flavours.
    prefix = 'forum' if app_dir_name == 'forum' else 'askbot'
    # Deleting model 'Vote'
    db.delete_table(u'vote')
    # Removing unique constraint on 'Vote', fields ['content_type', 'object_id', 'user']
    # NOTE(review): dropping the unique constraint *after* the table was
    # deleted looks backwards; this is the order South generated, and it
    # is preserved verbatim here — confirm against the target DB backend.
    db.delete_unique(u'vote', ['content_type_id', 'object_id', 'user_id'])
    # Deleting model 'FlaggedItem'
    db.delete_table(u'flagged_item')
    # Removing unique constraint on 'FlaggedItem', fields ['content_type', 'object_id', 'user']
    db.delete_unique(u'flagged_item', ['content_type_id', 'object_id', 'user_id'])
    # Deleting model 'Comment'
    db.delete_table(u'comment')
    # Deleting model 'Tag'
    db.delete_table(u'tag')
    # Deleting model 'MarkedTag'
    db.delete_table(prefix + '_markedtag')
    # Deleting model 'Question'
    db.delete_table(u'question')
    # Removing M2M table for field followed_by on 'Question'
    db.delete_table('question_followed_by')
    # Removing M2M table for field tags on 'Question'
    db.delete_table('question_tags')
    # Deleting model 'QuestionView'
    db.delete_table(prefix + '_questionview')
    # Deleting model 'FavoriteQuestion'
    db.delete_table(u'favorite_question')
    # Deleting model 'QuestionRevision'
    db.delete_table(u'question_revision')
    # Deleting model 'AnonymousQuestion'
    db.delete_table(prefix + '_anonymousquestion')
    # Deleting model 'Answer'
    db.delete_table(u'answer')
    # Deleting model 'AnswerRevision'
    db.delete_table(u'answer_revision')
    # Deleting model 'AnonymousAnswer'
    db.delete_table(prefix + '_anonymousanswer')
    # Deleting model 'Activity'
    db.delete_table(u'activity')
    # Deleting model 'EmailFeedSetting'
    db.delete_table(prefix + '_emailfeedsetting')
    # Deleting model 'ValidationHash'
    db.delete_table(prefix + '_validationhash')
    # Removing unique constraint on 'ValidationHash', fields ['user', 'type']
    db.delete_unique(prefix + '_validationhash', ['user_id', 'type'])
    # Deleting model 'AuthKeyUserAssociation'
    db.delete_table(prefix + '_authkeyuserassociation')
    # Deleting model 'Badge'
    db.delete_table(u'badge')
    # Removing unique constraint on 'Badge', fields ['name', 'type']
    db.delete_unique(u'badge', ['name', 'type'])
    # Deleting model 'Award'
    db.delete_table(u'award')
    # Deleting model 'Repute'
    db.delete_table(u'repute')
    # Deleting model 'Book'
    db.delete_table(u'book')
    # Removing M2M table for field questions on 'Book'
    db.delete_table('book_question')
    # Deleting model 'BookAuthorInfo'
    db.delete_table(u'book_author_info')
    # Deleting model 'BookAuthorRss'
    db.delete_table(u'book_author_rss')
if app_dir_name == 'forum':
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'tag_filter_setting': ('django.db.models.fields.CharField', [], {'default': "'ignored'", 'max_length': '16'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forum.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['forum.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'forum.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'forum.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['forum.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'forum.answerrevision': {
'Meta': {'object_name': 'AnswerRevision', 'db_table': "u'answer_revision'"},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['forum.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answerrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'forum.authkeyuserassociation': {
'Meta': {'object_name': 'AuthKeyUserAssociation'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_keys'", 'to': "orm['auth.User']"})
},
'forum.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['forum.Badge']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'forum.badge': {
'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Badge', 'db_table': "u'badge'"},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {})
},
'forum.book': {
'Meta': {'object_name': 'Book', 'db_table': "u'book'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cover_img': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'pages': ('django.db.models.fields.SmallIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'publication': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'published_at': ('django.db.models.fields.DateTimeField', [], {}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'book'", 'db_table': "'book_question'", 'to': "orm['forum.Question']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.bookauthorinfo': {
'Meta': {'object_name': 'BookAuthorInfo', 'db_table': "u'book_author_info'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.bookauthorrss': {
'Meta': {'object_name': 'BookAuthorRss', 'db_table': "u'book_author_rss'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_created_at': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'forum.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'forum.flaggeditem': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'FlaggedItem', 'db_table': "u'flagged_item'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'flagged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'to': "orm['auth.User']"})
},
'forum.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['forum.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'forum.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'through': "'FavoriteQuestion'", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'to': "orm['forum.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'forum.questionrevision': {
'Meta': {'object_name': 'QuestionRevision', 'db_table': "u'question_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['forum.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'forum.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['forum.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'forum.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Question']"}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.tag': {
'Meta': {'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'forum.validationhash': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'ValidationHash'},
'expiration': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 4, 25, 13, 14, 41, 714642)'}),
'hash_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seed': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
}
}
else:
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'tag_filter_setting': ('django.db.models.fields.CharField', [], {'default': "'ignored'", 'max_length': '16'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.answerrevision': {
'Meta': {'object_name': 'AnswerRevision', 'db_table': "u'answer_revision'"},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answerrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'askbot.authkeyuserassociation': {
'Meta': {'object_name': 'AuthKeyUserAssociation'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_keys'", 'to': "orm['auth.User']"})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.Badge']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badge': {
'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Badge', 'db_table': "u'badge'"},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {})
},
'askbot.book': {
'Meta': {'object_name': 'Book', 'db_table': "u'book'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cover_img': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'pages': ('django.db.models.fields.SmallIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'publication': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'published_at': ('django.db.models.fields.DateTimeField', [], {}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'book'", 'db_table': "'book_question'", 'to': "orm['askbot.Question']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.bookauthorinfo': {
'Meta': {'object_name': 'BookAuthorInfo', 'db_table': "u'book_author_info'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.bookauthorrss': {
'Meta': {'object_name': 'BookAuthorRss', 'db_table': "u'book_author_rss'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_created_at': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.flaggeditem': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'FlaggedItem', 'db_table': "u'flagged_item'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'flagged_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flaggeditems'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'through': "'FavoriteQuestion'", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionrevision': {
'Meta': {'object_name': 'QuestionRevision', 'db_table': "u'question_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.validationhash': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'ValidationHash'},
'expiration': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 4, 25, 13, 14, 41, 714642)'}),
'hash_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seed': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
}
}
# South migration marker: apps whose models are fully described by the dict
# above. NOTE(review): app_dir_name is presumably defined earlier in this
# migration file — confirm before reuse.
complete_apps = [app_dir_name]
| gpl-3.0 |
xpansa/pmis | purchase_stock_analytic/purchase.py | 2 | 1448 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# Jordi Ballester Alomar <jordi.ballester@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class purchase_order(osv.osv):
    """Extension point for purchase orders (analytic account propagation).

    Inherits the core ``purchase.order`` model. The analytic account copy
    into the generated stock move is currently disabled (kept as a
    commented-out line below for reference).
    """
    _inherit = "purchase.order"

    def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
        """Return the stock move values for a purchase order line.

        Currently delegates entirely to the parent implementation.
        """
        move_vals = super(purchase_order, self)._prepare_order_line_move(
            cr, uid, order, order_line, picking_id, group_id, context=context)
        # res['analytic_account_id'] = order_line.account_analytic_id.id
        return move_vals
| agpl-3.0 |
ChristianMayer/CometVisu | utils/ci/check-changes.py | 3 | 2016 | #!/usr/bin/env python
import sys
import sh
import json
import requests
import re
import os
from datetime import datetime
source_dir = '%s/source' % os.environ['GITHUB_WORKSPACE']
client_dir = '%s/client/source' % os.environ['GITHUB_WORKSPACE']
script_dir = os.path.dirname(os.path.realpath(__file__))
def check_for_changes(type='cv'):
    """Print 1 when a new build of the given artifact type is needed, else 0.

    Looks up the newest build artifact on Bintray for the configured version
    and compares its creation time against the git history of the relevant
    source directory. Any unknown *type* prints 0 immediately.
    """
    if type not in ('cv', 'client'):
        print(0)
        return

    with open('%s/bintray-deploy.json' % script_dir, 'r') as config_fd:
        deploy_config = json.load(config_fd)

    version_name = deploy_config['version']['name']
    package_url = 'https://api.bintray.com/packages/%s/%s/%s' % (
        deploy_config['package']['subject'],
        deploy_config['package']['repo'],
        deploy_config['package']['name']
    )
    response = requests.get(
        '%s/versions/%s/files?include_unpublished=1' % (package_url, version_name),
        auth=(os.environ['BINTRAY_USER'], os.environ['BINTRAY_KEY']))

    latest_build = None
    needs_build = True
    if response.status_code == 200:
        # Find the creation time of the newest artifact matching the type.
        for entry in response.json():
            created = datetime.strptime(entry['created'], '%Y-%m-%dT%H:%M:%S.%fz')
            if type == 'cv':
                relevant = entry['name'].startswith('CometVisu-')
            else:
                relevant = 'CometVisuClient' in entry['name']
            if relevant and (latest_build is None or latest_build < created):
                latest_build = created

        if latest_build is not None:
            # Rebuild only when the watched directory changed since the last artifact.
            log_output = sh.git('--no-pager', 'log', '--pretty=format:', '--name-only',
                                '--since="%s"' % latest_build,
                                source_dir if type == 'cv' else client_dir)
            touched = [line.rstrip("\n") for line in log_output if len(line.rstrip("\n")) > 0]
            needs_build = len(touched) > 0
        else:
            needs_build = False

    print(1 if needs_build else 0)
if __name__ == '__main__':
    # CLI entry point: the first argument selects the artifact type
    # ('cv' or 'client'); defaults to 'cv' when omitted.
    check_for_changes(sys.argv[1] if len(sys.argv) > 1 else "cv")
| gpl-3.0 |
tumbl3w33d/ansible | test/lib/ansible_test/_internal/cover.py | 10 | 25693 | """Code coverage utilities."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import re
import time
from xml.etree.ElementTree import (
Comment,
Element,
SubElement,
tostring,
)
from xml.dom import (
minidom,
)
from . import types as t
from .target import (
walk_module_targets,
walk_compile_targets,
walk_powershell_targets,
)
from .util import (
display,
ApplicationError,
common_environment,
ANSIBLE_TEST_DATA_ROOT,
to_text,
make_dirs,
)
from .util_common import (
intercept_command,
ResultType,
write_text_test_results,
write_json_test_results,
)
from .config import (
CoverageConfig,
CoverageReportConfig,
)
from .env import (
get_ansible_version,
)
from .executor import (
Delegate,
install_command_requirements,
)
from .data import (
data_context,
)
COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
COVERAGE_CONFIG_PATH = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'coveragerc')
COVERAGE_OUTPUT_FILE_NAME = 'coverage'
def command_coverage_combine(args):
    """Patch paths in coverage files and merge into a single file.

    :type args: CoverageConfig
    :rtype: list[str]
    """
    combined_paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args)

    for combined_path in combined_paths:
        display.info('Generated combined output: %s' % combined_path, verbosity=1)

    return combined_paths
def _command_coverage_combine_python(args):
    """Merge intermediate Python coverage files into per-group combined files.

    Reads every ``=coverage.`` / ``=python`` file from the coverage results
    directory, rewrites remote/AnsiballZ paths back to controller paths via
    _sanitise_filename, accumulates arc data per coverage group, and writes
    one combined coverage data file per group.

    :type args: CoverageConfig
    :rtype: list[str]
    """
    coverage = initialize_coverage(args)
    # Map module name -> source path, used to restore paths for remote module coverage.
    modules = dict((target.module, target.path) for target in list(walk_module_targets()) if target.path.endswith('.py'))
    coverage_dir = ResultType.COVERAGE.path
    coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir)
                      if '=coverage.' in f and '=python' in f]
    counter = 0
    sources = _get_coverage_targets(args, walk_compile_targets)
    groups = _build_stub_groups(args, sources, lambda line_count: set())
    # When testing a collection, prepare patterns to strip the temporary install prefix.
    if data_context().content.collection:
        collection_search_re = re.compile(r'/%s/' % data_context().content.collection.directory)
        collection_sub_re = re.compile(r'^.*?/%s/' % data_context().content.collection.directory)
    else:
        collection_search_re = None
        collection_sub_re = None
    for coverage_file in coverage_files:
        counter += 1
        display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
        original = coverage.CoverageData()
        group = get_coverage_group(args, coverage_file)
        if group is None:
            display.warning('Unexpected name for coverage file: %s' % coverage_file)
            continue
        if os.path.getsize(coverage_file) == 0:
            display.warning('Empty coverage file: %s' % coverage_file)
            continue
        try:
            original.read_file(coverage_file)
        except Exception as ex:  # pylint: disable=locally-disabled, broad-except
            # A corrupt file should not abort combining the remaining files.
            display.error(u'%s' % ex)
            continue
        for filename in original.measured_files():
            arcs = set(original.arcs(filename) or [])
            if not arcs:
                # This is most likely due to using an unsupported version of coverage.
                display.warning('No arcs found for "%s" in coverage file: %s' % (filename, coverage_file))
                continue
            filename = _sanitise_filename(filename, modules=modules, collection_search_re=collection_search_re,
                                          collection_sub_re=collection_sub_re)
            if not filename:
                # None means the file should be skipped (e.g. unknown module).
                continue
            if group not in groups:
                groups[group] = {}
            arc_data = groups[group]
            if filename not in arc_data:
                arc_data[filename] = set()
            arc_data[filename].update(arcs)
    output_files = []
    invalid_path_count = 0
    invalid_path_chars = 0
    coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME)
    for group in sorted(groups):
        arc_data = groups[group]
        updated = coverage.CoverageData()
        for filename in arc_data:
            if not os.path.isfile(filename):
                if collection_search_re and collection_search_re.search(filename) and os.path.basename(filename) == '__init__.py':
                    # the collection loader uses implicit namespace packages, so __init__.py does not need to exist on disk
                    continue
                invalid_path_count += 1
                invalid_path_chars += len(filename)
                if args.verbosity > 1:
                    display.warning('Invalid coverage path: %s' % filename)
                continue
            updated.add_arcs({filename: list(arc_data[filename])})
        if args.all:
            # Include all sources so uncovered files show as 0% instead of missing.
            updated.add_arcs(dict((source[0], []) for source in sources))
        if not args.explain:
            output_file = coverage_file + group
            updated.write_file(output_file)
            output_files.append(output_file)
    if invalid_path_count > 0:
        display.warning('Ignored %d characters from %d invalid coverage path(s).' % (invalid_path_chars, invalid_path_count))
    return sorted(output_files)
def _get_coverage_targets(args, walk_func):
    """Collect (absolute path, line count) pairs for coverage target files.

    Only populated when ``--all`` or ``--stub`` was requested; otherwise an
    empty list is returned.

    :type args: CoverageConfig
    :type walk_func: Func
    :rtype: list[tuple[str, int]]
    """
    sources = []

    if args.all or args.stub:
        # excludes symlinks of regular files to avoid reporting on the same file multiple times
        # in the future it would be nice to merge any coverage for symlinks into the real files
        for target in walk_func(include_symlinks=False):
            abs_path = os.path.abspath(target.path)

            with open(abs_path, 'r') as source_fd:
                line_total = len(source_fd.read().splitlines())

            sources.append((abs_path, line_total))

    sources.sort()

    return sources
def _build_stub_groups(args, sources, default_stub_value):
    """Partition source files into stub coverage groups.

    Chunks *sources* so that each group holds roughly 500k lines at most, then
    seeds every file in a group with ``default_stub_value(line_count)``.
    Returns an empty dict unless ``--stub`` was requested.

    :type args: CoverageConfig
    :type sources: List[tuple[str, int]]
    :type default_stub_value: Func[int]
    :rtype: dict
    """
    groups = {}

    if not args.stub:
        return groups

    line_limit = 500000
    current_chunk = []
    chunks = [current_chunk]
    running_total = 0

    for path, line_total in sources:
        current_chunk.append((path, line_total))
        running_total += line_total

        # Once the chunk exceeds the limit, start a fresh one.
        if running_total > line_limit:
            running_total = 0
            current_chunk = []
            chunks.append(current_chunk)

    for index, chunk in enumerate(chunks):
        if not chunk:
            continue

        groups['=stub-%02d' % (index + 1)] = {path: default_stub_value(line_total)
                                              for path, line_total in chunk}

    return groups
def _sanitise_filename(filename, modules=None, collection_search_re=None, collection_sub_re=None):
    """Rewrite a coverage path recorded on a remote host into a controller path.

    Returns the rewritten path, or None when the file should be skipped
    (coverage for a module that is not known to the controller).

    :type filename: str
    :type modules: dict | None
    :type collection_search_re: Pattern | None
    :type collection_sub_re: Pattern | None
    :rtype: str | None
    """
    ansible_path = os.path.abspath('lib/ansible/') + '/'
    root_path = data_context().content.root + '/'
    integration_temp_path = os.path.sep + os.path.join(ResultType.TMP.relative_path, 'integration') + os.path.sep
    if modules is None:
        modules = {}
    # The branches below are ordered from most to least specific; only the
    # first matching rewrite is applied.
    if '/ansible_modlib.zip/ansible/' in filename:
        # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier.
        new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
        display.info('%s -> %s' % (filename, new_name), verbosity=3)
        filename = new_name
    elif collection_search_re and collection_search_re.search(filename):
        # Strip the temporary collection install prefix to recover the real path.
        new_name = os.path.abspath(collection_sub_re.sub('', filename))
        display.info('%s -> %s' % (filename, new_name), verbosity=3)
        filename = new_name
    elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename):
        # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later.
        new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename)
        display.info('%s -> %s' % (filename, new_name), verbosity=3)
        filename = new_name
    elif '/ansible_module_' in filename:
        # Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier.
        module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
        if module_name not in modules:
            display.warning('Skipping coverage of unknown module: %s' % module_name)
            return None
        new_name = os.path.abspath(modules[module_name])
        display.info('%s -> %s' % (filename, new_name), verbosity=3)
        filename = new_name
    elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename):
        # Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later.
        # AnsiballZ versions using zipimporter will match the `.zip` portion of the regex.
        # AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
        module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$',
                             '\\g<module>', filename).rstrip('_')
        if module_name not in modules:
            display.warning('Skipping coverage of unknown module: %s' % module_name)
            return None
        new_name = os.path.abspath(modules[module_name])
        display.info('%s -> %s' % (filename, new_name), verbosity=3)
        filename = new_name
    elif re.search('^(/.*?)?/root/ansible/', filename):
        # Rewrite the path of code running on a remote host or in a docker container as root.
        new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
        display.info('%s -> %s' % (filename, new_name), verbosity=3)
        filename = new_name
    elif integration_temp_path in filename:
        # Rewrite the path of code running from an integration test temporary directory.
        new_name = re.sub(r'^.*' + re.escape(integration_temp_path) + '[^/]+/', root_path, filename)
        display.info('%s -> %s' % (filename, new_name), verbosity=3)
        filename = new_name
    return filename
def command_coverage_report(args):
    """Generate console coverage reports for each combined coverage file.

    :type args: CoverageReportConfig
    """
    for report_file in command_coverage_combine(args):
        if args.group_by or args.stub:
            group_label = ' '.join(os.path.basename(report_file).split('=')[1:])
            display.info('>>> Coverage Group: %s' % group_label)

        if report_file.endswith('-powershell'):
            # PowerShell coverage has its own report generator.
            display.info(_generate_powershell_output_report(args, report_file))
            continue

        report_options = []

        if args.show_missing:
            report_options.append('--show-missing')

        if args.include:
            report_options.extend(['--include', args.include])

        if args.omit:
            report_options.extend(['--omit', args.omit])

        run_coverage(args, report_file, 'report', report_options)
def command_coverage_html(args):
    """Generate local HTML coverage reports.

    :type args: CoverageConfig
    """
    for data_file in command_coverage_combine(args):
        if data_file.endswith('-powershell'):
            # coverage.py does not support non-Python files so we just skip the local html report.
            display.info("Skipping output file %s in html generation" % data_file, verbosity=3)
            continue

        report_dir = os.path.join(ResultType.REPORTS.path, os.path.basename(data_file))
        make_dirs(report_dir)
        run_coverage(args, data_file, 'html', ['-i', '-d', report_dir])

        display.info('HTML report generated: file:///%s' % os.path.join(report_dir, 'index.html'))
def command_coverage_xml(args):
    """Generate Cobertura-style XML coverage reports.

    :type args: CoverageConfig
    """
    for data_file in command_coverage_combine(args):
        xml_name = '%s.xml' % os.path.basename(data_file)

        if data_file.endswith('-powershell'):
            # Build the XML document directly; PowerShell data is not
            # understood by coverage.py.
            report = _generage_powershell_xml(data_file)
            raw_xml = tostring(report, 'utf-8')
            pretty = minidom.parseString(raw_xml).toprettyxml(indent='    ')
            write_text_test_results(ResultType.REPORTS, xml_name, pretty)
        else:
            xml_path = os.path.join(ResultType.REPORTS.path, xml_name)
            make_dirs(ResultType.REPORTS.path)
            run_coverage(args, data_file, 'xml', ['-i', '-o', xml_path])
def command_coverage_erase(args):
    """Delete all coverage data files from the coverage results directory.

    :type args: CoverageConfig
    """
    initialize_coverage(args)

    coverage_dir = ResultType.COVERAGE.path

    for file_name in os.listdir(coverage_dir):
        # Only remove coverage data; leave any other files alone.
        if not file_name.startswith('coverage') and '=coverage.' not in file_name:
            continue

        if not args.explain:
            os.remove(os.path.join(coverage_dir, file_name))
def initialize_coverage(args):
    """Import and return the coverage module, preparing the environment first.

    :type args: CoverageConfig
    :rtype: coverage
    """
    if args.delegate:
        raise Delegate()

    if args.requirements:
        install_command_requirements(args)

    try:
        import coverage
    except ImportError:
        coverage = None

    if coverage is None:
        raise ApplicationError('You must install the "coverage" python module to use this command.')

    return coverage
def get_coverage_group(args, coverage_file):
    """Return the group label for a coverage file, or None if the name is malformed.

    The file name is expected to have five '='-separated parts:
    command=target=environment=version=coverage.<suffix>.

    :type args: CoverageConfig
    :type coverage_file: str
    :rtype: str
    """
    parts = os.path.basename(coverage_file).split('=', 4)

    if len(parts) != 5 or not parts[4].startswith('coverage.'):
        return None

    # Pair each group name with the corresponding file name part.
    names = dict(zip(COVERAGE_GROUPS, parts[:4]))

    # Concatenate only the parts selected via --group-by, in canonical order.
    return ''.join('=%s' % names[part] for part in COVERAGE_GROUPS if part in args.group_by)
def _command_coverage_combine_powershell(args):
    """Merge intermediate PowerShell coverage files into per-group combined files.

    Reads every ``=coverage.`` / ``=powershell`` JSON file from the coverage
    results directory, rewrites paths via _sanitise_filename, accumulates
    per-line hit counts per coverage group, and writes one combined JSON file
    per group.

    :type args: CoverageConfig
    :rtype: list[str]
    """
    coverage_dir = ResultType.COVERAGE.path
    coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir)
                      if '=coverage.' in f and '=powershell' in f]
    def _default_stub_value(lines):
        # PowerShell coverage is line based: default every line to zero hits.
        val = {}
        for line in range(lines):
            val[line] = 0
        return val
    counter = 0
    sources = _get_coverage_targets(args, walk_powershell_targets)
    groups = _build_stub_groups(args, sources, _default_stub_value)
    for coverage_file in coverage_files:
        counter += 1
        display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
        group = get_coverage_group(args, coverage_file)
        if group is None:
            display.warning('Unexpected name for coverage file: %s' % coverage_file)
            continue
        if os.path.getsize(coverage_file) == 0:
            display.warning('Empty coverage file: %s' % coverage_file)
            continue
        try:
            with open(coverage_file, 'rb') as original_fd:
                coverage_run = json.loads(to_text(original_fd.read(), errors='replace'))
        except Exception as ex:  # pylint: disable=locally-disabled, broad-except
            # A corrupt file should not abort combining the remaining files.
            display.error(u'%s' % ex)
            continue
        for filename, hit_info in coverage_run.items():
            if group not in groups:
                groups[group] = {}
            coverage_data = groups[group]
            filename = _sanitise_filename(filename)
            if not filename:
                continue
            if filename not in coverage_data:
                coverage_data[filename] = {}
            file_coverage = coverage_data[filename]
            # Older runs may store a single hit entry instead of a list.
            if not isinstance(hit_info, list):
                hit_info = [hit_info]
            for hit_entry in hit_info:
                if not hit_entry:
                    continue
                # Accumulate hit counts per line across all coverage runs.
                line_count = file_coverage.get(hit_entry['Line'], 0) + hit_entry['HitCount']
                file_coverage[hit_entry['Line']] = line_count
    output_files = []
    invalid_path_count = 0
    invalid_path_chars = 0
    for group in sorted(groups):
        coverage_data = groups[group]
        for filename in coverage_data:
            if not os.path.isfile(filename):
                # NOTE(review): unlike the Python variant, invalid paths are
                # only counted here, not removed from the output — confirm
                # whether that is intentional.
                invalid_path_count += 1
                invalid_path_chars += len(filename)
                if args.verbosity > 1:
                    display.warning('Invalid coverage path: %s' % filename)
                continue
        if args.all:
            # Add 0 line entries for files not in coverage_data
            for source, source_line_count in sources:
                if source in coverage_data:
                    continue
                coverage_data[source] = _default_stub_value(source_line_count)
        if not args.explain:
            output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell'
            write_json_test_results(ResultType.COVERAGE, output_file, coverage_data)
            output_files.append(os.path.join(ResultType.COVERAGE.path, output_file))
    if invalid_path_count > 0:
        display.warning(
            'Ignored %d characters from %d invalid coverage path(s).' % (invalid_path_chars, invalid_path_count))
    return sorted(output_files)
def _generage_powershell_xml(coverage_file):
    """Parse a PowerShell coverage JSON file and build a Cobertura ``coverage`` XML element.

    :type coverage_file: str
    :rtype: Element
    """
    with open(coverage_file, 'rb') as coverage_fd:
        coverage_info = json.loads(to_text(coverage_fd.read()))

    content_root = data_context().content.root
    is_ansible = data_context().content.is_ansible

    # Bucket each covered file under a Java-style package name, the grouping
    # Cobertura consumers expect.
    packages = {}
    for path, results in coverage_info.items():
        filename = os.path.splitext(os.path.basename(path))[0]

        if filename.startswith('Ansible.ModuleUtils'):
            package = 'ansible.module_utils'
        elif is_ansible:
            package = 'ansible.modules'
        else:
            rel_path = path[len(content_root) + 1:]
            plugin_type = "modules" if rel_path.startswith("plugins/modules") else "module_utils"
            package = 'ansible_collections.%splugins.%s' % (data_context().content.collection.prefix, plugin_type)

        if package not in packages:
            packages[package] = {}

        packages[package][path] = results

    elem_coverage = Element('coverage')
    elem_coverage.append(
        Comment(' Generated by ansible-test from the Ansible project: https://www.ansible.com/ '))
    elem_coverage.append(
        Comment(' Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd '))

    elem_sources = SubElement(elem_coverage, 'sources')
    elem_source = SubElement(elem_sources, 'source')
    elem_source.text = data_context().content.root

    elem_packages = SubElement(elem_coverage, 'packages')

    total_lines_hit = 0
    total_line_count = 0

    for package_name, package_data in packages.items():
        lines_hit, line_count = _add_cobertura_package(elem_packages, package_name, package_data)

        total_lines_hit += lines_hit
        total_line_count += line_count

    elem_coverage.attrib.update({
        'branch-rate': '0',
        'branches-covered': '0',
        'branches-valid': '0',
        'complexity': '0',
        'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
        # Bug fix: per the Cobertura DTD, lines-covered is the number of lines
        # actually hit and lines-valid is the total number of coverable lines.
        # The two values were previously swapped.
        'lines-covered': str(total_lines_hit),
        'lines-valid': str(total_line_count),
        'timestamp': str(int(time.time())),
        'version': get_ansible_version(),
    })

    return elem_coverage
def _add_cobertura_package(packages, package_name, package_data):
    """Add a Cobertura ``package`` element (one ``class`` element per covered file).

    :type packages: SubElement
    :type package_name: str
    :type package_data: Dict[str, Dict[str, int]]
    :rtype: Tuple[int, int]
    """
    elem_package = SubElement(packages, 'package')
    elem_classes = SubElement(elem_package, 'classes')

    total_lines_hit = 0
    total_line_count = 0

    # Hoisted out of the loop: the content root is invariant across files
    # (matches how _generage_powershell_xml reads it once up front).
    content_root = data_context().content.root

    for path, results in package_data.items():
        # A line counts as hit when its hit count is non-zero.
        lines_hit = len([True for hits in results.values() if hits])
        line_count = len(results)

        total_lines_hit += lines_hit
        total_line_count += line_count

        elem_class = SubElement(elem_classes, 'class')

        class_name = os.path.splitext(os.path.basename(path))[0]
        if class_name.startswith("Ansible.ModuleUtils"):
            class_name = class_name[20:]  # strip the "Ansible.ModuleUtils." prefix

        filename = path
        if filename.startswith(content_root):
            # Report file paths relative to the content root.
            filename = filename[len(content_root) + 1:]

        elem_class.attrib.update({
            'branch-rate': '0',
            'complexity': '0',
            'filename': filename,
            'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0",
            'name': class_name,
        })

        SubElement(elem_class, 'methods')

        elem_lines = SubElement(elem_class, 'lines')

        for number, hits in results.items():
            elem_line = SubElement(elem_lines, 'line')
            elem_line.attrib.update(
                hits=str(hits),
                number=str(number),
            )

    elem_package.attrib.update({
        'branch-rate': '0',
        'complexity': '0',
        'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
        'name': package_name,
    })

    return total_lines_hit, total_line_count
def _generate_powershell_output_report(args, coverage_file):
    """Build a pytest-cov style console coverage report from a PowerShell coverage file.

    :type args: CoverageReportConfig
    :type coverage_file: str
    :rtype: str
    """
    with open(coverage_file, 'rb') as coverage_fd:
        coverage_info = json.loads(to_text(coverage_fd.read()))

    root_path = data_context().content.root + '/'

    # Minimum column widths; name_padding grows below to fit the longest filename.
    name_padding = 7
    cover_padding = 8

    file_report = []
    total_stmts = 0
    total_miss = 0

    for filename in sorted(coverage_info.keys()):
        hit_info = coverage_info[filename]

        if filename.startswith(root_path):
            filename = filename[len(root_path):]

        # Honor the report's omit/include filters (exact relative-path match).
        if args.omit and filename in args.omit:
            continue
        if args.include and filename not in args.include:
            continue

        stmts = len(hit_info)
        miss = len([c for c in hit_info.values() if c == 0])

        name_padding = max(name_padding, len(filename) + 3)

        total_stmts += stmts
        total_miss += miss

        cover = "{0}%".format(int((stmts - miss) / stmts * 100))

        # Coalesce consecutive uncovered line numbers into "start-end" ranges.
        missing = []
        current_missing = None
        sorted_lines = sorted([int(x) for x in hit_info.keys()])
        for idx, line in enumerate(sorted_lines):
            hit = hit_info[str(line)]
            if hit == 0 and current_missing is None:
                current_missing = line
            elif hit != 0 and current_missing is not None:
                end_line = sorted_lines[idx - 1]
                if current_missing == end_line:
                    missing.append(str(current_missing))
                else:
                    missing.append('%s-%s' % (current_missing, end_line))
                current_missing = None

        # Flush a range still open at the end of the file.
        if current_missing is not None:
            end_line = sorted_lines[-1]
            if current_missing == end_line:
                missing.append(str(current_missing))
            else:
                missing.append('%s-%s' % (current_missing, end_line))

        file_report.append({'name': filename, 'stmts': stmts, 'miss': miss, 'cover': cover, 'missing': missing})

    # Nothing to report (e.g. all files filtered out).
    if total_stmts == 0:
        return ''

    total_percent = '{0}%'.format(int((total_stmts - total_miss) / total_stmts * 100))
    stmts_padding = max(8, len(str(total_stmts)))
    miss_padding = max(7, len(str(total_miss)))

    line_length = name_padding + stmts_padding + miss_padding + cover_padding

    header = 'Name'.ljust(name_padding) + 'Stmts'.rjust(stmts_padding) + 'Miss'.rjust(miss_padding) + \
             'Cover'.rjust(cover_padding)

    if args.show_missing:
        header += 'Lines Missing'.rjust(16)
        line_length += 16

    line_break = '-' * line_length
    lines = ['%s%s%s%s%s' % (f['name'].ljust(name_padding), str(f['stmts']).rjust(stmts_padding),
                             str(f['miss']).rjust(miss_padding), f['cover'].rjust(cover_padding),
                             ' ' + ', '.join(f['missing']) if args.show_missing else '')
             for f in file_report]
    totals = 'TOTAL'.ljust(name_padding) + str(total_stmts).rjust(stmts_padding) + \
             str(total_miss).rjust(miss_padding) + total_percent.rjust(cover_padding)

    report = '{0}\n{1}\n{2}\n{1}\n{3}'.format(header, line_break, "\n".join(lines), totals)
    return report
def run_coverage(args, output_file, command, cmd):  # type: (CoverageConfig, str, str, t.List[str]) -> None
    """Run the coverage cli tool with the specified options."""
    # Point the coverage tool at the requested data file.
    env = common_environment()
    env['COVERAGE_FILE'] = output_file

    coverage_cmd = ['python', '-m', 'coverage', command, '--rcfile', COVERAGE_CONFIG_PATH]
    coverage_cmd.extend(cmd)

    intercept_command(args, target_name='coverage', env=env, cmd=coverage_cmd, disable_coverage=True)
| gpl-3.0 |
nparley/mylatitude | lib/apitools/base/py/compression_test.py | 8 | 5319 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for compression."""
from apitools.base.py import compression
from apitools.base.py import gzip
import six
import unittest2
class CompressionTest(unittest2.TestCase):
    """Tests for compression.CompressStream over a large, highly compressible stream."""

    def setUp(self):
        # Sample highly compressible data (~50MB).
        self.sample_data = b'abc' * 16777216
        # Stream of the sample data.
        self.stream = six.BytesIO()
        self.stream.write(self.sample_data)
        self.length = self.stream.tell()
        self.stream.seek(0)

    def testCompressionExhausted(self):
        """Test full compression.

        Test that highly compressible data is actually compressed in entirety.
        """
        # Compression level 9 = maximum compression.
        output, read, exhausted = compression.CompressStream(
            self.stream,
            self.length,
            9)
        # Ensure the compressed buffer is smaller than the input buffer.
        self.assertLess(output.length, self.length)
        # Ensure we read the entire input stream.
        self.assertEqual(read, self.length)
        # Ensure the input stream was exhausted.
        self.assertTrue(exhausted)

    def testCompressionUnbounded(self):
        """Test unbounded compression.

        Test that the input stream is exhausted when length is none.
        """
        output, read, exhausted = compression.CompressStream(
            self.stream,
            None,
            9)
        # Ensure the compressed buffer is smaller than the input buffer.
        self.assertLess(output.length, self.length)
        # Ensure we read the entire input stream.
        self.assertEqual(read, self.length)
        # Ensure the input stream was exhausted.
        self.assertTrue(exhausted)

    def testCompressionPartial(self):
        """Test partial compression.

        Test that the length parameter works correctly. The amount of data
        that's compressed can be greater than or equal to the requested length.
        """
        output_length = 40
        output, _, exhausted = compression.CompressStream(
            self.stream,
            output_length,
            9)
        # Ensure the requested read size is <= the compressed buffer size.
        self.assertLessEqual(output_length, output.length)
        # Ensure the input stream was not exhausted.
        self.assertFalse(exhausted)

    def testCompressionIntegrity(self):
        """Test that compressed data can be decompressed."""
        output, read, exhausted = compression.CompressStream(
            self.stream,
            self.length,
            9)
        # Ensure uncompressed data matches the sample data.
        with gzip.GzipFile(fileobj=output) as f:
            original = f.read()
            self.assertEqual(original, self.sample_data)
        # Ensure we read the entire input stream.
        self.assertEqual(read, self.length)
        # Ensure the input stream was exhausted.
        self.assertTrue(exhausted)
class StreamingBufferTest(unittest2.TestCase):
    """Tests for the consuming read/write semantics of compression.StreamingBuffer."""

    def setUp(self):
        self.stream = compression.StreamingBuffer()

    def testSimpleStream(self):
        """Test simple stream operations.

        Write a payload, then read it back in full, checking that reads
        consume the buffered bytes.
        """
        # A freshly created buffer holds nothing.
        self.assertEqual(self.stream.length, 0)
        # Writing updates the tracked length.
        self.stream.write(b'Sample data')
        self.assertEqual(self.stream.length, 11)
        # Reading returns the bytes and removes them from the buffer.
        payload = self.stream.read(11)
        self.assertEqual(payload, b'Sample data')
        self.assertEqual(self.stream.length, 0)

    def testPartialReads(self):
        """Test partial stream reads.

        Read the buffered payload in two chunks, verifying each read
        consumes exactly what it returned.
        """
        self.stream.write(b'Sample data')
        # First chunk is consumed, leaving the remainder buffered.
        first_chunk = self.stream.read(6)
        self.assertEqual(first_chunk, b'Sample')
        self.assertEqual(self.stream.length, 5)
        # Second chunk drains the buffer.
        second_chunk = self.stream.read(5)
        self.assertEqual(second_chunk, b' data')
        self.assertEqual(self.stream.length, 0)

    def testTooShort(self):
        """Test excessive stream reads.

        Requesting more bytes than are buffered must return what is
        available rather than raising.
        """
        self.stream.write(b'Sample')
        # Over-long read yields only the buffered bytes, without error.
        payload = self.stream.read(100)
        self.assertEqual(payload, b'Sample')
        self.assertEqual(self.stream.length, 0)
| mit |
idncom/odoo | openerp/report/custom.py | 338 | 25091 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import time
import openerp
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
import print_xml
import render
from interface import report_int
import common
from openerp.osv.osv import except_osv
from openerp.osv.orm import BaseModel
from pychart import *
import misc
import cStringIO
from lxml import etree
from openerp.tools.translate import _
class external_pdf(render.render):
    """Adapter exposing an already-generated PDF byte string through the render interface."""
    def __init__(self, pdf):
        render.render.__init__(self)
        # pdf: the finished PDF document as a byte string.
        self.pdf = pdf
        self.output_type='pdf'

    def _render(self):
        # The document is pre-built, so rendering just returns it.
        return self.pdf
theme.use_color = 1  # enable colored output for pychart-generated charts
#TODO: should inherit from report_rml instead of report_int
# -> could then override only create_xml instead of the whole create
# however, that does not work in every case, because graphs are generated
# directly as pdf by pychart, so rml is not involved for them
class report_custom(report_int):
    """Custom report renderer (registered as 'report.custom').

    Builds table, tree, pie, bar or line reports from an ir.report.custom
    definition and renders them to PDF: tables/trees go through RML, charts
    are drawn directly with pychart.
    """

    def __init__(self, name):
        report_int.__init__(self, name)

    #
    # PRE:
    #     fields = [['address','city'],['name'], ['zip']]
    #     conditions = [[('zip','==','3'),(,)],(,),(,)] #same structure as fields
    #     row_canvas = ['Rue', None, None]
    # POST:
    #     [ ['ville','name','zip'] ]
    #
    def _row_get(self, cr, uid, objs, fields, conditions, row_canvas=None, group_by=None):
        """Recursively flatten browse records into report rows.

        Each entry of ``fields`` is an attribute path (one per report column);
        ``conditions`` mirrors that structure with optional
        (fct, attr, op, value) filters, and ``row_canvas`` carries the
        partially-filled parent row down the recursion.

        NOTE(review): column values and filters are resolved with eval() on
        strings assembled from the report definition -- safe only as long as
        ir.report.custom records come from trusted (admin) users.
        """
        result = []
        for obj in objs:
            # Evaluate the per-column filter conditions for this record.
            tobreak = False
            for cond in conditions:
                if cond and cond[0]:
                    c = cond[0]
                    temp = c[0](eval('obj.'+c[1],{'obj': obj}))
                    # Builds "'<value>' <op> '<expected>'" and evals it.
                    if not eval('\''+temp+'\''+' '+c[2]+' '+'\''+str(c[3])+'\''):
                        tobreak = True
            if tobreak:
                break
            levels = {}
            row = []
            for i in range(len(fields)):
                if not fields[i]:
                    # Column already produced by an ancestor: take it from the
                    # canvas once, then blank it for subsequent rows.
                    row.append(row_canvas and row_canvas[i])
                    if row_canvas[i]:
                        row_canvas[i]=False
                elif len(fields[i])==1:
                    # Leaf attribute: read it directly from the record.
                    if obj:
                        row.append(str(eval('obj.'+fields[i][0],{'obj': obj})))
                    else:
                        row.append(None)
                else:
                    # Deeper path: remember the relation to recurse into.
                    row.append(None)
                    levels[fields[i][0]]=True
            if not levels:
                result.append(row)
            else:
                # Process group_by data first
                key = []
                if group_by is not None and fields[group_by] is not None:
                    if fields[group_by][0] in levels.keys():
                        key.append(fields[group_by][0])
                    for l in levels.keys():
                        if l != fields[group_by][0]:
                            key.append(l)
                else:
                    key = levels.keys()
                # Recurse into each relation, stripping one path segment.
                for l in key:
                    objs = eval('obj.'+l,{'obj': obj})
                    if not isinstance(objs, (BaseModel, list)):
                        objs = [objs]
                    field_new = []
                    cond_new = []
                    for f in range(len(fields)):
                        if (fields[f] and fields[f][0])==l:
                            field_new.append(fields[f][1:])
                            cond_new.append(conditions[f][1:])
                        else:
                            field_new.append(None)
                            cond_new.append(None)
                    if len(objs):
                        result += self._row_get(cr, uid, objs, field_new, cond_new, row, group_by)
                    else:
                        result.append(row)
        return result

    def create(self, cr, uid, ids, datas, context=None):
        """Build the report described by datas['report_id'] for the given ids.

        Loads the ir.report.custom definition, extracts rows via _row_get(),
        applies optional group-by aggregation and sorting, then dispatches to
        the table/tree/pie/bar/line generator. Returns (pdf_data, 'pdf').
        """
        if not context:
            context={}
        self.pool = openerp.registry(cr.dbname)
        report = self.pool['ir.report.custom'].browse(cr, uid, [datas['report_id']])[0]
        datas['model'] = report.model_id.model
        if report.menu_id:
            # Launched from a menu entry: report applies to all records.
            ids = self.pool[report.model_id.model].search(cr, uid, [])
            datas['ids'] = ids

        report_id = datas['report_id']
        report = self.pool['ir.report.custom'].read(cr, uid, [report_id], context=context)[0]
        fields = self.pool['ir.report.custom.fields'].read(cr, uid, report['fields_child0'], context=context)

        fields.sort(lambda x,y : x['sequence'] - y['sequence'])

        if report['field_parent']:
            parent_field = self.pool['ir.model.fields'].read(cr, uid, [report['field_parent'][0]], ['model'])
        model_name = self.pool['ir.model'].read(cr, uid, [report['model_id'][0]], ['model'], context=context)[0]['model']

        # Condition-preprocessing functions referenced by name in fcN_op.
        fct = {
            'id': lambda x: x,
            'gety': lambda x: x.split('-')[0],
            'in': lambda x: x.split(',')
        }
        # Build per-column attribute paths and filter tuples from the up-to-4
        # chained field_childN definitions of each report column.
        new_fields = []
        new_cond = []
        for f in fields:
            row = []
            cond = []
            for i in range(4):
                field_child = f['field_child'+str(i)]
                if field_child:
                    row.append(
                        self.pool['ir.model.fields'].read(cr, uid, [field_child[0]], ['name'], context=context)[0]['name']
                    )
                    if f['fc'+str(i)+'_operande']:
                        fct_name = 'id'
                        cond_op = f['fc'+str(i)+'_op']
                        if len(f['fc'+str(i)+'_op'].split(',')) == 2:
                            cond_op = f['fc'+str(i)+'_op'].split(',')[1]
                            fct_name = f['fc'+str(i)+'_op'].split(',')[0]
                        cond.append((fct[fct_name], f['fc'+str(i)+'_operande'][1], cond_op, f['fc'+str(i)+'_condition']))
                    else:
                        cond.append(None)
            new_fields.append(row)
            new_cond.append(cond)
        objs = self.pool[model_name].browse(cr, uid, ids)

        # Group by
        groupby = None
        idx = 0
        for f in fields:
            if f['groupby']:
                groupby = idx
            idx += 1

        results = []
        if report['field_parent']:
            # Hierarchical report: walk the parent/child field recursively,
            # recording the depth of each row in `level` for tree indentation.
            level = []
            def build_tree(obj, level, depth):
                res = self._row_get(cr, uid,[obj], new_fields, new_cond)
                level.append(depth)
                new_obj = eval('obj.'+report['field_parent'][1],{'obj': obj})
                if not isinstance(new_obj, list) :
                    new_obj = [new_obj]
                for o in new_obj:
                    if o:
                        res += build_tree(o, level, depth+1)
                return res

            for obj in objs:
                results += build_tree(obj, level, 0)
        else:
            results = self._row_get(cr, uid,objs, new_fields, new_cond, group_by=groupby)

        # Aggregation functions selected by each column's 'operation' value.
        fct = {
            'calc_sum': lambda l: reduce(lambda x,y: float(x)+float(y), filter(None, l), 0),
            'calc_avg': lambda l: reduce(lambda x,y: float(x)+float(y), filter(None, l), 0) / (len(filter(None, l)) or 1.0),
            'calc_max': lambda l: reduce(lambda x,y: max(x,y), [(i or 0.0) for i in l], 0),
            'calc_min': lambda l: reduce(lambda x,y: min(x,y), [(i or 0.0) for i in l], 0),
            'calc_count': lambda l: len(filter(None, l)),
            'False': lambda l: '\r\n'.join(filter(None, l)),
            'groupby': lambda l: reduce(lambda x,y: x or y, l)
        }
        new_res = []

        prev = None
        if groupby is not None:
            # Bucket rows by the group-by column (rows with an empty group
            # value belong to the most recent non-empty group), then collapse
            # each bucket with the per-column aggregation functions.
            res_dic = {}
            for line in results:
                if not line[groupby] and prev in res_dic:
                    res_dic[prev].append(line)
                else:
                    prev = line[groupby]
                    res_dic.setdefault(line[groupby], [])
                    res_dic[line[groupby]].append(line)

            #we use the keys in results since they are ordered, whereas in res_dic.heys() they aren't
            for key in filter(None, [x[groupby] for x in results]):
                row = []
                for col in range(len(fields)):
                    if col == groupby:
                        row.append(fct['groupby'](map(lambda x: x[col], res_dic[key])))
                    else:
                        row.append(fct[str(fields[col]['operation'])](map(lambda x: x[col], res_dic[key])))
                new_res.append(row)
            results = new_res

        if report['type']=='table':
            if report['field_parent']:
                res = self._create_tree(uid, ids, report, fields, level, results, context)
            else:
                # Sort by the configured column; numeric compare first, then
                # fall back to plain comparison when conversion fails.
                sort_idx = 0
                for idx in range(len(fields)):
                    if fields[idx]['name'] == report['sortby']:
                        sort_idx = idx
                        break
                try :
                    results.sort(lambda x,y : cmp(float(x[sort_idx]),float(y[sort_idx])))
                except :
                    results.sort(lambda x,y : cmp(x[sort_idx],y[sort_idx]))
                if report['limitt']:
                    results = results[:int(report['limitt'])]
                res = self._create_table(uid, ids, report, fields, None, results, context)
        elif report['type'] in ('pie','bar', 'line'):
            # Charts need numeric value columns; first column is the label
            # (empty labels inherit the previous row's label).
            results2 = []
            prev = False
            for r in results:
                row = []
                for j in range(len(r)):
                    if j == 0 and not r[j]:
                        row.append(prev)
                    elif j == 0 and r[j]:
                        prev = r[j]
                        row.append(r[j])
                    else:
                        try:
                            row.append(float(r[j]))
                        except Exception:
                            row.append(r[j])
                results2.append(row)
            if report['type']=='pie':
                res = self._create_pie(cr,uid, ids, report, fields, results2, context)
            elif report['type']=='bar':
                res = self._create_bars(cr,uid, ids, report, fields, results2, context)
            elif report['type']=='line':
                res = self._create_lines(cr,uid, ids, report, fields, results2, context)
        return self.obj.get(), 'pdf'

    def _create_tree(self, uid, ids, report, fields, level, results, context):
        """Render hierarchical results as an indented tree table (RML -> PDF).

        `level` holds one depth value per row (as built by create()); it is
        consumed here to compute each row's indentation.
        """
        pageSize=common.pageSize.get(report['print_format'], [210.0,297.0])
        if report['print_orientation']=='landscape':
            pageSize=[pageSize[1],pageSize[0]]

        new_doc = etree.Element('report')
        config = etree.SubElement(new_doc, 'config')

        def _append_node(name, text):
            n = etree.SubElement(config, name)
            n.text = text

        _append_node('date', time.strftime('%d/%m/%Y'))
        _append_node('PageFormat', '%s' % report['print_format'])
        _append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
        # 2.8346 converts millimeters to points (1 mm = 2.8346 pt).
        _append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
        _append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))

        # Distribute the remaining page width evenly over columns with no
        # explicit width (30mm is reserved for margins).
        length = pageSize[0]-30-reduce(lambda x,y:x+(y['width'] or 0), fields, 0)
        count = 0
        for f in fields:
            if not f['width']: count+=1
        for f in fields:
            if not f['width']:
                f['width']=round((float(length)/count)-0.5)

        _append_node('tableSize', '%s' % ','.join(map(lambda x: '%.2fmm' % (x['width'],), fields)))
        _append_node('report-header', '%s' % (report['title'],))
        _append_node('report-footer', '%s' % (report['footer'],))

        header = etree.SubElement(new_doc, 'header')
        for f in fields:
            field = etree.SubElement(header, 'field')
            field.text = f['name']

        lines = etree.SubElement(new_doc, 'lines')
        level.reverse()
        for line in results:
            shift = level.pop()
            node_line = etree.SubElement(lines, 'row')
            prefix = '+'
            for f in range(len(fields)):
                col = etree.SubElement(node_line, 'col')
                if f == 0:
                    # First column carries the tree indentation (3mm per level).
                    col.attrib.update(para='yes',
                                      tree='yes',
                                      space=str(3*shift)+'mm')
                if line[f] is not None:
                    col.text = prefix+str(line[f]) or ''
                else:
                    col.text = '/'
                prefix = ''

        # Transform the intermediate XML into RML, then render it to PDF.
        transform = etree.XSLT(
            etree.parse(os.path.join(tools.config['root_path'],
                                     'addons/base/report/custom_new.xsl')))
        rml = etree.tostring(transform(new_doc))

        self.obj = render.rml(rml)
        self.obj.render()
        return True

    def _create_lines(self, cr, uid, ids, report, fields, results, context):
        """Render results as a pychart line chart, one plot per value column.

        When the first column is a date field, the data is split into one plot
        set per year at the report's frequency (day/month/year).
        """
        pool = openerp.registry(cr.dbname)
        pdf_string = cStringIO.StringIO()
        can = canvas.init(fname=pdf_string, format='pdf')

        can.show(80,380,'/16/H'+report['title'])

        ar = area.T(size=(350,350),
            #x_coord = category_coord.T(['2005-09-01','2005-10-22'],0),
            x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"),
            y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:]))))

        # Date re-formatting per report frequency (Day/Month/Year).
        process_date = {
            'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]),
            'M': lambda x: x.split('-')[1],
            'Y': lambda x: x.split('-')[0]
        }

        order_date = {
            'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)),
            'M': lambda x: x,
            'Y': lambda x: x
        }

        abscissa = []

        idx = 0
        date_idx = None
        fct = {}
        for f in fields:
            # Deepest defined field_childN wins; if it is a date field the
            # column values are re-formatted per the report frequency.
            field_id = (f['field_child3'] and f['field_child3'][0]) or (f['field_child2'] and f['field_child2'][0]) or (f['field_child1'] and f['field_child1'][0]) or (f['field_child0'] and f['field_child0'][0])
            if field_id:
                # NOTE: `type` shadows the builtin here (legacy code).
                type = pool['ir.model.fields'].read(cr, uid, [field_id],['ttype'])
                if type[0]['ttype'] == 'date':
                    date_idx = idx
                    fct[idx] = process_date[report['frequency']]
                else:
                    fct[idx] = lambda x : x
            else:
                fct[idx] = lambda x : x
            idx+=1

        # plots are usually displayed year by year
        # so we do so if the first field is a date
        data_by_year = {}
        if date_idx is not None:
            for r in results:
                key = process_date['Y'](r[date_idx])
                if key not in data_by_year:
                    data_by_year[key] = []
                for i in range(len(r)):
                    r[i] = fct[i](r[i])
                data_by_year[key].append(r)
        else:
            data_by_year[''] = results

        idx0 = 0
        nb_bar = len(data_by_year)*(len(fields)-1)
        colors = map(lambda x:line_style.T(color=x), misc.choice_colors(nb_bar))

        abscissa = {}
        for line in data_by_year.keys():
            fields_bar = []
            # sum data and save it in a list. An item for a fields
            for d in data_by_year[line]:
                for idx in range(len(fields)-1):
                    fields_bar.append({})
                    if d[0] in fields_bar[idx]:
                        fields_bar[idx][d[0]] += d[idx+1]
                    else:
                        fields_bar[idx][d[0]] = d[idx+1]
            for idx in range(len(fields)-1):
                data = {}
                for k in fields_bar[idx].keys():
                    if k in data:
                        data[k] += fields_bar[idx][k]
                    else:
                        data[k] = fields_bar[idx][k]
                data_cum = []
                prev = 0.0
                keys = data.keys()
                keys.sort()
                # cumulate if necessary
                for k in keys:
                    data_cum.append([k, float(data[k])+float(prev)])
                    if fields[idx+1]['cumulate']:
                        prev += data[k]

                idx0 = 0
                plot = line_plot.T(label=fields[idx+1]['name']+' '+str(line), data = data_cum, line_style=colors[idx0*(len(fields)-1)+idx])
                ar.add_plot(plot)
                abscissa.update(fields_bar[idx])
                idx0 += 1

        abscissa = map(lambda x : [x, None], abscissa)
        ar.x_coord = category_coord.T(abscissa,0)
        ar.draw(can)
        can.close()

        self.obj = external_pdf(pdf_string.getvalue())
        self.obj.render()
        pdf_string.close()
        return True

    def _create_bars(self, cr, uid, ids, report, fields, results, context):
        """Render results as a pychart bar chart, clustered per value column.

        Mirrors _create_lines() but uses bar plots with fill styles instead of
        line plots, and sorts the abscissa categories.
        """
        pool = openerp.registry(cr.dbname)
        pdf_string = cStringIO.StringIO()
        can = canvas.init(fname=pdf_string, format='pdf')

        can.show(80,380,'/16/H'+report['title'])

        # Date re-formatting per report frequency (Day/Month/Year).
        process_date = {
            'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]),
            'M': lambda x: x.split('-')[1],
            'Y': lambda x: x.split('-')[0]
        }

        order_date = {
            'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)),
            'M': lambda x: x,
            'Y': lambda x: x
        }

        ar = area.T(size=(350,350),
            x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"),
            y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:]))))

        idx = 0
        date_idx = None
        fct = {}
        for f in fields:
            # Deepest defined field_childN wins; date columns get frequency
            # re-formatting.
            field_id = (f['field_child3'] and f['field_child3'][0]) or (f['field_child2'] and f['field_child2'][0]) or (f['field_child1'] and f['field_child1'][0]) or (f['field_child0'] and f['field_child0'][0])
            if field_id:
                # NOTE: `type` shadows the builtin here (legacy code).
                type = pool['ir.model.fields'].read(cr, uid, [field_id],['ttype'])
                if type[0]['ttype'] == 'date':
                    date_idx = idx
                    fct[idx] = process_date[report['frequency']]
                else:
                    fct[idx] = lambda x : x
            else:
                fct[idx] = lambda x : x
            idx+=1

        # plot are usually displayed year by year
        # so we do so if the first field is a date
        data_by_year = {}
        if date_idx is not None:
            for r in results:
                key = process_date['Y'](r[date_idx])
                if key not in data_by_year:
                    data_by_year[key] = []
                for i in range(len(r)):
                    r[i] = fct[i](r[i])
                data_by_year[key].append(r)
        else:
            data_by_year[''] = results

        nb_bar = len(data_by_year)*(len(fields)-1)
        colors = map(lambda x:fill_style.Plain(bgcolor=x), misc.choice_colors(nb_bar))

        abscissa = {}
        for line in data_by_year.keys():
            fields_bar = []
            # sum data and save it in a list. An item for a fields
            for d in data_by_year[line]:
                for idx in range(len(fields)-1):
                    fields_bar.append({})
                    if d[0] in fields_bar[idx]:
                        fields_bar[idx][d[0]] += d[idx+1]
                    else:
                        fields_bar[idx][d[0]] = d[idx+1]
            for idx in range(len(fields)-1):
                data = {}
                for k in fields_bar[idx].keys():
                    if k in data:
                        data[k] += fields_bar[idx][k]
                    else:
                        data[k] = fields_bar[idx][k]
                data_cum = []
                prev = 0.0
                keys = data.keys()
                keys.sort()
                # cumulate if necessary
                for k in keys:
                    data_cum.append([k, float(data[k])+float(prev)])
                    if fields[idx+1]['cumulate']:
                        prev += data[k]
                idx0 = 0
                plot = bar_plot.T(label=fields[idx+1]['name']+' '+str(line), data = data_cum, cluster=(idx0*(len(fields)-1)+idx,nb_bar), fill_style=colors[idx0*(len(fields)-1)+idx])
                ar.add_plot(plot)
                abscissa.update(fields_bar[idx])
            idx0 += 1

        abscissa = map(lambda x : [x, None], abscissa)
        abscissa.sort()
        ar.x_coord = category_coord.T(abscissa,0)
        ar.draw(can)
        can.close()
        self.obj = external_pdf(pdf_string.getvalue())
        self.obj.render()
        pdf_string.close()
        return True

    def _create_pie(self, cr, uid, ids, report, fields, results, context):
        """Render results as a pychart pie chart (labels from column 0, values from column 1)."""
        pdf_string = cStringIO.StringIO()
        can = canvas.init(fname=pdf_string, format='pdf')
        ar = area.T(size=(350,350), legend=legend.T(),
                    x_grid_style = None, y_grid_style = None)
        colors = map(lambda x:fill_style.Plain(bgcolor=x), misc.choice_colors(len(results)))

        # A pie of all-zero slices cannot be drawn; fail with a user error.
        if reduce(lambda x,y : x+y, map(lambda x : x[1],results)) == 0.0:
            raise except_osv(_('Error'), _("The sum of the data (2nd field) is null.\nWe can't draw a pie chart !"))

        plot = pie_plot.T(data=results, arc_offsets=[0,10,0,10],
                          shadow = (2, -2, fill_style.gray50),
                          label_offset = 25,
                          arrow_style = arrow.a3,
                          fill_styles=colors)
        ar.add_plot(plot)
        ar.draw(can)
        can.close()
        self.obj = external_pdf(pdf_string.getvalue())
        self.obj.render()
        pdf_string.close()
        return True

    def _create_table(self, uid, ids, report, fields, tree, results, context):
        """Render flat results as a plain table (RML -> PDF).

        Same XML/XSLT pipeline as _create_tree() but without indentation.
        """
        pageSize=common.pageSize.get(report['print_format'], [210.0,297.0])
        if report['print_orientation']=='landscape':
            pageSize=[pageSize[1],pageSize[0]]

        new_doc = etree.Element('report')
        config = etree.SubElement(new_doc, 'config')

        def _append_node(name, text):
            n = etree.SubElement(config, name)
            n.text = text

        _append_node('date', time.strftime('%d/%m/%Y'))
        _append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
        _append_node('PageFormat', '%s' % report['print_format'])
        # 2.8346 converts millimeters to points (1 mm = 2.8346 pt).
        _append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
        _append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))

        # Distribute the remaining page width evenly over columns with no
        # explicit width (30mm is reserved for margins).
        length = pageSize[0]-30-reduce(lambda x,y:x+(y['width'] or 0), fields, 0)
        count = 0
        for f in fields:
            if not f['width']: count+=1
        for f in fields:
            if not f['width']:
                f['width']=round((float(length)/count)-0.5)

        _append_node('tableSize', '%s' % ','.join(map(lambda x: '%.2fmm' % (x['width'],), fields)))
        _append_node('report-header', '%s' % (report['title'],))
        _append_node('report-footer', '%s' % (report['footer'],))

        header = etree.SubElement(new_doc, 'header')
        for f in fields:
            field = etree.SubElement(header, 'field')
            field.text = f['name']

        lines = etree.SubElement(new_doc, 'lines')
        for line in results:
            node_line = etree.SubElement(lines, 'row')
            for f in range(len(fields)):
                col = etree.SubElement(node_line, 'col', tree='no')
                if line[f] is not None:
                    col.text = line[f] or ''
                else:
                    col.text = '/'

        # Transform the intermediate XML into RML, then render it to PDF.
        transform = etree.XSLT(
            etree.parse(os.path.join(tools.config['root_path'],
                                     'addons/base/report/custom_new.xsl')))
        rml = etree.tostring(transform(new_doc))

        self.obj = render.rml(rml)
        self.obj.render()
        return True
report_custom('report.custom')  # register this renderer with the report engine under its service name
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sephii/django | tests/requests/tests.py | 5 | 34636 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime, timedelta
from io import BytesIO
from itertools import chain
import time
from django.core.exceptions import SuspiciousOperation
from django.core.handlers.wsgi import WSGIRequest, LimitedStream
from django.http import (HttpRequest, HttpResponse, parse_cookie,
build_request_repr, UnreadablePostError, RawPostDataException)
from django.test import SimpleTestCase, RequestFactory, override_settings
from django.test.client import FakePayload
from django.test.utils import str_prefix
from django.utils import six
from django.utils.encoding import force_str
from django.utils.http import cookie_date, urlencode
from django.utils.six.moves.urllib.parse import urlencode as original_urlencode
from django.utils.timezone import utc
class RequestsTests(SimpleTestCase):
    """
    Tests for HttpRequest/WSGIRequest construction, repr, path handling,
    cookie setting, LimitedStream behavior, and POST/body parsing.
    """
    def test_httprequest(self):
        request = HttpRequest()
        self.assertEqual(list(request.GET.keys()), [])
        self.assertEqual(list(request.POST.keys()), [])
        self.assertEqual(list(request.COOKIES.keys()), [])
        self.assertEqual(list(request.META.keys()), [])
        # .GET and .POST should be QueryDicts
        self.assertEqual(request.GET.urlencode(), '')
        self.assertEqual(request.POST.urlencode(), '')
        # and FILES should be MultiValueDict
        self.assertEqual(request.FILES.getlist('foo'), [])
    def test_httprequest_full_path(self):
        request = HttpRequest()
        request.path = request.path_info = '/;some/?awful/=path/foo:bar/'
        request.META['QUERY_STRING'] = ';some=query&+query=string'
        # Unsafe path characters are percent-encoded; the query string is not.
        expected = '/%3Bsome/%3Fawful/%3Dpath/foo:bar/?;some=query&+query=string'
        self.assertEqual(request.get_full_path(), expected)
    def test_httprequest_full_path_with_query_string_and_fragment(self):
        request = HttpRequest()
        request.path = request.path_info = '/foo#bar'
        request.META['QUERY_STRING'] = 'baz#quux'
        # '#' in the path is escaped; '#' in the query string is kept.
        self.assertEqual(request.get_full_path(), '/foo%23bar?baz#quux')
    def test_httprequest_repr(self):
        request = HttpRequest()
        request.path = '/somepath/'
        request.method = 'GET'
        request.GET = {'get-key': 'get-value'}
        request.POST = {'post-key': 'post-value'}
        request.COOKIES = {'post-key': 'post-value'}
        request.META = {'post-key': 'post-value'}
        self.assertEqual(repr(request), str_prefix("<HttpRequest: GET '/somepath/'>"))
        self.assertEqual(build_request_repr(request), str_prefix("<HttpRequest\npath:/somepath/,\nGET:{%(_)s'get-key': %(_)s'get-value'},\nPOST:{%(_)s'post-key': %(_)s'post-value'},\nCOOKIES:{%(_)s'post-key': %(_)s'post-value'},\nMETA:{%(_)s'post-key': %(_)s'post-value'}>"))
        self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={'a': 'b'}, POST_override={'c': 'd'}, COOKIES_override={'e': 'f'}, META_override={'g': 'h'}),
                         str_prefix("<HttpRequest\npath:/otherpath/,\nGET:{%(_)s'a': %(_)s'b'},\nPOST:{%(_)s'c': %(_)s'd'},\nCOOKIES:{%(_)s'e': %(_)s'f'},\nMETA:{%(_)s'g': %(_)s'h'}>"))
    def test_httprequest_repr_invalid_method_and_path(self):
        # A request missing method and/or path falls back to a bare repr.
        request = HttpRequest()
        self.assertEqual(repr(request), str_prefix("<HttpRequest>"))
        request = HttpRequest()
        request.method = "GET"
        self.assertEqual(repr(request), str_prefix("<HttpRequest>"))
        request = HttpRequest()
        request.path = ""
        self.assertEqual(repr(request), str_prefix("<HttpRequest>"))
    def test_bad_httprequest_repr(self):
        """
        If an exception occurs when parsing GET, POST, COOKIES, or META, the
        repr of the request should show it.
        """
        class Bomb(object):
            """An object that raises an exception when printed out."""
            def __repr__(self):
                raise Exception('boom!')
        bomb = Bomb()
        for attr in ['GET', 'POST', 'COOKIES', 'META']:
            request = HttpRequest()
            setattr(request, attr, {'bomb': bomb})
            self.assertIn('%s:<could not parse>' % attr, build_request_repr(request))
    def test_wsgirequest(self):
        request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': BytesIO(b'')})
        self.assertEqual(list(request.GET.keys()), [])
        self.assertEqual(list(request.POST.keys()), [])
        self.assertEqual(list(request.COOKIES.keys()), [])
        self.assertEqual(set(request.META.keys()), {'PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input'})
        self.assertEqual(request.META['PATH_INFO'], 'bogus')
        self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
        self.assertEqual(request.META['SCRIPT_NAME'], '')
    def test_wsgirequest_with_script_name(self):
        """
        Ensure that the request's path is correctly assembled, regardless of
        whether or not the SCRIPT_NAME has a trailing slash.
        Refs #20169.
        """
        # With trailing slash
        request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
        self.assertEqual(request.path, '/PREFIX/somepath/')
        # Without trailing slash
        request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
        self.assertEqual(request.path, '/PREFIX/somepath/')
    def test_wsgirequest_with_force_script_name(self):
        """
        Ensure that the FORCE_SCRIPT_NAME setting takes precedence over the
        request's SCRIPT_NAME environment parameter.
        Refs #20169.
        """
        with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
            request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
            self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
    def test_wsgirequest_path_with_force_script_name_trailing_slash(self):
        """
        Ensure that the request's path is correctly assembled, regardless of
        whether or not the FORCE_SCRIPT_NAME setting has a trailing slash.
        Refs #20169.
        """
        # With trailing slash
        with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
            request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
            self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
        # Without trailing slash
        with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX'):
            request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
            self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
    def test_wsgirequest_repr(self):
        request = WSGIRequest({'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
        self.assertEqual(repr(request), str_prefix("<WSGIRequest: GET '/'>"))
        request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
        request.GET = {'get-key': 'get-value'}
        request.POST = {'post-key': 'post-value'}
        request.COOKIES = {'post-key': 'post-value'}
        request.META = {'post-key': 'post-value'}
        self.assertEqual(repr(request), str_prefix("<WSGIRequest: GET '/somepath/'>"))
        self.assertEqual(build_request_repr(request), str_prefix("<WSGIRequest\npath:/somepath/,\nGET:{%(_)s'get-key': %(_)s'get-value'},\nPOST:{%(_)s'post-key': %(_)s'post-value'},\nCOOKIES:{%(_)s'post-key': %(_)s'post-value'},\nMETA:{%(_)s'post-key': %(_)s'post-value'}>"))
        self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={'a': 'b'}, POST_override={'c': 'd'}, COOKIES_override={'e': 'f'}, META_override={'g': 'h'}),
                         str_prefix("<WSGIRequest\npath:/otherpath/,\nGET:{%(_)s'a': %(_)s'b'},\nPOST:{%(_)s'c': %(_)s'd'},\nCOOKIES:{%(_)s'e': %(_)s'f'},\nMETA:{%(_)s'g': %(_)s'h'}>"))
    def test_wsgirequest_path_info(self):
        def wsgi_str(path_info):
            path_info = path_info.encode('utf-8')  # Actual URL sent by the browser (bytestring)
            if six.PY3:
                path_info = path_info.decode('iso-8859-1')  # Value in the WSGI environ dict (native string)
            return path_info
        # Regression for #19468
        request = WSGIRequest({'PATH_INFO': wsgi_str("/سلام/"), 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
        self.assertEqual(request.path, "/سلام/")
    def test_parse_cookie(self):
        # Cookies with invalid key characters are dropped.
        self.assertEqual(parse_cookie('invalid@key=true'), {})
    def test_httprequest_location(self):
        request = HttpRequest()
        self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
            'https://www.example.com/asdf')
        request.get_host = lambda: 'www.example.com'
        request.path = ''
        self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
            'http://www.example.com/path/with:colons')
    def test_near_expiration(self):
        "Cookie will expire when an near expiration time is provided"
        response = HttpResponse()
        # There is a timing weakness in this test; The
        # expected result for max-age requires that there be
        # a very slight difference between the evaluated expiration
        # time, and the time evaluated in set_cookie(). If this
        # difference doesn't exist, the cookie time will be
        # 1 second larger. To avoid the problem, put in a quick sleep,
        # which guarantees that there will be a time difference.
        expires = datetime.utcnow() + timedelta(seconds=10)
        time.sleep(0.001)
        response.set_cookie('datetime', expires=expires)
        datetime_cookie = response.cookies['datetime']
        self.assertEqual(datetime_cookie['max-age'], 10)
    def test_aware_expiration(self):
        "Cookie accepts an aware datetime as expiration time"
        response = HttpResponse()
        expires = (datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc)
        time.sleep(0.001)
        response.set_cookie('datetime', expires=expires)
        datetime_cookie = response.cookies['datetime']
        self.assertEqual(datetime_cookie['max-age'], 10)
    def test_far_expiration(self):
        "Cookie will expire when an distant expiration time is provided"
        response = HttpResponse()
        response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
        datetime_cookie = response.cookies['datetime']
        self.assertIn(
            datetime_cookie['expires'],
            # Slight time dependency; refs #23450
            ('Sat, 01-Jan-2028 04:05:06 GMT', 'Sat, 01-Jan-2028 04:05:07 GMT')
        )
    def test_max_age_expiration(self):
        "Cookie will expire if max_age is provided"
        response = HttpResponse()
        response.set_cookie('max_age', max_age=10)
        max_age_cookie = response.cookies['max_age']
        self.assertEqual(max_age_cookie['max-age'], 10)
        self.assertEqual(max_age_cookie['expires'], cookie_date(time.time() + 10))
    def test_httponly_cookie(self):
        response = HttpResponse()
        response.set_cookie('example', httponly=True)
        example_cookie = response.cookies['example']
        # A compat cookie may be in use -- check that it has worked
        # both as an output string, and using the cookie attributes
        self.assertIn('; httponly', str(example_cookie))
        self.assertTrue(example_cookie['httponly'])
    def test_unicode_cookie(self):
        "Verify HttpResponse.set_cookie() works with unicode data."
        response = HttpResponse()
        cookie_value = '清風'
        response.set_cookie('test', cookie_value)
        self.assertEqual(force_str(cookie_value), response.cookies['test'].value)
    def test_limited_stream(self):
        # Read all of a limited stream
        stream = LimitedStream(BytesIO(b'test'), 2)
        self.assertEqual(stream.read(), b'te')
        # Reading again returns nothing.
        self.assertEqual(stream.read(), b'')
        # Read a number of characters greater than the stream has to offer
        stream = LimitedStream(BytesIO(b'test'), 2)
        self.assertEqual(stream.read(5), b'te')
        # Reading again returns nothing.
        self.assertEqual(stream.readline(5), b'')
        # Read sequentially from a stream
        stream = LimitedStream(BytesIO(b'12345678'), 8)
        self.assertEqual(stream.read(5), b'12345')
        self.assertEqual(stream.read(5), b'678')
        # Reading again returns nothing.
        self.assertEqual(stream.readline(5), b'')
        # Read lines from a stream
        stream = LimitedStream(BytesIO(b'1234\n5678\nabcd\nefgh\nijkl'), 24)
        # Read a full line, unconditionally
        self.assertEqual(stream.readline(), b'1234\n')
        # Read a number of characters less than a line
        self.assertEqual(stream.readline(2), b'56')
        # Read the rest of the partial line
        self.assertEqual(stream.readline(), b'78\n')
        # Read a full line, with a character limit greater than the line length
        self.assertEqual(stream.readline(6), b'abcd\n')
        # Read the next line, deliberately terminated at the line end
        self.assertEqual(stream.readline(4), b'efgh')
        # Read the next line... just the line end
        self.assertEqual(stream.readline(), b'\n')
        # Read everything else.
        self.assertEqual(stream.readline(), b'ijkl')
        # Regression for #15018
        # If a stream contains a newline, but the provided length
        # is less than the number of provided characters, the newline
        # doesn't reset the available character count
        stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
        self.assertEqual(stream.readline(10), b'1234\n')
        self.assertEqual(stream.readline(3), b'abc')
        # Now expire the available characters
        self.assertEqual(stream.readline(3), b'd')
        # Reading again returns nothing.
        self.assertEqual(stream.readline(2), b'')
        # Same test, but with read, not readline.
        stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
        self.assertEqual(stream.read(6), b'1234\na')
        self.assertEqual(stream.read(2), b'bc')
        self.assertEqual(stream.read(2), b'd')
        self.assertEqual(stream.read(2), b'')
        self.assertEqual(stream.read(), b'')
    def test_stream(self):
        payload = FakePayload('name=value')
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': payload})
        self.assertEqual(request.read(), b'name=value')
    def test_read_after_value(self):
        """
        Reading from request is allowed after accessing request contents as
        POST or body.
        """
        payload = FakePayload('name=value')
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': payload})
        self.assertEqual(request.POST, {'name': ['value']})
        self.assertEqual(request.body, b'name=value')
        self.assertEqual(request.read(), b'name=value')
    def test_value_after_read(self):
        """
        Construction of POST or body is not allowed after reading
        from request.
        """
        payload = FakePayload('name=value')
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': payload})
        self.assertEqual(request.read(2), b'na')
        self.assertRaises(RawPostDataException, lambda: request.body)
        self.assertEqual(request.POST, {})
    def test_non_ascii_POST(self):
        payload = FakePayload(urlencode({'key': 'España'}))
        request = WSGIRequest({
            'REQUEST_METHOD': 'POST',
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': 'application/x-www-form-urlencoded',
            'wsgi.input': payload,
        })
        self.assertEqual(request.POST, {'key': ['España']})
    def test_alternate_charset_POST(self):
        """
        Test a POST with non-utf-8 payload encoding.
        """
        payload = FakePayload(original_urlencode({'key': 'España'.encode('latin-1')}))
        request = WSGIRequest({
            'REQUEST_METHOD': 'POST',
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': 'application/x-www-form-urlencoded; charset=iso-8859-1',
            'wsgi.input': payload,
        })
        self.assertEqual(request.POST, {'key': ['España']})
    def test_body_after_POST_multipart_form_data(self):
        """
        Reading body after parsing multipart/form-data is not allowed
        """
        # Because multipart is used for large amounts of data i.e. file uploads,
        # we don't want the data held in memory twice, and we don't want to
        # silence the error by setting body = '' either.
        payload = FakePayload("\r\n".join([
            '--boundary',
            'Content-Disposition: form-data; name="name"',
            '',
            'value',
            '--boundary--'
            '']))
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': payload})
        self.assertEqual(request.POST, {'name': ['value']})
        self.assertRaises(RawPostDataException, lambda: request.body)
    def test_body_after_POST_multipart_related(self):
        """
        Reading body after parsing multipart that isn't form-data is allowed
        """
        # Ticket #9054
        # There are cases in which the multipart data is related instead of
        # being a binary upload, in which case it should still be accessible
        # via body.
        payload_data = b"\r\n".join([
            b'--boundary',
            b'Content-ID: id; name="name"',
            b'',
            b'value',
            b'--boundary--'
            b''])
        payload = FakePayload(payload_data)
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'multipart/related; boundary=boundary',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': payload})
        self.assertEqual(request.POST, {})
        self.assertEqual(request.body, payload_data)
    def test_POST_multipart_with_content_length_zero(self):
        """
        Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
        """
        # According to:
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
        # Every request.POST with Content-Length >= 0 is a valid request,
        # this test ensures that we handle Content-Length == 0.
        payload = FakePayload("\r\n".join([
            '--boundary',
            'Content-Disposition: form-data; name="name"',
            '',
            'value',
            '--boundary--'
            '']))
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
                               'CONTENT_LENGTH': 0,
                               'wsgi.input': payload})
        self.assertEqual(request.POST, {})
    def test_POST_binary_only(self):
        payload = b'\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@'
        environ = {'REQUEST_METHOD': 'POST',
                   'CONTENT_TYPE': 'application/octet-stream',
                   'CONTENT_LENGTH': len(payload),
                   'wsgi.input': BytesIO(payload)}
        request = WSGIRequest(environ)
        self.assertEqual(request.POST, {})
        self.assertEqual(request.FILES, {})
        self.assertEqual(request.body, payload)
        # Same test without specifying content-type
        environ.update({'CONTENT_TYPE': '', 'wsgi.input': BytesIO(payload)})
        request = WSGIRequest(environ)
        self.assertEqual(request.POST, {})
        self.assertEqual(request.FILES, {})
        self.assertEqual(request.body, payload)
    def test_read_by_lines(self):
        payload = FakePayload('name=value')
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': payload})
        self.assertEqual(list(request), [b'name=value'])
    def test_POST_after_body_read(self):
        """
        POST should be populated even if body is read first
        """
        payload = FakePayload('name=value')
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': payload})
        request.body  # evaluate
        self.assertEqual(request.POST, {'name': ['value']})
    def test_POST_after_body_read_and_stream_read(self):
        """
        POST should be populated even if body is read first, and then
        the stream is read second.
        """
        payload = FakePayload('name=value')
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': payload})
        request.body  # evaluate
        self.assertEqual(request.read(1), b'n')
        self.assertEqual(request.POST, {'name': ['value']})
    def test_POST_after_body_read_and_stream_read_multipart(self):
        """
        POST should be populated even if body is read first, and then
        the stream is read second. Using multipart/form-data instead of urlencoded.
        """
        payload = FakePayload("\r\n".join([
            '--boundary',
            'Content-Disposition: form-data; name="name"',
            '',
            'value',
            '--boundary--'
            '']))
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': payload})
        request.body  # evaluate
        # Consume enough data to mess up the parsing:
        self.assertEqual(request.read(13), b'--boundary\r\nC')
        self.assertEqual(request.POST, {'name': ['value']})
    def test_POST_connection_error(self):
        """
        If wsgi.input.read() raises an exception while trying to read() the
        POST, the exception should be identifiable (not a generic IOError).
        """
        class ExplodingBytesIO(BytesIO):
            def read(self, len=0):
                raise IOError("kaboom!")
        payload = b'name=value'
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': ExplodingBytesIO(payload)})
        with self.assertRaises(UnreadablePostError):
            request.body
    def test_FILES_connection_error(self):
        """
        If wsgi.input.read() raises an exception while trying to read() the
        FILES, the exception should be identifiable (not a generic IOError).
        """
        class ExplodingBytesIO(BytesIO):
            def read(self, len=0):
                raise IOError("kaboom!")
        payload = b'x'
        request = WSGIRequest({'REQUEST_METHOD': 'POST',
                               'CONTENT_TYPE': 'multipart/form-data; boundary=foo_',
                               'CONTENT_LENGTH': len(payload),
                               'wsgi.input': ExplodingBytesIO(payload)})
        with self.assertRaises(UnreadablePostError):
            request.FILES
class HostValidationTests(SimpleTestCase):
    """
    Tests for HttpRequest.get_host(): ALLOWED_HOSTS enforcement,
    X-Forwarded-Host handling, DEBUG bypass, and error-message suggestions.
    """
    # Host headers that must always be rejected (header-injection /
    # credential-spoofing shapes).
    poisoned_hosts = [
        'example.com@evil.tld',
        'example.com:dr.frankenstein@evil.tld',
        'example.com:dr.frankenstein@evil.tld:80',
        'example.com:80/badpath',
        'example.com: recovermypassword.com',
    ]
    @override_settings(
        USE_X_FORWARDED_HOST=False,
        ALLOWED_HOSTS=[
            'forward.com', 'example.com', 'internal.com', '12.34.56.78',
            '[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com',
            '.multitenant.com', 'INSENSITIVE.com',
        ])
    def test_http_get_host(self):
        # Check if X_FORWARDED_HOST is provided.
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_HOST': 'forward.com',
            'HTTP_HOST': 'example.com',
            'SERVER_NAME': 'internal.com',
            'SERVER_PORT': 80,
        }
        # X_FORWARDED_HOST is ignored.
        self.assertEqual(request.get_host(), 'example.com')
        # Check if X_FORWARDED_HOST isn't provided.
        request = HttpRequest()
        request.META = {
            'HTTP_HOST': 'example.com',
            'SERVER_NAME': 'internal.com',
            'SERVER_PORT': 80,
        }
        self.assertEqual(request.get_host(), 'example.com')
        # Check if HTTP_HOST isn't provided.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'internal.com',
            'SERVER_PORT': 80,
        }
        self.assertEqual(request.get_host(), 'internal.com')
        # Check if HTTP_HOST isn't provided, and we're on a nonstandard port
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'internal.com',
            'SERVER_PORT': 8042,
        }
        self.assertEqual(request.get_host(), 'internal.com:8042')
        legit_hosts = [
            'example.com',
            'example.com:80',
            '12.34.56.78',
            '12.34.56.78:443',
            '[2001:19f0:feee::dead:beef:cafe]',
            '[2001:19f0:feee::dead:beef:cafe]:8080',
            'xn--4ca9at.com',  # Punnycode for öäü.com
            'anything.multitenant.com',
            'multitenant.com',
            'insensitive.com',
            'example.com.',
            'example.com.:80',
        ]
        for host in legit_hosts:
            request = HttpRequest()
            request.META = {
                'HTTP_HOST': host,
            }
            request.get_host()
        # Poisoned host headers are rejected as suspicious
        for host in chain(self.poisoned_hosts, ['other.com', 'example.com..']):
            with self.assertRaises(SuspiciousOperation):
                request = HttpRequest()
                request.META = {
                    'HTTP_HOST': host,
                }
                request.get_host()
    @override_settings(USE_X_FORWARDED_HOST=True, ALLOWED_HOSTS=['*'])
    def test_http_get_host_with_x_forwarded_host(self):
        # Check if X_FORWARDED_HOST is provided.
        request = HttpRequest()
        request.META = {
            'HTTP_X_FORWARDED_HOST': 'forward.com',
            'HTTP_HOST': 'example.com',
            'SERVER_NAME': 'internal.com',
            'SERVER_PORT': 80,
        }
        # X_FORWARDED_HOST is obeyed.
        self.assertEqual(request.get_host(), 'forward.com')
        # Check if X_FORWARDED_HOST isn't provided.
        request = HttpRequest()
        request.META = {
            'HTTP_HOST': 'example.com',
            'SERVER_NAME': 'internal.com',
            'SERVER_PORT': 80,
        }
        self.assertEqual(request.get_host(), 'example.com')
        # Check if HTTP_HOST isn't provided.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'internal.com',
            'SERVER_PORT': 80,
        }
        self.assertEqual(request.get_host(), 'internal.com')
        # Check if HTTP_HOST isn't provided, and we're on a nonstandard port
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'internal.com',
            'SERVER_PORT': 8042,
        }
        self.assertEqual(request.get_host(), 'internal.com:8042')
        # Poisoned host headers are rejected as suspicious
        legit_hosts = [
            'example.com',
            'example.com:80',
            '12.34.56.78',
            '12.34.56.78:443',
            '[2001:19f0:feee::dead:beef:cafe]',
            '[2001:19f0:feee::dead:beef:cafe]:8080',
            'xn--4ca9at.com',  # Punnycode for öäü.com
        ]
        for host in legit_hosts:
            request = HttpRequest()
            request.META = {
                'HTTP_HOST': host,
            }
            request.get_host()
        for host in self.poisoned_hosts:
            with self.assertRaises(SuspiciousOperation):
                request = HttpRequest()
                request.META = {
                    'HTTP_HOST': host,
                }
                request.get_host()
    @override_settings(DEBUG=True, ALLOWED_HOSTS=[])
    def test_host_validation_disabled_in_debug_mode(self):
        """If ALLOWED_HOSTS is empty and DEBUG is True, all hosts pass."""
        request = HttpRequest()
        request.META = {
            'HTTP_HOST': 'example.com',
        }
        self.assertEqual(request.get_host(), 'example.com')
        # Invalid hostnames would normally raise a SuspiciousOperation,
        # but we have DEBUG=True, so this check is disabled.
        request = HttpRequest()
        request.META = {
            'HTTP_HOST': "invalid_hostname.com",
        }
        self.assertEqual(request.get_host(), "invalid_hostname.com")
    @override_settings(ALLOWED_HOSTS=[])
    def test_get_host_suggestion_of_allowed_host(self):
        """get_host() makes helpful suggestions if a valid-looking host is not in ALLOWED_HOSTS."""
        msg_invalid_host = "Invalid HTTP_HOST header: %r."
        msg_suggestion = msg_invalid_host + " You may need to add %r to ALLOWED_HOSTS."
        msg_suggestion2 = msg_invalid_host + " The domain name provided is not valid according to RFC 1034/1035"
        for host in [  # Valid-looking hosts
            'example.com',
            '12.34.56.78',
            '[2001:19f0:feee::dead:beef:cafe]',
            'xn--4ca9at.com',  # Punnycode for öäü.com
        ]:
            request = HttpRequest()
            request.META = {'HTTP_HOST': host}
            self.assertRaisesMessage(
                SuspiciousOperation,
                msg_suggestion % (host, host),
                request.get_host
            )
        for domain, port in [  # Valid-looking hosts with a port number
            ('example.com', 80),
            ('12.34.56.78', 443),
            ('[2001:19f0:feee::dead:beef:cafe]', 8080),
        ]:
            host = '%s:%s' % (domain, port)
            request = HttpRequest()
            request.META = {'HTTP_HOST': host}
            self.assertRaisesMessage(
                SuspiciousOperation,
                msg_suggestion % (host, domain),
                request.get_host
            )
        for host in self.poisoned_hosts:
            request = HttpRequest()
            request.META = {'HTTP_HOST': host}
            self.assertRaisesMessage(
                SuspiciousOperation,
                msg_invalid_host % host,
                request.get_host
            )
        request = HttpRequest()
        request.META = {'HTTP_HOST': "invalid_hostname.com"}
        self.assertRaisesMessage(
            SuspiciousOperation,
            msg_suggestion2 % "invalid_hostname.com",
            request.get_host
        )
class BuildAbsoluteURITestCase(SimpleTestCase):
    """
    Regression tests for ticket #18314: build_absolute_uri() when
    request.path begins with //.
    """
    def setUp(self):
        self.factory = RequestFactory()
    def test_build_absolute_uri_no_location(self):
        """
        With no ``location`` argument, build_absolute_uri() reproduces the
        request path (which here begins with //) verbatim.
        """
        # //// is needed to create a request with a path beginning with //
        request = self.factory.get('////absolute-uri')
        self.assertEqual(request.build_absolute_uri(), 'http://testserver//absolute-uri')
    def test_build_absolute_uri_absolute_location(self):
        """
        An absolute-URL ``location`` argument is returned unchanged, even
        when the request path begins with //.
        """
        # //// is needed to create a request with a path beginning with //
        request = self.factory.get('////absolute-uri')
        uri = request.build_absolute_uri(location='http://example.com/?foo=bar')
        self.assertEqual(uri, 'http://example.com/?foo=bar')
    def test_build_absolute_uri_schema_relative_location(self):
        """
        A schema-relative ``location`` argument (//host/...) picks up the
        request's scheme, even when the request path begins with //.
        """
        # //// is needed to create a request with a path beginning with //
        request = self.factory.get('////absolute-uri')
        uri = request.build_absolute_uri(location='//example.com/?foo=bar')
        self.assertEqual(uri, 'http://example.com/?foo=bar')
    def test_build_absolute_uri_relative_location(self):
        """
        A relative ``location`` argument is resolved against the server
        root, even when the request path begins with //.
        """
        # //// is needed to create a request with a path beginning with //
        request = self.factory.get('////absolute-uri')
        uri = request.build_absolute_uri(location='/foo/bar/')
        self.assertEqual(uri, 'http://testserver/foo/bar/')
| bsd-3-clause |
nrao/FLAG-Beamformer-Devel | scripts/dibas/dibas_utils.py | 1 | 8946 | import shm_wrapper as shm
from vegas_hpc.GBTStatus import GBTStatus
import time, pyfits
import vegas_hpc.possem as possem
import numpy as n
#import psr_utils as psr
import vegas_hpc.astro_utils as astro
#import slalib as s
from pyslalib import slalib as s
DEGTORAD = 0.017453292519943295769236907684
RADTODEG = 57.29577951308232087679815481410
NEW_GBT = 1
def header_from_string(str):
    """
    header_from_string(str):
        Convert an input string (the ASCII header from a FITS HDU)
        into an instantiation of a pyfits 'Header' class.
    """
    # Parse the 80-character cards and wrap them in a Header object.
    return pyfits.Header(cardlist_from_string(str))
def card_from_string(str):
    """
    card_from_string(str):
        Return a pyfits 'Card' built from 'str', an 80-character
        'line' from a FITS header.
    """
    return pyfits.Card().fromstring(str)
def cardlist_from_string(str):
    """
    cardlist_from_string(str):
        Return a list of pyfits 'Cards' from the input string.
        'str' should be the ASCII from a FITS header.  Cards are
        80 characters each; parsing stops at the END card, which
        is not included in the result.
    """
    cardlist = []
    # Floor division keeps the card count an integer under Python 3
    # (or "from __future__ import division"); plain '/' would yield a
    # float and break range().  Behavior is unchanged under Python 2.
    numcards = len(str) // 80
    for ii in range(numcards):
        str_part = str[ii*80:(ii+1)*80]
        # The END card terminates the header.
        if str_part.strip().startswith("END"):
            break
        else:
            cardlist.append(card_from_string(str_part))
    return cardlist
VEGAS_STATUS_KEY = int('0x01001840', 16)
VEGAS_STATUS_SEMID = "/vegas_status"
class vegas_status:
    """
    Accessor for the VEGAS/GUPPI status shared-memory segment.

    The segment holds a FITS-style ASCII header guarded by a POSIX
    semaphore; read()/write() move it to/from a pyfits Header (self.hdr).
    NOTE: this module is Python-2 code (print statements, 00644 octal).
    """
    def __init__(self):
        # Attach to the status shared-memory segment and its semaphore.
        self.stat_buf = shm.SharedMemoryHandle(VEGAS_STATUS_KEY)
        self.sem = possem.sem_open(VEGAS_STATUS_SEMID, possem.O_CREAT, 00644, 1)
        self.hdr = None
        self.gbtstat = None
        self.read()
        # BACKEND == 'GUPPI' selects the old (GUPPI) data-buffer layout.
        self.new_databuf_format = True
        if 'BACKEND' in self.keys():
            if 'GUPPI' == self['BACKEND']:
                self.new_databuf_format=False # GUPPI mode
    def __getitem__(self, key):
        # Dictionary-style access to the cached header.
        return self.hdr[key]
    def keys(self):
        return [k for k, v in self.hdr.items()]
    def values(self):
        return [v for k, v in self.hdr.items()]
    def items(self):
        return self.hdr.items()
    def lock(self):
        # Block until the status semaphore is acquired.
        return possem.sem_wait(self.sem)
    def unlock(self):
        return possem.sem_post(self.sem)
    def data_buffer_format(self):
        """
        Returns true if the vegas data buffer format is in use and False otherwise
        """
        self.new_databuf_format=True
        if 'BACKEND' in self.keys():
            if 'GUPPI' == self['BACKEND']:
                self.new_databuf_format=False # GUPPI mode
        return self.new_databuf_format
    def read(self):
        # Refresh self.hdr from shared memory under the semaphore.
        self.lock()
        self.hdr = header_from_string(self.stat_buf.read())
        self.unlock()
    def write(self):
        # Serialize self.hdr back to shared memory, END-terminated and
        # padded to a full 80-character card.
        self.lock()
        self.stat_buf.write(repr(self.hdr.ascard)+"END"+" "*77)
        self.unlock()
    def update(self, key, value, comment=None):
        # Set a single header card (does not write to shared memory).
        self.hdr.update(key, value, comment)
    def show(self):
        # Dump all header key/value pairs to stdout.
        for k, v in self.hdr.items():
            print "'%8s' :"%k, v
        print ""
    def update_with_gbtstatus(self):
        # Pull the current GBT telescope status and copy the relevant
        # fields into the status header (in memory only; call write()
        # to publish).
        if self.gbtstat is None:
            self.gbtstat = GBTStatus()
        self.gbtstat.collectKVPairs()
        g = self.gbtstat.kvPairs
        self.update("TELESCOP", "GBT")
        self.update("OBSERVER", g['observer'])
        self.update("PROJID", g['data_dir'])
        self.update("FRONTEND", g['receiver'])
        self.update("NRCVR", 2) # I think all the GBT receivers have 2...
        # 'inear' matches both 'Linear' and 'linear'.
        if 'inear' in g['rcvr_pol']:
            self.update("FD_POLN", 'LIN')
        else:
            self.update("FD_POLN", 'CIRC')
        freq = float(g['freq'])
        self.update("OBSFREQ", freq)
        self.update("SRC_NAME", g['source'])
        if g['ant_motion']=='Tracking' or g['ant_motion']=='Guiding':
            self.update("TRK_MODE", 'TRACK')
        elif g['ant_motion']=='Stopped':
            self.update("TRK_MODE", 'DRIFT')
        else:
            self.update("TRK_MODE", 'UNKNOWN')
        # J2000 coordinates arrive as "value units" strings; keep the value.
        self.ra = float(g['j2000_major'].split()[0])
        self.ra_str = self.gbtstat.degrees2hms(self.ra)
        self.dec = float(g['j2000_minor'].split()[0])
        self.dec_str = self.gbtstat.degrees2dms(self.dec)
        self.update("RA_STR", self.ra_str)
        self.update("RA", self.ra)
        self.update("DEC_STR", self.dec_str)
        self.update("DEC", self.dec)
        # Convert "hh:mm:ss" LST to integer seconds.
        h, m, s = g['lst'].split(":")
        lst = int(round(astro.hms_to_rad(int(h),int(m),float(s))*86400.0/astro.TWOPI))
        self.update("LST", lst)
        self.update("AZ", float(g['az_actual']))
        self.update("ZA", 90.0-float(g['el_actual']))
        # Full beam width in degrees, assuming a 100 m dish (GBT).
        beam_deg = 2.0*astro.beam_halfwidth(freq, 100.0)/60.0
        self.update("BMAJ", beam_deg)
        self.update("BMIN", beam_deg)
    def update_azza(self):
        """
        update_azza():
            Update the AZ and ZA based on the current time with the vegas_status instance.
        """
        # Re-derive decimal RA/DEC (degrees) from the sexagesimal strings.
        (iptr, ang, stat) = s.sla_dafin(self['RA_STR'].replace(':', ' '), 1)
        self.update("RA", ang*15.0*RADTODEG)
        (iptr, ang, stat) = s.sla_dafin(self['DEC_STR'].replace(':', ' '), 1)
        self.update("DEC", ang*RADTODEG)
        MJD = astro.current_MJD()
        az, za = astro.radec_to_azza(self['RA'], self['DEC'], MJD, scope='GBT')
        self.update("AZ", az)
        self.update("ZA", za)
VEGAS_DATABUF_KEY = int('0x00C62C70', 16)
class vegas_databuf:
def __init__(self,databuf_id=1, vegas_format=True):
"""
Create an access object to an HPC data buffer.
Two formats are supported. The 'new' vegas format and
the standard guppi format.
"""
self.vegas_format = vegas_format
self.buf = shm.SharedMemoryHandle(VEGAS_DATABUF_KEY+databuf_id-1)
self.data_type = self.buf.read(NumberOfBytes=64, offset=0)
if self.vegas_format:
packed = self.buf.read(NumberOfBytes=8+5*8+3*4, offset=64)
self.buf_type = n.fromstring(packed[0:8], dtype=n.int64)
self.databuf_size, self.struct_size, self.block_size, self.header_size, \
self.index_size = n.fromstring(packed[8:48], dtype=n.int64)
self.shmid, self.semid, self.n_block= \
n.fromstring(packed[48:60], dtype=n.int32)
else:
packed = self.buf.read(NumberOfBytes=3*8+3*4, offset=64)
self.struct_size, self.block_size, self.header_size = \
n.fromstring(packed[0:24], dtype=n.int64)
self.shmid, self.semid, self.n_block= \
n.fromstring(packed[24:36], dtype=n.int32)
self.header_offset = self.struct_size
if self.vegas_format:
self.data_offset = self.struct_size + \
self.n_block*(self.header_size + self.index_size)
else:
self.data_offset = self.struct_size + self.n_block*self.header_size
self.dtype = n.int8
self.read_size = self.block_size
self.read_all_hdr()
def read_hdr(self,block):
if (block<0 or block>=self.n_block):
raise IndexError, "block %d out of range (n_block=%d)" \
% (block, self.n_block)
self.hdr[block] = header_from_string(self.buf.read(self.header_size,\
self.header_offset + block*self.header_size))
def read_all_hdr(self):
self.hdr = []
for i in range(self.n_block):
self.hdr.append(header_from_string(self.buf.read(self.header_size,\
self.header_offset + i*self.header_size)))
def data(self,block):
if (block<0 or block>=self.n_block):
raise IndexError, "block %d out of range (n_block=%d)" \
% (block, self.n_block)
self.read_hdr(block)
try:
if (self.hdr[block]["OBS_MODE"] == "PSR"):
self.dtype = n.float
else:
self.dype = n.int8
except KeyError:
self.dtype = n.int8
raw = n.fromstring(self.buf.read(self.block_size, \
self.data_offset + block*self.block_size), \
dtype=self.dtype)
try:
npol = self.hdr[block]["NPOL"]
nchan = self.hdr[block]["OBSNCHAN"]
if (self.hdr[block]["OBS_MODE"] == "PSR"):
#nbin = self.hdr[block]["NBIN"]
#raw.shape = (nbin, npol, nchan)
return raw
else:
nspec = self.block_size / (npol*nchan)
raw.shape = (nspec, npol, nchan)
except KeyError:
pass
return raw
if __name__=="__main__":
g = vegas_status()
g.show()
print
print 'keys:', g.keys()
print
print 'values:', g.values()
print
print 'items:', g.items()
print
g.update_with_gbtstatus()
g.write()
g.show()
| gpl-2.0 |
0jpq0/kbengine | kbe/src/lib/python/Lib/test/test_zipimport_support.py | 72 | 10704 | # This test module covers support in various parts of the standard library
# for working with modules located inside zipfiles
# The tests are centralised in this fashion to make it easy to drop them
# if a platform doesn't support zipimport
import test.support
import os
import os.path
import sys
import textwrap
import zipfile
import zipimport
import doctest
import inspect
import linecache
import pdb
import unittest
from test.script_helper import (spawn_python, kill_python, assert_python_ok,
temp_dir, make_script, make_zip_script)
verbose = test.support.verbose
# Library modules covered by this test set
# pdb (Issue 4201)
# inspect (Issue 4223)
# doctest (Issue 4197)
# Other test modules with zipimport related tests
# test_zipimport (of course!)
# test_cmd_line_script (covers the zipimport support in runpy)
# Retrieve some helpers from other test cases
from test import (test_doctest, sample_doctest, sample_doctest_no_doctests,
sample_doctest_no_docstrings)
def _run_object_doctest(obj, module):
    """Find and run every doctest attached to *obj*, failing the test run loudly.

    Returns the (failures, tries) pair reported by the doctest runner and
    raises test.support.TestFailed when any example fails.
    """
    finder = doctest.DocTestFinder(verbose=verbose, recurse=False)
    runner = doctest.DocTestRunner(verbose=verbose)
    # Prefer the object's fully qualified name; objects without
    # __module__/__name__ fall back to the module's name.
    try:
        name = "%s.%s" % (obj.__module__, obj.__name__)
    except AttributeError:
        name = module.__name__
    for example in finder.find(obj, name, module):
        runner.run(example)
    failures, tries = runner.failures, runner.tries
    if failures:
        raise test.support.TestFailed(
            "%d of %d doctests failed" % (failures, tries))
    if verbose:
        print("doctest (%s) ... %d tests with zero failures"
              % (module.__name__, tries))
    return failures, tries
class ZipSupportTests(unittest.TestCase):
    """Checks that pdb, inspect and doctest work on modules inside zipfiles."""
    # This used to use the ImportHooksBaseTestCase to restore
    # the state of the import related information
    # in the sys module after each test. However, that restores
    # *too much* information and breaks for the invocation of
    # of test_doctest. So we do our own thing and leave
    # sys.modules alone.
    # We also clear the linecache and zipimport cache
    # just to avoid any bogus errors due to name reuse in the tests
    def setUp(self):
        """Clear import-related caches and snapshot the import machinery."""
        linecache.clearcache()
        zipimport._zip_directory_cache.clear()
        self.path = sys.path[:]
        self.meta_path = sys.meta_path[:]
        self.path_hooks = sys.path_hooks[:]
        sys.path_importer_cache.clear()

    def tearDown(self):
        """Restore the import machinery state captured in setUp."""
        sys.path[:] = self.path
        sys.meta_path[:] = self.meta_path
        sys.path_hooks[:] = self.path_hooks
        sys.path_importer_cache.clear()

    def test_inspect_getsource_issue4223(self):
        """inspect.getsource() must work for code imported from a zipfile."""
        test_src = "def foo(): pass\n"
        with temp_dir() as d:
            init_name = make_script(d, '__init__', test_src)
            name_in_zip = os.path.join('zip_pkg',
                                       os.path.basename(init_name))
            zip_name, run_name = make_zip_script(d, 'test_zip',
                                                 init_name, name_in_zip)
            # Remove the on-disk copy so the zipped package is the only source.
            os.remove(init_name)
            sys.path.insert(0, zip_name)
            import zip_pkg
            try:
                self.assertEqual(inspect.getsource(zip_pkg.foo), test_src)
            finally:
                del sys.modules["zip_pkg"]

    def test_doctest_issue4197(self):
        """Run (a rewritten copy of) test_doctest from inside a zipfile."""
        # To avoid having to keep two copies of the doctest module's
        # unit tests in sync, this test works by taking the source of
        # test_doctest itself, rewriting it a bit to cope with a new
        # location, and then throwing it in a zip file to make sure
        # everything still works correctly
        test_src = inspect.getsource(test_doctest)
        test_src = test_src.replace(
                         "from test import test_doctest",
                         "import test_zipped_doctest as test_doctest")
        test_src = test_src.replace("test.test_doctest",
                                    "test_zipped_doctest")
        test_src = test_src.replace("test.sample_doctest",
                                    "sample_zipped_doctest")
        # The sample doctest files rewritten to include in the zipped version.
        sample_sources = {}
        for mod in [sample_doctest, sample_doctest_no_doctests,
                    sample_doctest_no_docstrings]:
            src = inspect.getsource(mod)
            src = src.replace("test.test_doctest", "test_zipped_doctest")
            # Rewrite the module name so that, for example,
            # "test.sample_doctest" becomes "sample_zipped_doctest".
            mod_name = mod.__name__.split(".")[-1]
            mod_name = mod_name.replace("sample_", "sample_zipped_")
            sample_sources[mod_name] = src
        with temp_dir() as d:
            script_name = make_script(d, 'test_zipped_doctest',
                                      test_src)
            zip_name, run_name = make_zip_script(d, 'test_zip',
                                                 script_name)
            # Append the rewritten sample modules to the same archive.
            z = zipfile.ZipFile(zip_name, 'a')
            for mod_name, src in sample_sources.items():
                z.writestr(mod_name + ".py", src)
            z.close()
            if verbose:
                zip_file = zipfile.ZipFile(zip_name, 'r')
                print ('Contents of %r:' % zip_name)
                zip_file.printdir()
                zip_file.close()
            # Remove the on-disk script so the zipped copy is the only source.
            os.remove(script_name)
            sys.path.insert(0, zip_name)
            import test_zipped_doctest
            try:
                # Some of the doc tests depend on the colocated text files
                # which aren't available to the zipped version (the doctest
                # module currently requires real filenames for non-embedded
                # tests). So we're forced to be selective about which tests
                # to run.
                # doctest could really use some APIs which take a text
                # string or a file object instead of a filename...
                known_good_tests = [
                    test_zipped_doctest.SampleClass,
                    test_zipped_doctest.SampleClass.NestedClass,
                    test_zipped_doctest.SampleClass.NestedClass.__init__,
                    test_zipped_doctest.SampleClass.__init__,
                    test_zipped_doctest.SampleClass.a_classmethod,
                    test_zipped_doctest.SampleClass.a_property,
                    test_zipped_doctest.SampleClass.a_staticmethod,
                    test_zipped_doctest.SampleClass.double,
                    test_zipped_doctest.SampleClass.get,
                    test_zipped_doctest.SampleNewStyleClass,
                    test_zipped_doctest.SampleNewStyleClass.__init__,
                    test_zipped_doctest.SampleNewStyleClass.double,
                    test_zipped_doctest.SampleNewStyleClass.get,
                    test_zipped_doctest.sample_func,
                    test_zipped_doctest.test_DocTest,
                    test_zipped_doctest.test_DocTestParser,
                    test_zipped_doctest.test_DocTestRunner.basics,
                    test_zipped_doctest.test_DocTestRunner.exceptions,
                    test_zipped_doctest.test_DocTestRunner.option_directives,
                    test_zipped_doctest.test_DocTestRunner.optionflags,
                    test_zipped_doctest.test_DocTestRunner.verbose_flag,
                    test_zipped_doctest.test_Example,
                    test_zipped_doctest.test_debug,
                    test_zipped_doctest.test_testsource,
                    test_zipped_doctest.test_trailing_space_in_test,
                    test_zipped_doctest.test_DocTestSuite,
                    test_zipped_doctest.test_DocTestFinder,
                ]
                # These tests are the ones which need access
                # to the data files, so we don't run them
                fail_due_to_missing_data_files = [
                    test_zipped_doctest.test_DocFileSuite,
                    test_zipped_doctest.test_testfile,
                    test_zipped_doctest.test_unittest_reportflags,
                ]
                for obj in known_good_tests:
                    _run_object_doctest(obj, test_zipped_doctest)
            finally:
                del sys.modules["test_zipped_doctest"]

    def test_doctest_main_issue4197(self):
        """doctest failure output must name the right file/line for zipped __main__."""
        # The docstring below must sit on line 2 of the generated script;
        # the assertions check that doctest reports exactly that location.
        test_src = textwrap.dedent("""\
                    class Test:
                        ">>> 'line 2'"
                        pass

                    import doctest
                    doctest.testmod()
                    """)
        pattern = 'File "%s", line 2, in %s'
        with temp_dir() as d:
            # First: run the plain on-disk script.
            script_name = make_script(d, 'script', test_src)
            rc, out, err = assert_python_ok(script_name)
            expected = pattern % (script_name, "__main__.Test")
            if verbose:
                print ("Expected line", expected)
                print ("Got stdout:")
                print (ascii(out))
            self.assertIn(expected.encode('utf-8'), out)
            # Second: run the same script as __main__.py inside a zipfile.
            zip_name, run_name = make_zip_script(d, "test_zip",
                                                 script_name, '__main__.py')
            rc, out, err = assert_python_ok(zip_name)
            expected = pattern % (run_name, "__main__.Test")
            if verbose:
                print ("Expected line", expected)
                print ("Got stdout:")
                print (ascii(out))
            self.assertIn(expected.encode('utf-8'), out)

    def test_pdb_issue4201(self):
        """pdb's 'l' command must show the filename for a script run from a zipfile."""
        test_src = textwrap.dedent("""\
                    def f():
                        pass

                    import pdb
                    pdb.Pdb(nosigint=True).runcall(f)
                    """)
        with temp_dir() as d:
            # First: debug the plain on-disk script.
            script_name = make_script(d, 'script', test_src)
            p = spawn_python(script_name)
            p.stdin.write(b'l\n')
            data = kill_python(p)
            # bdb/pdb applies normcase to its filename before displaying
            self.assertIn(os.path.normcase(script_name.encode('utf-8')), data)
            # Second: debug the same script as __main__.py inside a zipfile.
            zip_name, run_name = make_zip_script(d, "test_zip",
                                                 script_name, '__main__.py')
            p = spawn_python(zip_name)
            p.stdin.write(b'l\n')
            data = kill_python(p)
            # bdb/pdb applies normcase to its filename before displaying
            self.assertIn(os.path.normcase(run_name.encode('utf-8')), data)
def test_main():
    """Entry point used by regrtest: run the suite, then clean up children."""
    test.support.run_unittest(ZipSupportTests)
    # The pdb test spawns subprocesses; make sure none outlive the run.
    test.support.reap_children()

if __name__ == '__main__':
    test_main()
| lgpl-3.0 |
alexphelps/django-drip | drip/admin.py | 1 | 3846 | import base64
import json
from django import forms
from django.contrib import admin
from drip.models import Drip, SentDrip, QuerySetRule
from drip.drips import configured_message_classes, message_class_for
from drip.utils import get_user_model
class QuerySetRuleInline(admin.TabularInline):
    """Tabular inline so a Drip's queryset rules are editable on the Drip page."""
    model = QuerySetRule
class DripForm(forms.ModelForm):
    """Admin form for Drip offering the configured message classes as choices."""
    message_class = forms.ChoiceField(
        # Render each choice as "key (dotted.path.to.MessageClass)".
        choices=((k, '%s (%s)' % (k, v)) for k, v in configured_message_classes().items())
    )

    class Meta:
        model = Drip
        exclude = []
class DripAdmin(admin.ModelAdmin):
    """Admin for Drip campaigns: inline rules plus timeline/email-preview views."""
    list_display = ('name', 'enabled', 'message_class')
    inlines = [
        QuerySetRuleInline,
    ]
    form = DripForm

    # Shorthand: wrap a view so it requires admin-site authentication.
    av = lambda self, view: self.admin_site.admin_view(view)

    def timeline(self, request, drip_id, into_past, into_future):
        """
        Return a list of people who should get emails.
        """
        from django.shortcuts import render, get_object_or_404

        drip = get_object_or_404(Drip, id=drip_id)

        shifted_drips = []
        seen_users = set()
        # Walk time-shifted copies of the drip; each user is attributed only
        # to the first shifted drip whose queryset contains them.
        for shifted_drip in drip.drip.walk(into_past=int(into_past), into_future=int(into_future)+1):
            shifted_drip.prune()
            shifted_drips.append({
                'drip': shifted_drip,
                'qs': shifted_drip.get_queryset().exclude(id__in=seen_users)
            })
            seen_users.update(shifted_drip.get_queryset().values_list('id', flat=True))

        return render(request, 'drip/timeline.html', locals())

    def view_drip_email(self, request, drip_id, into_past, into_future, user_id):
        """Render the exact email body the given user would receive for this drip."""
        from django.shortcuts import render, get_object_or_404
        from django.http import HttpResponse

        drip = get_object_or_404(Drip, id=drip_id)
        User = get_user_model()
        user = get_object_or_404(User, id=user_id)

        drip_message = message_class_for(drip.message_class)(drip.drip, user)
        html = ''
        mime = ''
        # Prefer the text/html alternative when one exists; otherwise fall
        # back to the plain-text body.
        if drip_message.message.alternatives:
            for body, mime in drip_message.message.alternatives:
                if mime == 'text/html':
                    html = body
                    mime = 'text/html'
        else:
            html = drip_message.message.body
            mime = 'text/plain'

        return HttpResponse(html, content_type=mime)

    def build_extra_context(self, extra_context):
        """Add JSON-encoded user-model field metadata for the admin rule editor."""
        from drip.utils import get_simple_fields
        extra_context = extra_context or {}
        User = get_user_model()
        extra_context['field_data'] = json.dumps(get_simple_fields(User))
        return extra_context

    def add_view(self, request, extra_context=None):
        """Standard add view, augmented with the field metadata context."""
        return super(DripAdmin, self).add_view(
            request, extra_context=self.build_extra_context(extra_context))

    def change_view(self, request, object_id, extra_context=None):
        """Standard change view, augmented with the field metadata context."""
        return super(DripAdmin, self).change_view(
            request, object_id, extra_context=self.build_extra_context(extra_context))

    def get_urls(self):
        """Prepend the custom timeline/email-preview URLs to the stock admin URLs."""
        from django.conf.urls import patterns, url
        urls = super(DripAdmin, self).get_urls()
        my_urls = patterns('',
            url(
                r'^(?P<drip_id>[\d]+)/timeline/(?P<into_past>[\d]+)/(?P<into_future>[\d]+)/$',
                self.av(self.timeline),
                name='drip_timeline'
            ),
            url(
                r'^(?P<drip_id>[\d]+)/timeline/(?P<into_past>[\d]+)/(?P<into_future>[\d]+)/(?P<user_id>[\d]+)/$',
                self.av(self.view_drip_email),
                name='view_drip_email'
            )
        )
        return my_urls + urls
admin.site.register(Drip, DripAdmin)


class SentDripAdmin(admin.ModelAdmin):
    """Listing of sent drips showing every model field, newest first."""
    list_display = [f.name for f in SentDrip._meta.fields]
    ordering = ['-id']

admin.site.register(SentDrip, SentDripAdmin)
| mit |
Leila20/django | tests/template_tests/filter_tests/test_time.py | 94 | 2322 | from datetime import time
from django.template.defaultfilters import time as time_filter
from django.test import SimpleTestCase, override_settings
from django.utils import timezone, translation
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeTests(TimezoneTestCase):
    """
    #20693: Timezone support for the time template filter
    """
    @setup({'time00': '{{ dt|time }}'})
    def test_time00(self):
        # Default format: 12-hour clock with a.m./p.m. suffix.
        output = self.engine.render_to_string('time00', {'dt': time(16, 25)})
        self.assertEqual(output, '4:25 p.m.')

    @override_settings(USE_L10N=True)
    @setup({'time00_l10n': '{{ dt|time }}'})
    def test_time00_l10n(self):
        # With localization active, the French locale renders 24-hour time.
        with translation.override('fr'):
            output = self.engine.render_to_string('time00_l10n', {'dt': time(16, 25)})
        self.assertEqual(output, '16:25')

    @setup({'time01': '{{ dt|time:"e:O:T:Z" }}'})
    def test_time01(self):
        # Timezone-aware datetime: the e/O/T/Z specifiers all resolve
        # (fixed +03:15 offset, 11700 seconds).
        output = self.engine.render_to_string('time01', {'dt': self.now_tz_i})
        self.assertEqual(output, '+0315:+0315:+0315:11700')

    @setup({'time02': '{{ dt|time:"e:T" }}'})
    def test_time02(self):
        # Naive datetime: 'e' renders empty, 'T' yields the current zone name.
        output = self.engine.render_to_string('time02', {'dt': self.now})
        self.assertEqual(output, ':' + self.now_tz.tzinfo.tzname(self.now_tz))

    @setup({'time03': '{{ t|time:"P:e:O:T:Z" }}'})
    def test_time03(self):
        # A time value with tzinfo still renders all timezone specifiers empty.
        output = self.engine.render_to_string('time03', {'t': time(4, 0, tzinfo=timezone.get_fixed_timezone(30))})
        self.assertEqual(output, '4 a.m.::::')

    @setup({'time04': '{{ t|time:"P:e:O:T:Z" }}'})
    def test_time04(self):
        # Same as time03, but with a naive time value.
        output = self.engine.render_to_string('time04', {'t': time(4, 0)})
        self.assertEqual(output, '4 a.m.::::')

    @setup({'time05': '{{ d|time:"P:e:O:T:Z" }}'})
    def test_time05(self):
        # A date has no time component: the filter renders the empty string.
        output = self.engine.render_to_string('time05', {'d': self.today})
        self.assertEqual(output, '')

    @setup({'time06': '{{ obj|time:"P:e:O:T:Z" }}'})
    def test_time06(self):
        # Non-time values are silently rendered as the empty string.
        output = self.engine.render_to_string('time06', {'obj': 'non-datetime-value'})
        self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
    """Direct tests of the time filter function (bypassing the template engine)."""

    def test_inputs(self):
        # 'h' is the zero-padded 12-hour specifier: 13:00 -> '01', 00:00 -> '12'.
        self.assertEqual(time_filter(time(13), 'h'), '01')
        self.assertEqual(time_filter(time(0), 'h'), '12')
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.